vmware-nsxlib-12.0.1/0000775000175100017510000000000013244536266014430 5ustar zuulzuul00000000000000vmware-nsxlib-12.0.1/CONTRIBUTING.rst0000666000175100017510000000122013244536000017050 0ustar zuulzuul00000000000000If you would like to contribute to the development of OpenStack, you must follow the steps in this page: http://docs.openstack.org/infra/manual/developers.html If you already have a good understanding of how the system works and your OpenStack accounts are set up, you can skip to the development workflow section of this documentation to learn how changes to OpenStack should be submitted for review via the Gerrit tool: http://docs.openstack.org/infra/manual/developers.html#development-workflow Pull requests submitted through GitHub will be ignored. Bugs should be filed on Launchpad, not GitHub: https://bugs.launchpad.net/vmware-nsxlibvmware-nsxlib-12.0.1/README.rst0000666000175100017510000000024613244535763016124 0ustar zuulzuul00000000000000============= vmware-nsxlib ============= * Free software: Apache license * Source: http://git.openstack.org/cgit/openstack/vmware-nsxlib Features -------- * TODO vmware-nsxlib-12.0.1/releasenotes/0000775000175100017510000000000013244536266017121 5ustar zuulzuul00000000000000vmware-nsxlib-12.0.1/releasenotes/source/0000775000175100017510000000000013244536266020421 5ustar zuulzuul00000000000000vmware-nsxlib-12.0.1/releasenotes/source/_templates/0000775000175100017510000000000013244536266022556 5ustar zuulzuul00000000000000vmware-nsxlib-12.0.1/releasenotes/source/_templates/.placeholder0000666000175100017510000000000013244535763025032 0ustar zuulzuul00000000000000vmware-nsxlib-12.0.1/releasenotes/source/unreleased.rst0000666000175100017510000000015713244535763023310 0ustar zuulzuul00000000000000============================== Current Series Release Notes ============================== .. release-notes::vmware-nsxlib-12.0.1/releasenotes/source/conf.py0000666000175100017510000002153213244535763021726 0ustar zuulzuul00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # Glance Release Notes documentation build configuration file, created by # sphinx-quickstart on Tue Nov 3 17:40:50 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. 
They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'oslosphinx', 'reno.sphinxext', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'vmware_nsxlib Release Notes' copyright = u'2016, OpenStack Foundation' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. # The full version, including alpha/beta/rc tags. release = '' # The short X.Y version. version = '' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. 
# html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'GlanceReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'GlanceReleaseNotes.tex', u'Glance Release Notes Documentation', u'Glance Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'glancereleasenotes', u'Glance Release Notes Documentation', [u'Glance Developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'GlanceReleaseNotes', u'Glance Release Notes Documentation', u'Glance Developers', 'GlanceReleaseNotes', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. 
# texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] vmware-nsxlib-12.0.1/releasenotes/source/_static/0000775000175100017510000000000013244536266022047 5ustar zuulzuul00000000000000vmware-nsxlib-12.0.1/releasenotes/source/_static/.placeholder0000666000175100017510000000000013244535763024323 0ustar zuulzuul00000000000000vmware-nsxlib-12.0.1/releasenotes/source/index.rst0000666000175100017510000000020413244535763022261 0ustar zuulzuul00000000000000============================ vmware_nsxlib Release Notes ============================ .. toctree:: :maxdepth: 1 unreleased vmware-nsxlib-12.0.1/releasenotes/notes/0000775000175100017510000000000013244536266020251 5ustar zuulzuul00000000000000vmware-nsxlib-12.0.1/releasenotes/notes/.placeholder0000666000175100017510000000000013244535763022525 0ustar zuulzuul00000000000000vmware-nsxlib-12.0.1/babel.cfg0000666000175100017510000000002013244535763016151 0ustar zuulzuul00000000000000[python: **.py] vmware-nsxlib-12.0.1/setup.cfg0000666000175100017510000000237013244536266016255 0ustar zuulzuul00000000000000[metadata] name = vmware-nsxlib summary = A common library that interfaces with VMware NSX description-file = README.rst author = OpenStack author-email = openstack-dev@lists.openstack.org home-page = http://www.openstack.org/ classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 2 Programming Language :: Python :: 2.7 Programming Language :: Python :: 3 Programming Language :: Python :: 3.5 [files] packages = vmware_nsxlib [build_sphinx] source-dir = doc/source build-dir = doc/build all_files = 1 [upload_sphinx] upload-dir = doc/build/html [compile_catalog] directory = vmware_nsxlib/locale domain = vmware_nsxlib [update_catalog] domain = vmware_nsxlib output_dir = vmware_nsxlib/locale input_file = vmware_nsxlib/locale/vmware_nsxlib.pot [extract_messages] keywords = _ gettext ngettext l_ lazy_gettext mapping_file = babel.cfg output_file = vmware_nsxlib/locale/vmware_nsxlib.pot [build_releasenotes] all_files = 1 build-dir = releasenotes/build source-dir = releasenotes/source [egg_info] tag_build = tag_date = 0 vmware-nsxlib-12.0.1/PKG-INFO0000664000175100017510000000204613244536266015527 0ustar zuulzuul00000000000000Metadata-Version: 1.1 Name: vmware-nsxlib Version: 12.0.1 Summary: A common library that interfaces with VMware NSX Home-page: http://www.openstack.org/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description-Content-Type: UNKNOWN Description: ============= vmware-nsxlib ============= * Free software: Apache license * Source: http://git.openstack.org/cgit/openstack/vmware-nsxlib Features -------- * TODO Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 
Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.5 vmware-nsxlib-12.0.1/vmware_nsxlib/0000775000175100017510000000000013244536266017310 5ustar zuulzuul00000000000000vmware-nsxlib-12.0.1/vmware_nsxlib/version.py0000666000175100017510000000125413244535763021354 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pbr.version version_info = pbr.version.VersionInfo('vmware-nsxlib') vmware-nsxlib-12.0.1/vmware_nsxlib/tests/0000775000175100017510000000000013244536266020452 5ustar zuulzuul00000000000000vmware-nsxlib-12.0.1/vmware_nsxlib/tests/__init__.py0000666000175100017510000000000013244535763022554 0ustar zuulzuul00000000000000vmware-nsxlib-12.0.1/vmware_nsxlib/tests/unit/0000775000175100017510000000000013244536266021431 5ustar zuulzuul00000000000000vmware-nsxlib-12.0.1/vmware_nsxlib/tests/unit/__init__.py0000666000175100017510000000000013244535763023533 0ustar zuulzuul00000000000000vmware-nsxlib-12.0.1/vmware_nsxlib/tests/unit/v3/0000775000175100017510000000000013244536266021761 5ustar zuulzuul00000000000000vmware-nsxlib-12.0.1/vmware_nsxlib/tests/unit/v3/test_cert.py0000666000175100017510000003171313244535763024337 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
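# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the storage-driver
# contract these certificate tests exercise. Any object handed to
# ClientCertificateManager as ``storage_driver`` only needs the
# store_cert/get_cert/delete_cert/is_empty methods keyed by a project
# (identity) id -- DummyStorageDriver defined below in this module is the
# simplest possible implementation. The helper name and the literal values
# here are hypothetical and exist only for illustration; the helper is never
# called by the test suite.
def _example_storage_roundtrip():
    """Store, fetch and delete a fake cert/key pair with DummyStorageDriver."""
    driver = DummyStorageDriver()
    driver.store_cert('some-project', 'fake-cert-pem', 'fake-key-pem')
    # get_cert() returns a (certificate, private_key) tuple
    assert driver.get_cert('some-project') == ('fake-cert-pem', 'fake-key-pem')
    driver.delete_cert('some-project')
    assert driver.is_empty('some-project')
# ---------------------------------------------------------------------------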
# import os import mock from OpenSSL import crypto from oslo_serialization import jsonutils from vmware_nsxlib.tests.unit.v3 import mocks from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase from vmware_nsxlib.tests.unit.v3 import test_client from vmware_nsxlib.tests.unit.v3 import test_constants as const from vmware_nsxlib.v3 import client from vmware_nsxlib.v3 import client_cert from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3 import trust_management as tm class DummyStorageDriver(dict): """Storage driver simulation - just a dictionary""" def store_cert(self, project_id, certificate, private_key): self[project_id] = {} self[project_id]['cert'] = certificate self[project_id]['key'] = private_key def get_cert(self, project_id): if project_id not in self: return (None, None) return (self[project_id]['cert'], self[project_id]['key']) def delete_cert(self, project_id): del(self[project_id]) def is_empty(self, project_id): return project_id not in self class NsxV3ClientCertificateTestCase(nsxlib_testcase.NsxClientTestCase): identity = 'drumknott' cert_id = "00000000-1111-2222-3333-444444444444" identity_id = "55555555-6666-7777-8888-999999999999" node_id = "meh" def _get_mocked_response(self, status_code, results): return mocks.MockRequestsResponse( status_code, jsonutils.dumps({'results': results})) def _get_mocked_error_response(self, status_code, error_code): return mocks.MockRequestsResponse( status_code, jsonutils.dumps({'httpStatus': 'go away', 'error_code': error_code, 'module_name': 'never mind', 'error_message': 'bad luck'})) def _get_mocked_trust(self, action, cert_pem): fake_responses = [] if 'create' in action: # import cert and return its id results = [{'id': self.cert_id}] fake_responses.append(self._get_mocked_response(201, results)) # and then bind this id to principal identity fake_responses.append(self._get_mocked_response(201, [])) if 'delete' in action: # get certs list, including same cert imported twice edge case results = [{'resource_type': 'Certificate', 'id': 'dont care', 'pem_encoded': 'some junk'}, {'resource_type': 'Certificate', 'id': 'some_other_cert_id', 'pem_encoded': cert_pem}, {'resource_type': 'Certificate', 'id': self.cert_id, 'pem_encoded': cert_pem}] fake_responses.append(self._get_mocked_response(200, results)) # get principal identities list results = [{'resource_type': 'Principal Identity', 'id': 'dont care', 'name': 'willikins', 'certificate_id': 'some other id'}, {'resource_type': 'Principal Identity', 'id': self.identity_id, 'name': self.identity, 'certificate_id': self.cert_id}] fake_responses.append(self._get_mocked_response(200, results)) # delete certificate fake_responses.append(self._get_mocked_response(204, [])) # delete identity fake_responses.append(self._get_mocked_response(204, [])) mock_client = self.new_mocked_client( client.JSONRESTClient, url_prefix='api/v1', session_response=fake_responses) return tm.NsxLibTrustManagement(mock_client, {}) def _verify_backend_create(self, mocked_trust, cert_pem): """Verify API calls to create cert and identity on backend""" # verify API call to import cert on backend base_uri = 'https://1.2.3.4/api/v1/trust-management' uri = base_uri + '/certificates?action=import' expected_body = {'pem_encoded': cert_pem} test_client.assert_json_call('post', mocked_trust.client, uri, single_call=False, data=jsonutils.dumps(expected_body)) # verify API call to bind cert to identity on backend uri = base_uri + '/principal-identities' expected_body = {'name': self.identity, 'node_id': 
self.node_id, 'permission_group': 'read_write_api_users', 'certificate_id': self.cert_id, 'is_protected': True} test_client.assert_json_call('post', mocked_trust.client, uri, single_call=False, data=jsonutils.dumps(expected_body, sort_keys=True)) def _verify_backend_delete(self, mocked_trust): """Verify API calls to fetch and delete cert and identity""" # verify API call to query identities in order to get cert id base_uri = 'https://1.2.3.4/api/v1/trust-management' uri = base_uri + '/principal-identities' test_client.assert_json_call('get', mocked_trust.client, uri, single_call=False) # verify API call to delete openstack principal identity uri = uri + '/' + self.identity_id test_client.assert_json_call('delete', mocked_trust.client, uri, single_call=False) # verify API call to delete certificate uri = base_uri + '/certificates/' + self.cert_id test_client.assert_json_call('delete', mocked_trust.client, uri, single_call=False) def test_generate_cert(self): """Test startup without certificate + certificate generation""" storage_driver = DummyStorageDriver() # Prepare fake trust management for "cert create" requests cert_pem, key_pem = storage_driver.get_cert(self.identity) mocked_trust = self._get_mocked_trust('create', cert_pem) cert = client_cert.ClientCertificateManager(self.identity, mocked_trust, storage_driver) self.assertFalse(cert.exists()) cert.generate(subject={}, key_size=2048, valid_for_days=333, node_id=self.node_id) # verify client cert was generated and makes sense self.assertTrue(cert.exists()) self.assertEqual(332, cert.expires_in_days()) cert_pem, key_pem = cert.get_pem() # verify cert ans PK were stored in storage stored_cert, stored_key = storage_driver.get_cert(self.identity) self.assertEqual(cert_pem, stored_cert) self.assertEqual(key_pem, stored_key) # verify backend API calls self._verify_backend_create(mocked_trust, cert_pem) # try to generate cert again and fail self.assertRaises(nsxlib_exc.ObjectAlreadyExists, cert.generate, {}) def _prepare_storage_with_existing_cert(self, key_size, days, alg, subj): # prepare storage driver with existing cert and key # this test simulates system startup cert, key = client_cert.generate_self_signed_cert_pair(key_size, days, alg, subj) storage_driver = DummyStorageDriver() cert_pem = crypto.dump_certificate(crypto.FILETYPE_PEM, cert) key_pem = crypto.dump_privatekey(crypto.FILETYPE_PEM, key) storage_driver.store_cert(self.identity, cert_pem, key_pem) return storage_driver def test_load_and_delete_existing_cert(self): """Test startup with existing certificate + certificate deletion""" storage_driver = self._prepare_storage_with_existing_cert(4096, 3650, 'sha256', {}) # get mocked backend driver for trust management, # prepared for get request, that preceeds delete operation cert_pem, key_pem = storage_driver.get_cert(self.identity) mocked_trust = self._get_mocked_trust('delete', cert_pem) cert = client_cert.ClientCertificateManager(self.identity, mocked_trust, storage_driver) self.assertTrue(cert.exists()) cert.delete() self.assertFalse(cert.exists()) self.assertTrue(storage_driver.is_empty(self.identity)) self._verify_backend_delete(mocked_trust) def _test_import_and_delete_cert(self, with_pkey=True): filename = '/tmp/test.pem' # this driver simulates storage==none scenario noop_driver = DummyStorageDriver() cert, key = client_cert.generate_self_signed_cert_pair(4096, 20, 'sha256', {}) cert_pem = crypto.dump_certificate(crypto.FILETYPE_PEM, cert) key_pem = crypto.dump_privatekey(crypto.FILETYPE_PEM, key) with open(filename, 'wb') 
as f: f.write(cert_pem) if with_pkey: f.write(key_pem) mocked_trust = self._get_mocked_trust('create_delete', cert_pem) cert = client_cert.ClientCertificateManager(self.identity, mocked_trust, noop_driver) cert.import_pem(filename, self.node_id) self._verify_backend_create(mocked_trust, cert_pem) cert.delete_pem(filename) self._verify_backend_delete(mocked_trust) os.remove(filename) def test_import_and_delete_cert_pkey(self): self._test_import_and_delete_cert(True) def test_import_and_delete_cert_only(self): self._test_import_and_delete_cert(False) def test_get_certificate_details(self): """Test retrieving cert details for existing cert""" key_size = 2048 days = 999 alg = 'sha256' subj = {'country': 'CA', 'organization': 'squirrel rights', 'hostname': 'www.squirrels.ca', 'unit': 'nuts', 'state': 'BC'} storage_driver = self._prepare_storage_with_existing_cert(key_size, days, alg, subj) with client_cert.ClientCertificateManager(self.identity, None, storage_driver) as cert: self.assertTrue(cert.exists()) self.assertEqual(days - 1, cert.expires_in_days()) self.assertEqual(key_size, cert.get_key_size()) cert_subj = cert.get_subject() self.assertEqual(subj, cert_subj) def test_bad_certificate_values(self): bad_cert_values = [{'key_size': 1024, 'valid_for_days': 10, 'signature_alg': 'sha256', 'subject': {}}, {'key_size': 4096, 'valid_for_days': 100, 'signature_alg': 'sha', 'subject': {}}] for args in bad_cert_values: self.assertRaises(nsxlib_exc.NsxLibInvalidInput, client_cert.generate_self_signed_cert_pair, **args) def test_find_cert_with_pem(self): with mock.patch.object(self.nsxlib.trust_management, 'get_certs' ) as mock_get_certs: mock_get_certs.return_value = const.FAKE_CERT_LIST cert_ids = self.nsxlib.trust_management.find_cert_with_pem( const.FAKE_CERT_PEM) self.assertEqual(const.FAKE_CERT_LIST[1]['id'], cert_ids[0]) vmware-nsxlib-12.0.1/vmware_nsxlib/tests/unit/v3/test_native_dhcp.py0000666000175100017510000001425613244535763025671 0ustar zuulzuul00000000000000# Copyright (c) 2017 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
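# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal reference
# version of the static-route building behaviour that the tests below expect
# from NsxLibNativeDhcp.build_static_routes(). It is inferred purely from the
# expected results asserted in these test cases -- the real implementation in
# vmware_nsxlib.v3.native_dhcp may differ in detail. The function name is
# hypothetical and it is never called by the test suite.
def _reference_build_static_routes(gateway_ip, cidr, host_routes):
    # The local subnet is always reachable on-link.
    static_routes = [{'network': cidr, 'next_hop': '0.0.0.0'}]
    seen_default = False
    for route in host_routes:
        static_routes.append({'network': route['destination'],
                              'next_hop': route['nexthop']})
        if route['destination'] == '0.0.0.0/0':
            # A host route for 0.0.0.0/0 overrides the subnet gateway.
            seen_default = True
            gateway_ip = route['nexthop']
    if gateway_ip and not seen_default:
        # Otherwise fall back to the subnet gateway for the default route.
        static_routes.append({'network': '0.0.0.0/0', 'next_hop': gateway_ip})
    return static_routes, gateway_ip
# ---------------------------------------------------------------------------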
from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase from vmware_nsxlib.v3 import native_dhcp # TODO(asarfaty): Add more test cases here class TestNativeDhcp(nsxlib_testcase.NsxLibTestCase): """Tests for vmware_nsxlib.v3.native_dhcp.NsxLibNativeDhcp.""" def setUp(self, *args, **kwargs): super(TestNativeDhcp, self).setUp() self.handler = native_dhcp.NsxLibNativeDhcp( self.nsxlib.client, nsxlib_testcase.get_default_nsxlib_config()) self.net_dns_domain = 'a.com' self.subnet_dns_nameserver = '1.1.1.1' self.default_dns_domain = 'b.com' self.default_dns_nameserver = '2.2.2.2' def _get_server_config(self, with_net_dns=True, with_default_dns=True): net = {'name': 'dummy', 'id': 'dummy'} subnet = {'dns_nameservers': None, 'gateway_ip': '2.2.2.2', 'cidr': '5.5.0.0/24', 'host_routes': []} port = {'fixed_ips': [{'ip_address': '5.5.0.1'}]} tags = [] if with_net_dns: net['dns_domain'] = {'dns_domain': self.net_dns_domain} subnet['dns_nameservers'] = [self.subnet_dns_nameserver] if with_default_dns: result = self.handler.build_server_config( net, subnet, port, tags, default_dns_nameservers=[self.default_dns_nameserver], default_dns_domain=self.default_dns_domain) else: result = self.handler.build_server_config(net, subnet, port, tags) return result def test_build_server_config_dns_from_net_no_defaults(self): # Verify that net/subnet dns params are used if exist result = self._get_server_config(with_net_dns=True, with_default_dns=False) self.assertEqual(self.net_dns_domain, result['domain_name']) self.assertEqual([self.subnet_dns_nameserver], result['dns_nameservers']) def test_build_server_config_dns_from_net_with_defaults(self): # Verify that net/subnet dns params are used if exist, even if there # are defaults result = self._get_server_config(with_net_dns=True, with_default_dns=True) self.assertEqual(self.net_dns_domain, result['domain_name']) self.assertEqual([self.subnet_dns_nameserver], result['dns_nameservers']) def test_build_server_config_dns_from_defaults(self): # Verify that default dns params are used if net/subnet dns params # are missing result = self._get_server_config(with_net_dns=False, with_default_dns=True) self.assertEqual(self.default_dns_domain, result['domain_name']) self.assertEqual([self.default_dns_nameserver], result['dns_nameservers']) def test_build_server_config_dns_from_config(self): # Verify that config dns params are used if net/subnet and default # dns params are missing result = self._get_server_config(with_net_dns=False, with_default_dns=False) self.assertEqual(nsxlib_testcase.DNS_DOMAIN, result['domain_name']) self.assertEqual(nsxlib_testcase.DNS_NAMESERVERS, result['dns_nameservers']) def test_build_static_routes(self): gateway_ip = '2.2.2.2' cidr = '5.5.0.0/24' host_routes = [{'nexthop': '81.0.200.254', 'destination': '91.255.255.0/24'}] static_routes, gateway_ip = self.handler.build_static_routes( gateway_ip, cidr, host_routes) expected = [{'network': '5.5.0.0/24', 'next_hop': '0.0.0.0'}, {'network': '91.255.255.0/24', 'next_hop': '81.0.200.254'}, {'network': '0.0.0.0/0', 'next_hop': '2.2.2.2'}] self.assertEqual(expected, static_routes) self.assertEqual('2.2.2.2', gateway_ip) def test_build_static_routes_gw_none(self): gateway_ip = None cidr = '5.5.0.0/24' host_routes = [{'nexthop': '81.0.200.254', 'destination': '91.255.255.0/24'}] static_routes, gateway_ip = self.handler.build_static_routes( gateway_ip, cidr, host_routes) expected = [{'network': '5.5.0.0/24', 'next_hop': '0.0.0.0'}, {'network': '91.255.255.0/24', 'next_hop': '81.0.200.254'}] 
self.assertEqual(expected, static_routes) self.assertIsNone(gateway_ip) def test_build_static_routes_no_host_routes(self): gateway_ip = '2.2.2.2' cidr = '5.5.0.0/24' host_routes = [] static_routes, gateway_ip = self.handler.build_static_routes( gateway_ip, cidr, host_routes) expected = [{'network': '5.5.0.0/24', 'next_hop': '0.0.0.0'}, {'network': '0.0.0.0/0', 'next_hop': '2.2.2.2'}] self.assertEqual(expected, static_routes) self.assertEqual('2.2.2.2', gateway_ip) def test_build_static_routes_gw_none_host_route_any(self): gateway_ip = None cidr = '5.5.0.0/24' host_routes = [{'nexthop': '81.0.200.254', 'destination': '0.0.0.0/0'}] static_routes, gateway_ip = self.handler.build_static_routes( gateway_ip, cidr, host_routes) expected = [{'network': '5.5.0.0/24', 'next_hop': '0.0.0.0'}, {'network': '0.0.0.0/0', 'next_hop': '81.0.200.254'}] self.assertEqual(expected, static_routes) self.assertEqual('81.0.200.254', gateway_ip) vmware-nsxlib-12.0.1/vmware_nsxlib/tests/unit/v3/test_policy_resources.py0000666000175100017510000015540713244535763027002 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import unittest import mock from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase from vmware_nsxlib import v3 from vmware_nsxlib.v3 import policy_constants from vmware_nsxlib.v3 import policy_defs TEST_TENANT = 'test' class NsxPolicyLibTestCase(unittest.TestCase): def setUp(self, *args, **kwargs): super(NsxPolicyLibTestCase, self).setUp() nsxlib_config = nsxlib_testcase.get_default_nsxlib_config() self.policy_lib = v3.NsxPolicyLib(nsxlib_config) self.policy_api = self.policy_lib.policy_api self.maxDiff = None def _compare_def(self, expected_def, actual_def): # verify the resource definition class self.assertEqual(expected_def.__class__, actual_def.__class__) # verify the resource definition tenant self.assertEqual(expected_def.tenant, actual_def.tenant) # verify the resource definition values self.assertEqual(expected_def.get_obj_dict(), actual_def.get_obj_dict()) def assert_called_with_def(self, mock_api, expected_def, call_num=0): # verify the api was called mock_api.assert_called() actual_def = mock_api.call_args_list[call_num][0][0] self._compare_def(expected_def, actual_def) def assert_called_with_defs(self, mock_api, expected_defs, call_num=0): # verify the api & first resource definition self.assert_called_with_def(mock_api, expected_defs[0], call_num=call_num) # compare the 2nd resource definition class & values actual_def = mock_api.call_args_list[call_num][0][1] expected_def = expected_defs[1] self._compare_def(expected_def, actual_def) def assert_called_with_def_and_dict(self, mock_api, expected_def, expected_dict, call_num=0): # verify the api & resource definition self.assert_called_with_def(mock_api, expected_def, call_num=call_num) # compare the 2nd api parameter which is a dictionary actual_dict = mock_api.call_args_list[call_num][0][0].body self.assertEqual(expected_dict, actual_dict) class 
TestPolicyDomain(NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyDomain, self).setUp() self.resourceApi = self.policy_lib.domain def test_create_with_id(self): name = 'd1' description = 'desc' id = '111' with mock.patch.object(self.policy_api, "create_or_update") as api_call: self.resourceApi.create_or_overwrite(name, domain_id=id, description=description, tenant=TEST_TENANT) expected_def = policy_defs.DomainDef(domain_id=id, name=name, description=description, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_create_without_id(self): name = 'd1' description = 'desc' with mock.patch.object(self.policy_api, "create_or_update") as api_call: self.resourceApi.create_or_overwrite(name, description=description, tenant=TEST_TENANT) expected_def = policy_defs.DomainDef(domain_id=mock.ANY, name=name, description=description, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_delete(self): id = '111' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(id, tenant=TEST_TENANT) expected_def = policy_defs.DomainDef(domain_id=id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): id = '111' with mock.patch.object(self.policy_api, "get") as api_call: self.resourceApi.get(id, tenant=TEST_TENANT) expected_def = policy_defs.DomainDef(domain_id=id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get_by_name(self): name = 'd1' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = policy_defs.DomainDef(tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_list(self): with mock.patch.object(self.policy_api, "list") as api_call: self.resourceApi.list(tenant=TEST_TENANT) expected_def = policy_defs.DomainDef(tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_update(self): id = '111' name = 'new name' description = 'new desc' with mock.patch.object(self.policy_api, "create_or_update") as update_call: self.resourceApi.update(id, name=name, description=description, tenant=TEST_TENANT) expected_def = policy_defs.DomainDef(domain_id=id, tenant=TEST_TENANT) expected_dict = {'display_name': name, 'description': description} self.assert_called_with_def_and_dict( update_call, expected_def, expected_dict) class TestPolicyGroup(NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyGroup, self).setUp() self.resourceApi = self.policy_lib.group def test_create_with_id(self): domain_id = '111' name = 'g1' description = 'desc' id = '222' with mock.patch.object(self.policy_api, "create_or_update") as api_call: self.resourceApi.create_or_overwrite(name, domain_id, group_id=id, description=description, tenant=TEST_TENANT) expected_def = policy_defs.GroupDef(domain_id=domain_id, group_id=id, name=name, description=description, conditions=[], tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_create_without_id(self): domain_id = '111' name = 'g1' description = 'desc' with mock.patch.object(self.policy_api, "create_or_update") as api_call: self.resourceApi.create_or_overwrite(name, domain_id, description=description, tenant=TEST_TENANT) expected_def = policy_defs.GroupDef(domain_id=domain_id, group_id=mock.ANY, name=name, description=description, conditions=[], tenant=TEST_TENANT) 
self.assert_called_with_def(api_call, expected_def) def test_create_with_condition(self): domain_id = '111' name = 'g1' description = 'desc' cond_val = '123' cond_op = policy_constants.CONDITION_OP_EQUALS cond_member_type = policy_constants.CONDITION_MEMBER_VM cond_key = policy_constants.CONDITION_KEY_TAG with mock.patch.object(self.policy_api, "create_or_update") as api_call: self.resourceApi.create_or_overwrite( name, domain_id, description=description, cond_val=cond_val, cond_op=cond_op, cond_member_type=cond_member_type, cond_key=cond_key, tenant=TEST_TENANT) exp_cond = policy_defs.Condition(value=cond_val, key=cond_key, operator=cond_op, member_type=cond_member_type) expected_def = policy_defs.GroupDef(domain_id=domain_id, group_id=mock.ANY, name=name, description=description, conditions=[exp_cond], tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_delete(self): domain_id = '111' id = '222' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(domain_id, id, tenant=TEST_TENANT) expected_def = policy_defs.GroupDef(domain_id=domain_id, group_id=id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): domain_id = '111' id = '222' with mock.patch.object(self.policy_api, "get") as api_call: self.resourceApi.get(domain_id, id, tenant=TEST_TENANT) expected_def = policy_defs.GroupDef(domain_id=domain_id, group_id=id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get_by_name(self): domain_id = '111' name = 'g1' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(domain_id, name, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = policy_defs.GroupDef(domain_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_list(self): domain_id = '111' with mock.patch.object(self.policy_api, "list") as api_call: self.resourceApi.list(domain_id, tenant=TEST_TENANT) expected_def = policy_defs.GroupDef(domain_id=domain_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_update(self): domain_id = '111' id = '222' name = 'new name' description = 'new desc' with mock.patch.object(self.policy_api, "create_or_update") as update_call: self.resourceApi.update(domain_id, id, name=name, description=description, tenant=TEST_TENANT) expected_def = policy_defs.GroupDef(domain_id=domain_id, group_id=id, tenant=TEST_TENANT) expected_dict = {'display_name': name, 'description': description} self.assert_called_with_def_and_dict( update_call, expected_def, expected_dict) def test_update_condition(self): domain_id = '111' id = '222' cond_val = '123' with mock.patch.object(self.policy_api, "get", return_value={}) as get_call,\ mock.patch.object(self.policy_api, "create_or_update") as update_call: self.resourceApi.update_condition(domain_id, id, cond_val=cond_val, tenant=TEST_TENANT) expected_def = policy_defs.GroupDef(domain_id=domain_id, group_id=id, tenant=TEST_TENANT) exp_cond = {'resource_type': 'Condition', 'member_type': policy_constants.CONDITION_MEMBER_PORT, 'key': policy_constants.CONDITION_KEY_TAG, 'value': cond_val, 'operator': policy_constants.CONDITION_OP_EQUALS} expected_dict = {'expression': [exp_cond]} self.assert_called_with_def(get_call, expected_def) self.assert_called_with_def_and_dict( update_call, expected_def, expected_dict) def test_remove_condition(self): domain_id = '111' id = '222' old_cond = 
{'resource_type': 'Condition', 'member_type': policy_constants.CONDITION_MEMBER_PORT, 'key': policy_constants.CONDITION_KEY_TAG, 'value': 'abc', 'operator': policy_constants.CONDITION_OP_EQUALS} with mock.patch.object(self.policy_api, "get", return_value={'expression': [old_cond]}) as get_call,\ mock.patch.object(self.policy_api, "create_or_update") as update_call: self.resourceApi.update_condition(domain_id, id, cond_val=None, tenant=TEST_TENANT) expected_def = policy_defs.GroupDef(domain_id=domain_id, group_id=id, tenant=TEST_TENANT) expected_dict = {'expression': []} self.assert_called_with_def(get_call, expected_def) self.assert_called_with_def_and_dict( update_call, expected_def, expected_dict) def test_get_realized(self): domain_id = 'd1' id = 'g1' ep_id = 'ef1' result = {'state': policy_constants.STATE_REALIZED} with mock.patch.object( self.policy_api, "get_by_path", return_value=result) as api_get: state = self.resourceApi.get_realized_state( domain_id, id, ep_id, tenant=TEST_TENANT) self.assertEqual(policy_constants.STATE_REALIZED, state) expected_path = policy_defs.REALIZED_STATE_GROUP % ( TEST_TENANT, ep_id, domain_id, id) api_get.assert_called_once_with(expected_path) class TestPolicyService(NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyService, self).setUp() self.resourceApi = self.policy_lib.service def test_create(self): name = 's1' description = 'desc' protocol = policy_constants.TCP dest_ports = [81, 82] with mock.patch.object(self.policy_api, "create_with_parent") as api_call: self.resourceApi.create_or_overwrite(name, description=description, protocol=protocol, dest_ports=dest_ports, tenant=TEST_TENANT) exp_srv_def = policy_defs.ServiceDef(service_id=mock.ANY, name=name, description=description, tenant=TEST_TENANT) exp_entry_def = policy_defs.L4ServiceEntryDef( service_id=mock.ANY, name=name, description=description, protocol=protocol, dest_ports=dest_ports, tenant=TEST_TENANT) self.assert_called_with_defs( api_call, [exp_srv_def, exp_entry_def]) def test_delete(self): id = '111' with mock.patch.object(self.policy_api, "delete") as api_call,\ mock.patch.object(self.policy_api, "get") as get_call: self.resourceApi.delete(id, tenant=TEST_TENANT) expected_def = policy_defs.ServiceDef(service_id=id, tenant=TEST_TENANT) self.assert_called_with_def(get_call, expected_def) self.assert_called_with_def(api_call, expected_def) def test_get(self): id = '111' with mock.patch.object(self.policy_api, "get") as api_call: self.resourceApi.get(id, tenant=TEST_TENANT) expected_def = policy_defs.ServiceDef(service_id=id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get_by_name(self): name = 's1' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = policy_defs.ServiceDef(tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_list(self): with mock.patch.object(self.policy_api, "list") as api_call: self.resourceApi.list(tenant=TEST_TENANT) expected_def = policy_defs.ServiceDef(tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_update(self): id = '111' name = 'new name' description = 'new desc' with mock.patch.object(self.policy_api, "get", return_value={}) as get_call,\ mock.patch.object(self.policy_api, "create_or_update") as update_call: self.resourceApi.update(id, name=name, description=description, 
tenant=TEST_TENANT) expected_def = policy_defs.ServiceDef(service_id=id, tenant=TEST_TENANT) expected_dict = {'display_name': name, 'description': description, 'service_entries': []} self.assert_called_with_def(get_call, expected_def) self.assert_called_with_def_and_dict( update_call, expected_def, expected_dict) def test_update_entry(self): id = '111' protocol = 'udp' dest_ports = [555] service_entry_id = '222' service_entry = {'id': service_entry_id} with mock.patch.object( self.policy_api, "get", return_value={'service_entries': [service_entry]}) as get_call,\ mock.patch.object(self.policy_api, "create_or_update") as update_call: self.resourceApi.update(id, protocol=protocol, dest_ports=dest_ports, tenant=TEST_TENANT) # get will be called for the entire service expected_def = policy_defs.ServiceDef(service_id=id, tenant=TEST_TENANT) self.assert_called_with_def(get_call, expected_def) # update will be called for the service entry only expected_entry_def = policy_defs.L4ServiceEntryDef( service_id=id, service_entry_id=service_entry_id, tenant=TEST_TENANT) expected_entry_dict = {'id': service_entry_id, 'l4_protocol': protocol.upper(), 'destination_ports': dest_ports} self.assert_called_with_def_and_dict( update_call, expected_entry_def, expected_entry_dict) def test_update_all(self): id = '111' name = 'new name' description = 'new desc' protocol = 'udp' dest_ports = [555] service_entry_id = '222' service_entry = {'id': service_entry_id} with mock.patch.object( self.policy_api, "get", return_value={'service_entries': [service_entry]}) as get_call,\ mock.patch.object(self.policy_api, "create_or_update") as update_call,\ mock.patch.object(self.policy_api, "list", return_value={'results': []}): self.resourceApi.update(id, name=name, description=description, protocol=protocol, dest_ports=dest_ports, tenant=TEST_TENANT) # get will be called for the entire service expected_def = policy_defs.ServiceDef(service_id=id, tenant=TEST_TENANT) self.assert_called_with_def(get_call, expected_def) # update will be called for the service and entry (2 calls) expected_dict = {'display_name': name, 'description': description, 'service_entries': []} self.assert_called_with_def_and_dict( update_call, expected_def, expected_dict) expected_entry_def = policy_defs.L4ServiceEntryDef( service_id=id, service_entry_id=service_entry_id, tenant=TEST_TENANT) expected_entry_dict = {'id': service_entry_id, 'display_name': name, 'description': description, 'l4_protocol': protocol.upper(), 'destination_ports': dest_ports} self.assert_called_with_def_and_dict( update_call, expected_entry_def, expected_entry_dict, call_num=1) class TestPolicyIcmpService(NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyIcmpService, self).setUp() self.resourceApi = self.policy_lib.icmp_service def test_create(self): name = 's1' description = 'desc' icmp_type = 2 with mock.patch.object(self.policy_api, "create_with_parent") as api_call: self.resourceApi.create_or_overwrite(name, description=description, icmp_type=icmp_type, tenant=TEST_TENANT) exp_srv_def = policy_defs.ServiceDef(service_id=mock.ANY, name=name, description=description, tenant=TEST_TENANT) exp_entry_def = policy_defs.IcmpServiceEntryDef( service_id=mock.ANY, name=name, description=description, icmp_type=icmp_type, tenant=TEST_TENANT) self.assert_called_with_defs( api_call, [exp_srv_def, exp_entry_def]) def test_delete(self): id = '111' with mock.patch.object(self.policy_api, "delete") as api_call,\ mock.patch.object(self.policy_api, "get") as get_call: 
self.resourceApi.delete(id, tenant=TEST_TENANT) expected_def = policy_defs.ServiceDef(service_id=id, tenant=TEST_TENANT) self.assert_called_with_def(get_call, expected_def) self.assert_called_with_def(api_call, expected_def) def test_get(self): id = '111' with mock.patch.object(self.policy_api, "get") as api_call: self.resourceApi.get(id, tenant=TEST_TENANT) expected_def = policy_defs.ServiceDef(service_id=id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get_by_name(self): name = 's1' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = policy_defs.ServiceDef(tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_list(self): with mock.patch.object(self.policy_api, "list") as api_call: self.resourceApi.list(tenant=TEST_TENANT) expected_def = policy_defs.ServiceDef(tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_update(self): id = '111' name = 'new name' description = 'new desc' with mock.patch.object(self.policy_api, "get", return_value={}) as get_call,\ mock.patch.object(self.policy_api, "create_or_update") as update_call: self.resourceApi.update(id, name=name, description=description, tenant=TEST_TENANT) expected_def = policy_defs.ServiceDef(service_id=id, tenant=TEST_TENANT) expected_dict = {'display_name': name, 'description': description, 'service_entries': []} self.assert_called_with_def(get_call, expected_def) self.assert_called_with_def_and_dict( update_call, expected_def, expected_dict) def test_update_entry(self): id = '111' icmp_code = 12 service_entry_id = '222' service_entry = {'id': service_entry_id} with mock.patch.object( self.policy_api, "get", return_value={'service_entries': [service_entry]}) as get_call,\ mock.patch.object(self.policy_api, "create_or_update") as update_call: self.resourceApi.update(id, icmp_code=icmp_code, tenant=TEST_TENANT) # get will be called for the entire service expected_def = policy_defs.ServiceDef(service_id=id, tenant=TEST_TENANT) self.assert_called_with_def(get_call, expected_def) # update will be called for the service entry only expected_entry_def = policy_defs.IcmpServiceEntryDef( service_id=id, service_entry_id=service_entry_id, tenant=TEST_TENANT) expected_entry_dict = {'id': service_entry_id, 'icmp_code': icmp_code} self.assert_called_with_def_and_dict( update_call, expected_entry_def, expected_entry_dict) def test_update_all(self): id = '111' name = 'new name' description = 'new desc' version = 6 icmp_type = 3 icmp_code = 3 service_entry_id = '222' service_entry = {'id': service_entry_id} with mock.patch.object( self.policy_api, "get", return_value={'service_entries': [service_entry]}) as get_call,\ mock.patch.object(self.policy_api, "create_or_update") as update_call,\ mock.patch.object(self.policy_api, "list", return_value={'results': []}): self.resourceApi.update(id, name=name, description=description, version=version, icmp_type=icmp_type, icmp_code=icmp_code, tenant=TEST_TENANT) # get will be called for the entire service expected_def = policy_defs.ServiceDef(service_id=id, tenant=TEST_TENANT) self.assert_called_with_def(get_call, expected_def) # update will be called for the service and entry (2 calls) expected_dict = {'display_name': name, 'description': description, 'service_entries': []} self.assert_called_with_def_and_dict( update_call, expected_def, expected_dict) 
expected_entry_def = policy_defs.IcmpServiceEntryDef( service_id=id, service_entry_id=service_entry_id, tenant=TEST_TENANT) expected_entry_dict = {'id': service_entry_id, 'display_name': name, 'description': description, 'protocol': 'ICMPv6', 'icmp_type': icmp_type, 'icmp_code': icmp_code} self.assert_called_with_def_and_dict( update_call, expected_entry_def, expected_entry_dict, call_num=1) class TestPolicyCommunicationProfile(NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyCommunicationProfile, self).setUp() self.resourceApi = self.policy_lib.comm_profile def test_create(self): name = 'c1' description = 'desc' service_id = '333' action = 'DENY' with mock.patch.object(self.policy_api, "create_with_parent") as api_call: self.resourceApi.create_or_overwrite(name, description=description, services=[service_id], action=action, tenant=TEST_TENANT) exp_srv_def = policy_defs.CommunicationProfileDef( profile_id=mock.ANY, name=name, description=description, tenant=TEST_TENANT) exp_entry_def = policy_defs.CommunicationProfileEntryDef( profile_id=mock.ANY, name=name, description=description, services=[service_id], action=action, tenant=TEST_TENANT) self.assert_called_with_defs( api_call, [exp_srv_def, exp_entry_def]) def test_delete(self): id = '111' with mock.patch.object(self.policy_api, "delete") as api_call,\ mock.patch.object(self.policy_api, "get") as get_call: self.resourceApi.delete(id, tenant=TEST_TENANT) expected_def = policy_defs.CommunicationProfileDef( profile_id=id, tenant=TEST_TENANT) self.assert_called_with_def(get_call, expected_def) self.assert_called_with_def(api_call, expected_def) def test_get(self): id = '111' with mock.patch.object(self.policy_api, "get") as api_call: self.resourceApi.get(id, tenant=TEST_TENANT) expected_def = policy_defs.CommunicationProfileDef( profile_id=id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get_by_name(self): name = 'c1' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = policy_defs.CommunicationProfileDef( tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_list(self): with mock.patch.object(self.policy_api, "list") as api_call: self.resourceApi.list(tenant=TEST_TENANT) expected_def = policy_defs.CommunicationProfileDef( tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_update(self): id = '111' name = 'new name' description = 'new desc' with mock.patch.object(self.policy_api, "get", return_value={}) as get_call,\ mock.patch.object(self.policy_api, "create_or_update") as update_call: self.resourceApi.update(id, name=name, description=description, tenant=TEST_TENANT) expected_def = policy_defs.CommunicationProfileDef( profile_id=id, tenant=TEST_TENANT) expected_dict = {'display_name': name, 'description': description, 'communication_profile_entries': []} self.assert_called_with_def(get_call, expected_def) self.assert_called_with_def_and_dict( update_call, expected_def, expected_dict) def test_update_entry(self): id = '111' service_id = '333' action = 'deny' entry_id = '222' profile_entry = {'id': entry_id} entries_dict = {'communication_profile_entries': [profile_entry]} with mock.patch.object( self.policy_api, "get", return_value=entries_dict) as get_call,\ mock.patch.object(self.policy_api, "create_or_update") as update_call: self.resourceApi.update(id, 
services=[service_id], action=action, tenant=TEST_TENANT) # get will be called for the entire service expected_def = policy_defs.CommunicationProfileDef( profile_id=id, tenant=TEST_TENANT) self.assert_called_with_def(get_call, expected_def) # update will be called for the service entry only expected_entry_def = policy_defs.CommunicationProfileEntryDef( profile_id=id, profile_entry_id=entry_id, tenant=TEST_TENANT) expected_entry_dict = {'id': entry_id, 'action': action.upper(), 'services': [service_id]} self.assert_called_with_def_and_dict( update_call, expected_entry_def, expected_entry_dict) def test_update_all(self): id = '111' name = 'new name' description = 'new desc' service_id = '333' action = 'deny' entry_id = '222' profile_entry = {'id': entry_id} entries_dict = {'communication_profile_entries': [profile_entry]} with mock.patch.object( self.policy_api, "get", return_value=entries_dict) as get_call,\ mock.patch.object(self.policy_api, "create_or_update") as update_call: self.resourceApi.update(id, name=name, description=description, services=[service_id], action=action, tenant=TEST_TENANT) # get will be called for the entire service expected_def = policy_defs.CommunicationProfileDef( profile_id=id, tenant=TEST_TENANT) self.assert_called_with_def(get_call, expected_def) # update will be called for the service and entry (2 calls) expected_dict = {'display_name': name, 'description': description, 'communication_profile_entries': []} self.assert_called_with_def_and_dict( update_call, expected_def, expected_dict) expected_entry_def = policy_defs.CommunicationProfileEntryDef( profile_id=id, profile_entry_id=entry_id, tenant=TEST_TENANT) expected_entry_dict = {'id': entry_id, 'display_name': name, 'description': description, 'action': action.upper(), 'services': [service_id]} self.assert_called_with_def_and_dict( update_call, expected_entry_def, expected_entry_dict, call_num=1) class TestPolicyCommunicationMap(NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyCommunicationMap, self).setUp() self.resourceApi = self.policy_lib.comm_map def test_create(self): domain_id = '111' name = 'cm1' description = 'desc' source_group = 'g1' dest_group = 'g2' seq_num = 7 profile_id = 'c1' list_return_value = {'results': [{'sequence_number': 1}]} with mock.patch.object(self.policy_api, "create_or_update") as api_call,\ mock.patch.object(self.policy_api, "list", return_value=list_return_value): self.resourceApi.create_or_overwrite(name, domain_id, description=description, sequence_number=seq_num, profile_id=profile_id, source_groups=[source_group], dest_groups=[dest_group], tenant=TEST_TENANT) expected_def = policy_defs.CommunicationMapEntryDef( domain_id=domain_id, map_id=mock.ANY, name=name, description=description, sequence_number=seq_num, profile_id=profile_id, source_groups=[source_group], dest_groups=[dest_group], tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_create_first_seqnum(self): domain_id = '111' name = 'cm1' description = 'desc' source_group = 'g1' dest_group = 'g2' profile_id = 'c1' with mock.patch.object(self.policy_api, "create_or_update") as api_call, \ mock.patch.object(self.resourceApi, "list", return_value=[]): self.resourceApi.create_or_overwrite(name, domain_id, description=description, profile_id=profile_id, source_groups=[source_group], dest_groups=[dest_group], tenant=TEST_TENANT) expected_def = policy_defs.CommunicationMapEntryDef( domain_id=domain_id, map_id=mock.ANY, name=name, description=description, sequence_number=1, 
profile_id=profile_id, source_groups=[source_group], dest_groups=[dest_group], tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_create_without_seqnum(self): domain_id = '111' name = 'cm1' description = 'desc' source_group = 'g1' dest_group = 'g2' profile_id = 'c1' with mock.patch.object(self.policy_api, "create_with_parent") as api_call, \ mock.patch.object(self.resourceApi, "_get_last_seq_num", return_value=-1): self.resourceApi.create_or_overwrite(name, domain_id, description=description, profile_id=profile_id, source_groups=[source_group], dest_groups=[dest_group], tenant=TEST_TENANT) expected_map_def = policy_defs.CommunicationMapDef( domain_id=domain_id, tenant=TEST_TENANT) expected_entry_def = policy_defs.CommunicationMapEntryDef( domain_id=domain_id, map_id=mock.ANY, name=name, description=description, sequence_number=1, profile_id=profile_id, source_groups=[source_group], dest_groups=[dest_group], tenant=TEST_TENANT) self.assert_called_with_defs( api_call, [expected_map_def, expected_entry_def]) def test_delete(self): domain_id = '111' id = '222' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(domain_id, id, tenant=TEST_TENANT) expected_def = policy_defs.CommunicationMapEntryDef( domain_id=domain_id, map_id=id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): domain_id = '111' id = '222' with mock.patch.object(self.policy_api, "get") as api_call: self.resourceApi.get(domain_id, id, tenant=TEST_TENANT) expected_def = policy_defs.CommunicationMapEntryDef( domain_id=domain_id, map_id=id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get_by_name(self): domain_id = '111' name = 'cm1' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(domain_id, name, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = policy_defs.CommunicationMapEntryDef( domain_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_list(self): domain_id = '111' with mock.patch.object(self.policy_api, "list") as api_call: self.resourceApi.list(domain_id, tenant=TEST_TENANT) expected_def = policy_defs.CommunicationMapEntryDef( domain_id=domain_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_update(self): domain_id = '111' id = '222' name = 'new name' description = 'new desc' source_group = 'ng1' dest_group = 'ng2' profile_id = 'nc1' with mock.patch.object(self.policy_api, "get", return_value={}) as get_call,\ mock.patch.object(self.policy_api, "create_or_update") as update_call: self.resourceApi.update(domain_id, id, name=name, description=description, profile_id=profile_id, source_groups=[source_group], dest_groups=[dest_group], tenant=TEST_TENANT) expected_def = policy_defs.CommunicationMapEntryDef( domain_id=domain_id, map_id=id, tenant=TEST_TENANT) sgroup_path = "/%s/domains/%s/groups/%s" % ( TEST_TENANT, domain_id, source_group) dgroup_path = "/%s/domains/%s/groups/%s" % ( TEST_TENANT, domain_id, dest_group) profile_path = "/%s/communication-profiles/%s" % ( TEST_TENANT, profile_id) expected_dict = {'display_name': name, 'description': description, 'communication_profile_path': profile_path, 'source_groups': [sgroup_path], 'destination_groups': [dgroup_path]} self.assert_called_with_def(get_call, expected_def) self.assert_called_with_def_and_dict( update_call, expected_def, expected_dict) def 
test_get_realized(self): domain_id = 'd1' ep_id = 'ef1' result = {'state': policy_constants.STATE_REALIZED} with mock.patch.object( self.policy_api, "get_by_path", return_value=result) as api_get: state = self.resourceApi.get_realized_state( domain_id, ep_id, tenant=TEST_TENANT) self.assertEqual(policy_constants.STATE_REALIZED, state) expected_path = policy_defs.REALIZED_STATE_COMM_MAP % ( TEST_TENANT, ep_id, domain_id) api_get.assert_called_once_with(expected_path) class TestPolicyEnforcementPoint(NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyEnforcementPoint, self).setUp() self.resourceApi = self.policy_lib.enforcement_point def test_create(self): name = 'ep' description = 'desc' ip_address = '1.1.1.1' username = 'admin' password = 'zzz' with mock.patch.object(self.policy_api, "create_or_update") as api_call: self.resourceApi.create_or_overwrite( name, description=description, ip_address=ip_address, username=username, password=password, tenant=TEST_TENANT) expected_def = policy_defs.EnforcementPointDef( ep_id=mock.ANY, name=name, description=description, ip_address=ip_address, username=username, password=password, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_delete(self): id = '111' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(id, tenant=TEST_TENANT) expected_def = policy_defs.EnforcementPointDef(ep_id=id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): id = '111' with mock.patch.object(self.policy_api, "get") as api_call: self.resourceApi.get(id, tenant=TEST_TENANT) expected_def = policy_defs.EnforcementPointDef(ep_id=id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get_by_name(self): name = 'ep1' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = policy_defs.EnforcementPointDef(tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_list(self): with mock.patch.object(self.policy_api, "list") as api_call: self.resourceApi.list(tenant=TEST_TENANT) expected_def = policy_defs.EnforcementPointDef(tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_update(self): id = '111' name = 'new name' username = 'admin' password = 'zzz' with mock.patch.object(self.policy_api, "create_or_update") as update_call: self.resourceApi.update(id, name=name, username=username, password=password, tenant=TEST_TENANT) expected_def = policy_defs.EnforcementPointDef(ep_id=id, tenant=TEST_TENANT) expected_dict = {'display_name': name, 'username': username, 'password': password} self.assert_called_with_def_and_dict( update_call, expected_def, expected_dict) def test_get_realized(self): ep_id = 'ef1' result = {'state': policy_constants.STATE_REALIZED} with mock.patch.object( self.policy_api, "get_by_path", return_value=result) as api_get: state = self.resourceApi.get_realized_state( ep_id, tenant=TEST_TENANT) self.assertEqual(policy_constants.STATE_REALIZED, state) expected_path = policy_defs.REALIZED_STATE_EF % ( TEST_TENANT, ep_id) api_get.assert_called_once_with(expected_path) class TestPolicyDeploymentMap(NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyDeploymentMap, self).setUp() self.resourceApi = self.policy_lib.deployment_map def test_create(self): name = 'map1' description = 'desc' 
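        # A deployment map binds a policy domain to an enforcement point,
        # which is what the expected DeploymentMapDef below captures.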
domain_id = 'domain1' ep_id = 'ep1' with mock.patch.object(self.policy_api, "create_or_update") as api_call: self.resourceApi.create_or_overwrite(name, description=description, ep_id=ep_id, domain_id=domain_id, tenant=TEST_TENANT) expected_def = policy_defs.DeploymentMapDef( map_id=mock.ANY, name=name, description=description, ep_id=ep_id, domain_id=domain_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_delete(self): id = '111' domain_id = 'domain1' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(id, domain_id=domain_id, tenant=TEST_TENANT) expected_def = policy_defs.DeploymentMapDef(map_id=id, domain_id=domain_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): id = '111' domain_id = 'domain1' with mock.patch.object(self.policy_api, "get") as api_call: self.resourceApi.get(id, domain_id=domain_id, tenant=TEST_TENANT) expected_def = policy_defs.DeploymentMapDef(map_id=id, domain_id=domain_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get_by_name(self): name = 'ep1' domain_id = 'domain1' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(name, domain_id=domain_id, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = policy_defs.DeploymentMapDef(domain_id=domain_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_list(self): domain_id = 'domain1' with mock.patch.object(self.policy_api, "list") as api_call: self.resourceApi.list(domain_id=domain_id, tenant=TEST_TENANT) expected_def = policy_defs.DeploymentMapDef(domain_id=domain_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_update(self): id = '111' name = 'new name' domain_id = 'domain2' ep_id = 'ep2' with mock.patch.object(self.policy_api, "create_or_update") as update_call: self.resourceApi.update(id, name=name, ep_id=ep_id, domain_id=domain_id, tenant=TEST_TENANT) expected_def = policy_defs.DeploymentMapDef(map_id=id, tenant=TEST_TENANT) domain_path = "/%s/domains/%s" % (TEST_TENANT, domain_id) ep_path = ("/%s/deployment-zones/default/" "enforcement-points/%s" % (TEST_TENANT, ep_id)) expected_dict = {'display_name': name, 'enforcement_point_paths': [ep_path], 'domain_path': domain_path} self.assert_called_with_def_and_dict( update_call, expected_def, expected_dict) vmware-nsxlib-12.0.1/vmware_nsxlib/tests/unit/v3/test_load_balancer.py0000666000175100017510000006112013244535763026143 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
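#
# Every test class in this module follows the same pattern: patch the
# low-level REST call ('create', 'get', 'list', 'update' or 'delete') on
# self.nsxlib.client, invoke the matching load_balancer resource API and
# assert the URI (and body, where relevant) the client was called with.
# A rough sketch of that pattern, for illustration only, using a made-up
# pool id:
#
#     with mock.patch.object(self.nsxlib.client, 'get') as get:
#         self.nsxlib.load_balancer.pool.get('pool-id')
#         get.assert_called_with('loadbalancer/pools/pool-id')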
# import mock from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase from vmware_nsxlib.tests.unit.v3 import test_constants as consts from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3 import load_balancer app_profile_types = load_balancer.ApplicationProfileTypes app_profiles = [app_profile_types.HTTP, app_profile_types.FAST_TCP, app_profile_types.FAST_UDP] per_profile_types = load_balancer.PersistenceProfileTypes per_profiles = [per_profile_types.COOKIE, per_profile_types.SOURCE_IP] monitor_types = load_balancer.MonitorTypes monitors = [monitor_types.HTTP, monitor_types.HTTPS, monitor_types.ICMP, monitor_types.PASSIVE, monitor_types.TCP, monitor_types.UDP] tags = [ { 'scope': 'os-project-id', 'tag': 'project-1' }, { 'scope': 'os-api-version', 'tag': '2.1.1.0' } ] class TestApplicationProfile(nsxlib_testcase.NsxClientTestCase): def test_create_application_profiles(self): fake_profile = consts.FAKE_APPLICATION_PROFILE.copy() for profile_type in app_profiles: body = { 'display_name': fake_profile['display_name'], 'description': fake_profile['description'], 'resource_type': profile_type, 'tags': tags } with mock.patch.object(self.nsxlib.client, 'create') as create: self.nsxlib.load_balancer.application_profile.create( display_name=body['display_name'], description=body['description'], resource_type=body['resource_type'], tags=tags) create.assert_called_with('loadbalancer/application-profiles', body) def test_list_application_profiles(self): with mock.patch.object(self.nsxlib.client, 'list') as list_call: self.nsxlib.load_balancer.application_profile.list() list_call.assert_called_with( resource='loadbalancer/application-profiles') def test_get_application_profile(self): with mock.patch.object(self.nsxlib.client, 'get') as get: fake_profile = consts.FAKE_APPLICATION_PROFILE.copy() self.nsxlib.load_balancer.application_profile.get( fake_profile['id']) get.assert_called_with( 'loadbalancer/application-profiles/%s' % fake_profile['id']) def test_delete_application_profile(self): with mock.patch.object(self.nsxlib.client, 'delete') as delete: fake_profile = consts.FAKE_APPLICATION_PROFILE.copy() self.nsxlib.load_balancer.application_profile.delete( fake_profile['id']) delete.assert_called_with( 'loadbalancer/application-profiles/%s' % fake_profile['id']) class TestPersistenceProfile(nsxlib_testcase.NsxClientTestCase): def test_create_persistence_profiles(self): fake_profile = consts.FAKE_PERSISTENCE_PROFILE.copy() for profile_type in per_profiles: body = { 'display_name': fake_profile['display_name'], 'description': fake_profile['description'], 'resource_type': profile_type, 'tags': tags } with mock.patch.object(self.nsxlib.client, 'create') as create: self.nsxlib.load_balancer.persistence_profile.create( body['display_name'], body['description'], tags, body['resource_type']) create.assert_called_with('loadbalancer/persistence-profiles', body) def test_list_persistence_profiles(self): with mock.patch.object(self.nsxlib.client, 'list') as list_call: self.nsxlib.load_balancer.persistence_profile.list() list_call.assert_called_with( resource='loadbalancer/persistence-profiles') def test_get_persistence_profile(self): with mock.patch.object(self.nsxlib.client, 'get') as get: fake_profile = consts.FAKE_APPLICATION_PROFILE.copy() self.nsxlib.load_balancer.persistence_profile.get( fake_profile['id']) get.assert_called_with( 'loadbalancer/persistence-profiles/%s' % fake_profile['id']) def test_delete_persistence_profile(self): with mock.patch.object(self.nsxlib.client, 'delete') 
as delete: fake_profile = consts.FAKE_PERSISTENCE_PROFILE.copy() self.nsxlib.load_balancer.persistence_profile.delete( fake_profile['id']) delete.assert_called_with( 'loadbalancer/persistence-profiles/%s' % fake_profile['id']) class TestRule(nsxlib_testcase.NsxClientTestCase): def test_create_rule(self): fake_rule = consts.FAKE_RULE.copy() body = { 'display_name': fake_rule['display_name'], 'description': fake_rule['description'], 'resource_type': fake_rule['resource_type'], 'phase': fake_rule['phase'], 'match_strategy': fake_rule['match_strategy'], 'tags': tags } with mock.patch.object(self.nsxlib.client, 'create') as create: self.nsxlib.load_balancer.rule.create(**body) create.assert_called_with('loadbalancer/rules', body) def test_list_rules(self): with mock.patch.object(self.nsxlib.client, 'list') as list_call: self.nsxlib.load_balancer.rule.list() list_call.assert_called_with(resource='loadbalancer/rules') def test_get_rule(self): with mock.patch.object(self.nsxlib.client, 'get') as get: fake_rule = consts.FAKE_RULE.copy() self.nsxlib.load_balancer.rule.get(fake_rule['id']) get.assert_called_with('loadbalancer/rules/%s' % fake_rule['id']) def test_delete_rule(self): with mock.patch.object(self.nsxlib.client, 'delete') as delete: fake_rule = consts.FAKE_RULE.copy() self.nsxlib.load_balancer.rule.delete(fake_rule['id']) delete.assert_called_with( 'loadbalancer/rules/%s' % fake_rule['id']) class TestClientSslProfile(nsxlib_testcase.NsxClientTestCase): def test_create_client_ssl_profiles(self): fake_profile = consts.FAKE_CLIENT_SSL_PROFILE.copy() body = { 'display_name': fake_profile['display_name'], 'description': fake_profile['description'], 'tags': tags } with mock.patch.object(self.nsxlib.client, 'create') as create: self.nsxlib.load_balancer.client_ssl_profile.create( body['display_name'], body['description'], tags) create.assert_called_with('loadbalancer/client-ssl-profiles', body) def test_list_client_ssl_profiles(self): with mock.patch.object(self.nsxlib.client, 'list') as list_call: self.nsxlib.load_balancer.client_ssl_profile.list() list_call.assert_called_with( resource='loadbalancer/client-ssl-profiles') def test_get_client_ssl_profile(self): with mock.patch.object(self.nsxlib.client, 'get') as get: fake_profile = consts.FAKE_CLIENT_SSL_PROFILE.copy() self.nsxlib.load_balancer.client_ssl_profile.get( fake_profile['id']) get.assert_called_with( 'loadbalancer/client-ssl-profiles/%s' % fake_profile['id']) def test_delete_client_ssl_profile(self): with mock.patch.object(self.nsxlib.client, 'delete') as delete: fake_profile = consts.FAKE_CLIENT_SSL_PROFILE.copy() self.nsxlib.load_balancer.client_ssl_profile.delete( fake_profile['id']) delete.assert_called_with( 'loadbalancer/client-ssl-profiles/%s' % fake_profile['id']) class TestServerSslProfile(nsxlib_testcase.NsxClientTestCase): def test_create_server_client_ssl_profiles(self): fake_profile = consts.FAKE_SERVER_SSL_PROFILE.copy() body = { 'display_name': fake_profile['display_name'], 'description': fake_profile['description'], 'tags': tags } with mock.patch.object(self.nsxlib.client, 'create') as create: self.nsxlib.load_balancer.server_ssl_profile.create( body['display_name'], body['description'], tags) create.assert_called_with('loadbalancer/server-ssl-profiles', body) def test_list_server_ssl_profiles(self): with mock.patch.object(self.nsxlib.client, 'list') as list_call: self.nsxlib.load_balancer.server_ssl_profile.list() list_call.assert_called_with( resource='loadbalancer/server-ssl-profiles') def 
test_get_server_ssl_profile(self): with mock.patch.object(self.nsxlib.client, 'get') as get: fake_profile = consts.FAKE_SERVER_SSL_PROFILE.copy() self.nsxlib.load_balancer.server_ssl_profile.get( fake_profile['id']) get.assert_called_with( 'loadbalancer/server-ssl-profiles/%s' % fake_profile['id']) def test_delete_server_ssl_profile(self): with mock.patch.object(self.nsxlib.client, 'delete') as delete: fake_profile = consts.FAKE_SERVER_SSL_PROFILE.copy() self.nsxlib.load_balancer.server_ssl_profile.delete( fake_profile['id']) delete.assert_called_with( 'loadbalancer/server-ssl-profiles/%s' % fake_profile['id']) class TestMonitor(nsxlib_testcase.NsxClientTestCase): def test_create_monitors(self): fake_monitor = consts.FAKE_MONITOR.copy() for monitor_type in monitors: body = { 'display_name': fake_monitor['display_name'], 'description': fake_monitor['description'], 'resource_type': monitor_type, 'tags': tags } with mock.patch.object(self.nsxlib.client, 'create') as create: self.nsxlib.load_balancer.monitor.create( body['display_name'], body['description'], tags, body['resource_type']) create.assert_called_with('loadbalancer/monitors', body) def test_list_monitors(self): with mock.patch.object(self.nsxlib.client, 'list') as list_call: self.nsxlib.load_balancer.monitor.list() list_call.assert_called_with(resource='loadbalancer/monitors') def test_get_monitor(self): with mock.patch.object(self.nsxlib.client, 'get') as get: fake_monitor = consts.FAKE_MONITOR.copy() self.nsxlib.load_balancer.monitor.get(fake_monitor['id']) get.assert_called_with( 'loadbalancer/monitors/%s' % fake_monitor['id']) def test_delete_monitor(self): with mock.patch.object(self.nsxlib.client, 'delete') as delete: fake_monitor = consts.FAKE_MONITOR.copy() self.nsxlib.load_balancer.monitor.delete(fake_monitor['id']) delete.assert_called_with( 'loadbalancer/monitors/%s' % fake_monitor['id']) class TestPool(nsxlib_testcase.NsxClientTestCase): def test_create_pool(self): fake_pool = consts.FAKE_POOL.copy() body = { 'display_name': fake_pool['display_name'], 'description': fake_pool['description'], 'algorithm': fake_pool['algorithm'], 'tags': tags } with mock.patch.object(self.nsxlib.client, 'create') as create: self.nsxlib.load_balancer.pool.create( body['display_name'], body['description'], tags, algorithm=body['algorithm']) create.assert_called_with('loadbalancer/pools', body) def test_list_pools(self): with mock.patch.object(self.nsxlib.client, 'list') as list_call: self.nsxlib.load_balancer.pool.list() list_call.assert_called_with(resource='loadbalancer/pools') def test_get_pool(self): with mock.patch.object(self.nsxlib.client, 'get') as get: fake_profile = consts.FAKE_POOL.copy() self.nsxlib.load_balancer.pool.get(fake_profile['id']) get.assert_called_with( 'loadbalancer/pools/%s' % fake_profile['id']) def test_delete_pool(self): with mock.patch.object(self.nsxlib.client, 'delete') as delete: fake_profile = consts.FAKE_POOL.copy() self.nsxlib.load_balancer.pool.delete(fake_profile['id']) delete.assert_called_with( 'loadbalancer/pools/%s' % fake_profile['id']) def test_remove_monitor_from_pool(self): fake_pool = consts.FAKE_POOL.copy() fake_pool['active_monitor_ids'] = [consts.FAKE_MONITOR_UUID] body = {'display_name': fake_pool['display_name'], 'description': fake_pool['description'], 'id': fake_pool['id'], 'algorithm': fake_pool['algorithm'], 'active_monitor_ids': []} with mock.patch.object(self.nsxlib.client, 'get', return_value=fake_pool): with mock.patch.object(self.nsxlib.client, 'update') as update: 
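                # Dropping the only configured monitor should trigger an
                # update that writes back an empty active_monitor_ids list.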
self.nsxlib.load_balancer.pool.remove_monitor_from_pool( fake_pool['id'], consts.FAKE_MONITOR_UUID) resource = 'loadbalancer/pools/%s' % fake_pool['id'] update.assert_called_with(resource, body) def test_remove_non_exist_monitor_from_pool(self): fake_pool = consts.FAKE_POOL.copy() fake_pool['active_monitor_ids'] = [consts.FAKE_MONITOR_UUID] with mock.patch.object(self.nsxlib.client, 'get', return_value=fake_pool): self.assertRaises( nsxlib_exc.ResourceNotFound, self.nsxlib.load_balancer.pool.remove_monitor_from_pool, fake_pool['id'], 'xxx-yyy') def test_add_monitor_to_pool(self): fake_pool = consts.FAKE_POOL.copy() body = {'display_name': fake_pool['display_name'], 'description': fake_pool['description'], 'id': fake_pool['id'], 'algorithm': fake_pool['algorithm'], 'active_monitor_ids': [consts.FAKE_MONITOR_UUID]} with mock.patch.object(self.nsxlib.client, 'get', return_value=fake_pool): with mock.patch.object(self.nsxlib.client, 'update') as update: self.nsxlib.load_balancer.pool.add_monitor_to_pool( fake_pool['id'], consts.FAKE_MONITOR_UUID) resource = 'loadbalancer/pools/%s' % fake_pool['id'] update.assert_called_with(resource, body) class TestVirtualServer(nsxlib_testcase.NsxClientTestCase): def test_create_virtual_server(self): fake_virtual_server = consts.FAKE_VIRTUAL_SERVER.copy() body = { 'display_name': fake_virtual_server['display_name'], 'description': fake_virtual_server['description'], 'ip_protocol': fake_virtual_server['ip_protocol'], 'port': fake_virtual_server['port'], 'enabled': fake_virtual_server['enabled'], 'tags': tags } with mock.patch.object(self.nsxlib.client, 'create') as create: self.nsxlib.load_balancer.virtual_server.create( body['display_name'], body['description'], tags, ip_protocol=body['ip_protocol'], port=body['port'], enabled=body['enabled']) create.assert_called_with('loadbalancer/virtual-servers', body) def test_list_virtual_servers(self): with mock.patch.object(self.nsxlib.client, 'list') as list_call: self.nsxlib.load_balancer.virtual_server.list() list_call.assert_called_with( resource='loadbalancer/virtual-servers') def test_get_virtual_server(self): with mock.patch.object(self.nsxlib.client, 'get') as get: fake_virtual_server = consts.FAKE_VIRTUAL_SERVER.copy() self.nsxlib.load_balancer.virtual_server.get( fake_virtual_server['id']) get.assert_called_with( 'loadbalancer/virtual-servers/%s' % fake_virtual_server['id']) def test_delete_virtual_server(self): with mock.patch.object(self.nsxlib.client, 'delete') as delete: fake_virtual_server = consts.FAKE_VIRTUAL_SERVER.copy() self.nsxlib.load_balancer.virtual_server.delete( fake_virtual_server['id']) delete.assert_called_with( 'loadbalancer/virtual-servers/%s' % fake_virtual_server['id']) def test_add_rule(self): fake_virtual_server = consts.FAKE_VIRTUAL_SERVER.copy() body = { 'display_name': fake_virtual_server['display_name'], 'description': fake_virtual_server['description'], 'id': fake_virtual_server['id'], 'enabled': fake_virtual_server['enabled'], 'port': fake_virtual_server['port'], 'ip_protocol': fake_virtual_server['ip_protocol'], 'rule_ids': [consts.FAKE_RULE_UUID] } with mock.patch.object(self.nsxlib.client, 'get') as mock_get, \ mock.patch.object(self.nsxlib.client, 'update') as mock_update: mock_get.return_value = fake_virtual_server self.nsxlib.load_balancer.virtual_server.add_rule( fake_virtual_server['id'], consts.FAKE_RULE_UUID) mock_update.assert_called_with( 'loadbalancer/virtual-servers/%s' % fake_virtual_server['id'], body) def test_remove_rule(self): fake_virtual_server = 
consts.FAKE_VIRTUAL_SERVER.copy() fake_virtual_server['rule_ids'] = [consts.FAKE_RULE_UUID] body = { 'display_name': fake_virtual_server['display_name'], 'description': fake_virtual_server['description'], 'id': fake_virtual_server['id'], 'enabled': fake_virtual_server['enabled'], 'port': fake_virtual_server['port'], 'ip_protocol': fake_virtual_server['ip_protocol'], 'rule_ids': [] } with mock.patch.object(self.nsxlib.client, 'get') as mock_get, \ mock.patch.object(self.nsxlib.client, 'update') as mock_update: mock_get.return_value = fake_virtual_server self.nsxlib.load_balancer.virtual_server.remove_rule( fake_virtual_server['id'], consts.FAKE_RULE_UUID) mock_update.assert_called_with( 'loadbalancer/virtual-servers/%s' % fake_virtual_server['id'], body) def test_add_client_ssl_profile_binding(self): fake_virtual_server = consts.FAKE_VIRTUAL_SERVER.copy() body = { 'display_name': fake_virtual_server['display_name'], 'description': fake_virtual_server['description'], 'id': fake_virtual_server['id'], 'enabled': fake_virtual_server['enabled'], 'port': fake_virtual_server['port'], 'ip_protocol': fake_virtual_server['ip_protocol'], 'client_ssl_profile_binding': { 'ssl_profile_id': consts.FAKE_CLIENT_SSL_PROFILE_UUID, 'default_certificate_id': consts.FAKE_DEFAULT_CERTIFICATE_ID, 'client_auth': 'IGNORE', 'certificate_chain_depth': 3 } } with mock.patch.object(self.nsxlib.client, 'get') as mock_get, \ mock.patch.object(self.nsxlib.client, 'update') as mock_update: mock_get.return_value = fake_virtual_server vs_client = self.nsxlib.load_balancer.virtual_server vs_client.add_client_ssl_profile_binding( fake_virtual_server['id'], consts.FAKE_CLIENT_SSL_PROFILE_UUID, consts.FAKE_DEFAULT_CERTIFICATE_ID, client_auth='IGNORE', certificate_chain_depth=3, xyz='xyz' ) mock_update.assert_called_with( 'loadbalancer/virtual-servers/%s' % fake_virtual_server['id'], body) def test_add_server_ssl_profile_binding(self): fake_virtual_server = consts.FAKE_VIRTUAL_SERVER.copy() body = { 'display_name': fake_virtual_server['display_name'], 'description': fake_virtual_server['description'], 'id': fake_virtual_server['id'], 'enabled': fake_virtual_server['enabled'], 'port': fake_virtual_server['port'], 'ip_protocol': fake_virtual_server['ip_protocol'], 'server_ssl_profile_binding': { 'ssl_profile_id': consts.FAKE_SERVER_SSL_PROFILE_UUID, 'server_auth': 'IGNORE', 'certificate_chain_depth': 3 } } with mock.patch.object(self.nsxlib.client, 'get') as mock_get, \ mock.patch.object(self.nsxlib.client, 'update') as mock_update: mock_get.return_value = fake_virtual_server vs_client = self.nsxlib.load_balancer.virtual_server vs_client.add_server_ssl_profile_binding( fake_virtual_server['id'], consts.FAKE_SERVER_SSL_PROFILE_UUID, server_auth='IGNORE', certificate_chain_depth=3, xyz='xyz') mock_update.assert_called_with( 'loadbalancer/virtual-servers/%s' % fake_virtual_server['id'], body) class TestService(nsxlib_testcase.NsxClientTestCase): def test_create_service(self): fake_service = consts.FAKE_SERVICE.copy() body = { 'display_name': fake_service['display_name'], 'description': fake_service['description'], 'enabled': fake_service['enabled'], 'attachment': fake_service['attachment'], 'tags': tags } with mock.patch.object(self.nsxlib.client, 'create') as create: self.nsxlib.load_balancer.service.create( body['display_name'], body['description'], tags, enabled=body['enabled'], attachment=body['attachment']) create.assert_called_with('loadbalancer/services', body) def test_list_services(self): with 
mock.patch.object(self.nsxlib.client, 'list') as list_call: self.nsxlib.load_balancer.service.list() list_call.assert_called_with(resource='loadbalancer/services') def test_get_service(self): with mock.patch.object(self.nsxlib.client, 'get') as get: fake_service = consts.FAKE_SERVICE.copy() self.nsxlib.load_balancer.service.get(fake_service['id']) get.assert_called_with( 'loadbalancer/services/%s' % fake_service['id']) def test_get_stats(self): with mock.patch.object(self.nsxlib.client, 'get') as get: fake_service = consts.FAKE_SERVICE.copy() self.nsxlib.load_balancer.service.get_stats(fake_service['id']) get.assert_called_with( 'loadbalancer/services/%s/statistics?source=realtime' % fake_service['id']) def test_get_status(self): with mock.patch.object(self.nsxlib.client, 'get') as get: fake_service = consts.FAKE_SERVICE.copy() self.nsxlib.load_balancer.service.get_status(fake_service['id']) get.assert_called_with( 'loadbalancer/services/%s/status' % fake_service['id']) def test_delete_service(self): with mock.patch.object(self.nsxlib.client, 'delete') as delete: fake_service = consts.FAKE_SERVICE.copy() self.nsxlib.load_balancer.service.delete(fake_service['id']) delete.assert_called_with( 'loadbalancer/services/%s' % fake_service['id']) vmware-nsxlib-12.0.1/vmware_nsxlib/tests/unit/v3/test_cluster.py0000666000175100017510000002344513244535763025066 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
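#
# These tests exercise the clustered-API plumbing (endpoint selection,
# health states, retries and connection validation) against mocked
# requests sessions. For illustration only, the fake login response that
# get_sess_create_resp() below builds boils down to:
#
#     from requests import models
#
#     resp = models.Response()
#     resp.status_code = 200
#     resp.headers = {'Set-Cookie': 'JSESSIONID=abc;'}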
# import unittest import mock from requests import exceptions as requests_exceptions from requests import models import six.moves.urllib.parse as urlparse from vmware_nsxlib.tests.unit.v3 import mocks from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase from vmware_nsxlib.v3 import client from vmware_nsxlib.v3 import client_cert from vmware_nsxlib.v3 import cluster from vmware_nsxlib.v3 import exceptions as nsxlib_exc def _validate_conn_up(*args, **kwargs): return def _validate_conn_down(*args, **kwargs): raise requests_exceptions.ConnectionError() def get_sess_create_resp(): sess_create_response = models.Response() sess_create_response.status_code = 200 sess_create_response.headers = {'Set-Cookie': 'JSESSIONID=abc;'} return sess_create_response class RequestsHTTPProviderTestCase(unittest.TestCase): def test_new_connection(self): mock_api = mock.Mock() mock_api.nsxlib_config = mock.Mock() mock_api.nsxlib_config.username = 'nsxuser' mock_api.nsxlib_config.password = 'nsxpassword' mock_api.nsxlib_config.retries = 100 mock_api.nsxlib_config.insecure = True mock_api.nsxlib_config.ca_file = None mock_api.nsxlib_config.http_timeout = 99 mock_api.nsxlib_config.conn_idle_timeout = 39 mock_api.nsxlib_config.client_cert_provider = None provider = cluster.NSXRequestsHTTPProvider() with mock.patch.object(cluster.TimeoutSession, 'request', return_value=get_sess_create_resp()): session = provider.new_connection( mock_api, cluster.Provider('9.8.7.6', 'https://9.8.7.6', 'nsxuser', 'nsxpassword', None)) self.assertEqual(('nsxuser', 'nsxpassword'), session.auth) self.assertFalse(session.verify) self.assertIsNone(session.cert) self.assertEqual(100, session.adapters['https://'].max_retries.total) self.assertEqual(99, session.timeout) def test_new_connection_with_client_auth(self): mock_api = mock.Mock() mock_api.nsxlib_config = mock.Mock() mock_api.nsxlib_config.retries = 100 mock_api.nsxlib_config.insecure = True mock_api.nsxlib_config.ca_file = None mock_api.nsxlib_config.http_timeout = 99 mock_api.nsxlib_config.conn_idle_timeout = 39 cert_provider_inst = client_cert.ClientCertProvider( '/etc/cert.pem') mock_api.nsxlib_config.client_cert_provider = cert_provider_inst provider = cluster.NSXRequestsHTTPProvider() with mock.patch.object(cluster.TimeoutSession, 'request', return_value=get_sess_create_resp()): session = provider.new_connection( mock_api, cluster.Provider('9.8.7.6', 'https://9.8.7.6', None, None, None)) self.assertIsNone(session.auth) self.assertFalse(session.verify) self.assertEqual(cert_provider_inst, session.cert_provider) self.assertEqual(99, session.timeout) def test_validate_connection(self): mock_conn = mocks.MockRequestSessionApi() mock_conn.default_headers = {} mock_ep = mock.Mock() mock_ep.provider.url = 'https://1.2.3.4' mock_cluster = mock.Mock() mock_cluster.nsxlib_config = mock.Mock() mock_cluster.nsxlib_config.url_base = 'abc' mock_cluster.nsxlib_config.keepalive_section = 'transport-zones' provider = cluster.NSXRequestsHTTPProvider() self.assertRaises(nsxlib_exc.ResourceNotFound, provider.validate_connection, mock_cluster, mock_ep, mock_conn) with mock.patch.object(client.JSONRESTClient, "get", return_value={'result_count': 1}): provider.validate_connection(mock_cluster, mock_ep, mock_conn) class NsxV3ClusteredAPITestCase(nsxlib_testcase.NsxClientTestCase): def _assert_providers(self, cluster_api, provider_tuples): self.assertEqual(len(cluster_api.providers), len(provider_tuples)) def _assert_provider(pid, purl): for provider in cluster_api.providers: if provider.id == pid and 
provider.url == purl: return self.fail("Provider: %s not found" % pid) for provider_tuple in provider_tuples: _assert_provider(provider_tuple[0], provider_tuple[1]) def test_conf_providers_no_scheme(self): conf_managers = ['8.9.10.11', '9.10.11.12:4433'] api = self.new_mocked_cluster(conf_managers, _validate_conn_up) self._assert_providers( api, [(p, "https://%s" % p) for p in conf_managers]) def test_conf_providers_with_scheme(self): conf_managers = ['http://8.9.10.11:8080', 'https://9.10.11.12:4433'] api = self.new_mocked_cluster(conf_managers, _validate_conn_up) self._assert_providers( api, [(urlparse.urlparse(p).netloc, p) for p in conf_managers]) def test_http_retries(self): api = self.mock_nsx_clustered_api(retries=9) with api.endpoints['1.2.3.4'].pool.item() as session: self.assertEqual( session.adapters['https://'].max_retries.total, 9) def test_conns_per_pool(self): conf_managers = ['8.9.10.11', '9.10.11.12:4433'] api = self.new_mocked_cluster( conf_managers, _validate_conn_up, concurrent_connections=11) for ep_id, ep in api.endpoints.items(): self.assertEqual(ep.pool.max_size, 11) def test_timeouts(self): api = self.mock_nsx_clustered_api(http_read_timeout=37, http_timeout=7) api.get('logical-ports') mock_call = api.recorded_calls.method_calls[0] name, args, kwargs = mock_call self.assertEqual(kwargs['timeout'], (7, 37)) # Repeat the above tests with client cert present # in NsxLib initialization class NsxV3ClusteredAPIWithClientCertTestCase(NsxV3ClusteredAPITestCase): def use_client_cert_auth(self): return True class ClusteredAPITestCase(nsxlib_testcase.NsxClientTestCase): def _test_health(self, validate_fn, expected_health): conf_managers = ['8.9.10.11', '9.10.11.12'] api = self.new_mocked_cluster(conf_managers, validate_fn) self.assertEqual(expected_health, api.health) def test_orange_health(self): def _validate(cluster_api, endpoint, conn): if endpoint.provider.id == '8.9.10.11': raise Exception() self._test_health(_validate, cluster.ClusterHealth.ORANGE) def test_green_health(self): self._test_health(_validate_conn_up, cluster.ClusterHealth.GREEN) def test_red_health(self): self._test_health(_validate_conn_down, cluster.ClusterHealth.RED) def test_cluster_validate_with_exception(self): conf_managers = ['8.9.10.11', '9.10.11.12', '10.11.12.13'] api = self.new_mocked_cluster(conf_managers, _validate_conn_down) self.assertEqual(3, len(api.endpoints)) self.assertRaises(nsxlib_exc.ServiceClusterUnavailable, api.get, 'api/v1/transport-zones') def test_cluster_proxy_stale_revision(self): def stale_revision(): raise nsxlib_exc.StaleRevision(manager='1.1.1.1', operation='whatever') api = self.mock_nsx_clustered_api(session_response=stale_revision) self.assertRaises(nsxlib_exc.StaleRevision, api.get, 'api/v1/transport-zones') def test_cluster_proxy_connection_error(self): def connect_timeout(): raise requests_exceptions.ConnectTimeout() api = self.mock_nsx_clustered_api(session_response=connect_timeout) api._validate = mock.Mock() self.assertRaises(nsxlib_exc.ServiceClusterUnavailable, api.get, 'api/v1/transport-zones') def test_cluster_round_robin_servicing(self): conf_managers = ['8.9.10.11', '9.10.11.12', '10.11.12.13'] api = self.mock_nsx_clustered_api(nsx_api_managers=conf_managers) api._validate = mock.Mock() eps = list(api._endpoints.values()) def _get_schedule(num_eps): return [api._select_endpoint() for i in range(num_eps)] self.assertEqual(_get_schedule(3), eps) self.assertEqual(_get_schedule(6), [eps[0], eps[1], eps[2], eps[0], eps[1], eps[2]]) eps[0]._state = 
cluster.EndpointState.DOWN self.assertEqual(_get_schedule(4), [eps[1], eps[2], eps[1], eps[2]]) eps[1]._state = cluster.EndpointState.DOWN self.assertEqual(_get_schedule(2), [eps[2], eps[2]]) eps[0]._state = cluster.EndpointState.UP self.assertEqual(_get_schedule(4), [eps[0], eps[2], eps[0], eps[2]]) def test_reinitialize_cluster(self): with mock.patch.object(cluster.TimeoutSession, 'request', return_value=get_sess_create_resp()): api = self.mock_nsx_clustered_api() # just make sure this api is defined, and does not crash api._reinit_cluster() vmware-nsxlib-12.0.1/vmware_nsxlib/tests/unit/v3/test_constants.py0000666000175100017510000004235513244535763025422 0ustar zuulzuul00000000000000# Copyright (c) 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_utils import uuidutils FAKE_NAME = "fake_name" FAKE_SWITCH_UUID = uuidutils.generate_uuid() FAKE_IP_SET_UUID = uuidutils.generate_uuid() FAKE_PORT_UUID = uuidutils.generate_uuid() FAKE_PORT = { "id": FAKE_PORT_UUID, "display_name": FAKE_NAME, "resource_type": "LogicalPort", "address_bindings": [], "logical_switch_id": FAKE_SWITCH_UUID, "admin_state": "UP", "attachment": { "id": "9ca8d413-f7bf-4276-b4c9-62f42516bdb2", "attachment_type": "VIF" }, "switching_profile_ids": [ { "value": "64814784-7896-3901-9741-badeff705639", "key": "IpDiscoverySwitchingProfile" }, { "value": "fad98876-d7ff-11e4-b9d6-1681e6b88ec1", "key": "SpoofGuardSwitchingProfile" }, { "value": "93b4b7e8-f116-415d-a50c-3364611b5d09", "key": "PortMirroringSwitchingProfile" }, { "value": "fbc4fb17-83d9-4b53-a286-ccdf04301888", "key": "SwitchSecuritySwitchingProfile" }, { "value": "f313290b-eba8-4262-bd93-fab5026e9495", "key": "QosSwitchingProfile" } ] } FAKE_CONTAINER_PORT = { "id": FAKE_PORT_UUID, "display_name": FAKE_NAME, "resource_type": "LogicalPort", "address_bindings": [ { "ip_address": "192.168.1.110", "mac_address": "aa:bb:cc:dd:ee:ff" } ], "logical_switch_id": FAKE_SWITCH_UUID, "admin_state": "UP", "attachment": { "id": "9ca8d413-f7bf-4276-b4c9-62f42516bdb2", "attachment_type": "VIF", "context": { "vlan_tag": 122, "container_host_vif_id": "c6f817a0-4e36-421e-98a6-8a2faed880bc", "resource_type": "VifAttachmentContext", "app_id": "container-1", "vif_type": "CHILD", "allocate_addresses": "Both", } }, "switching_profile_ids": [ { "value": "64814784-7896-3901-9741-badeff705639", "key": "IpDiscoverySwitchingProfile" }, { "value": "fad98876-d7ff-11e4-b9d6-1681e6b88ec1", "key": "SpoofGuardSwitchingProfile" }, { "value": "93b4b7e8-f116-415d-a50c-3364611b5d09", "key": "PortMirroringSwitchingProfile" }, { "value": "fbc4fb17-83d9-4b53-a286-ccdf04301888", "key": "SwitchSecuritySwitchingProfile" }, { "value": "f313290b-eba8-4262-bd93-fab5026e9495", "key": "QosSwitchingProfile" } ] } FAKE_ROUTER_UUID = uuidutils.generate_uuid() FAKE_ROUTER_FW_SEC_UUID = uuidutils.generate_uuid() FAKE_ROUTER = { "resource_type": "LogicalRouter", "revision": 0, "id": FAKE_ROUTER_UUID, "display_name": FAKE_NAME, "firewall_sections": [{ "is_valid": True, "target_type": "FirewallSection", 
"target_id": FAKE_ROUTER_FW_SEC_UUID }], } FAKE_ROUTER_PORT_UUID = uuidutils.generate_uuid() FAKE_ROUTER_PORT = { "resource_type": "LogicalRouterLinkPort", "revision": 0, "id": FAKE_ROUTER_PORT_UUID, "display_name": FAKE_NAME, "logical_router_id": FAKE_ROUTER_UUID } FAKE_QOS_PROFILE = { "resource_type": "QosSwitchingProfile", "id": uuidutils.generate_uuid(), "display_name": FAKE_NAME, "system_defined": False, "dscp": { "priority": 25, "mode": "UNTRUSTED" }, "tags": [], "description": FAKE_NAME, "class_of_service": 0, "shaper_configuration": [ { "resource_type": "IngressRateShaper", "enabled": False, "peak_bandwidth_mbps": 0, "burst_size_bytes": 0, "average_bandwidth_mbps": 0 }, { "resource_type": "IngressBroadcastRateShaper", "enabled": False, "peak_bandwidth_kbps": 0, "average_bandwidth_kbps": 0, "burst_size_bytes": 0 }, { "resource_type": "EgressRateShaper", "enabled": False, "peak_bandwidth_mbps": 0, "burst_size_bytes": 0, "average_bandwidth_mbps": 0 } ], "_last_modified_user": "admin", "_last_modified_time": 1438383180608, "_create_time": 1438383180608, "_create_user": "admin", "_revision": 0 } FAKE_IP_POOL_UUID = uuidutils.generate_uuid() FAKE_IP_POOL = { "_revision": 0, "id": FAKE_IP_POOL_UUID, "display_name": "IPPool-IPV6-1", "description": "IPPool-IPV6-1 Description", "subnets": [{ "dns_nameservers": [ "2002:a70:cbfa:1:1:1:1:1" ], "allocation_ranges": [{ "start": "2002:a70:cbfa:0:0:0:0:1", "end": "2002:a70:cbfa:0:0:0:0:5" }], "gateway_ip": "2002:a80:cbfa:0:0:0:0:255", "cidr": "2002:a70:cbfa:0:0:0:0:0/24" }], } FAKE_IP_SET = { "id": FAKE_IP_SET_UUID, "display_name": FAKE_NAME, "resource_type": "IPSet", "ip_addresses": [ "192.168.1.1-192.168.1.6", "192.168.1.8", "192.168.4.8/24"] } FAKE_APPLICATION_PROFILE_UUID = uuidutils.generate_uuid() FAKE_APPLICATION_PROFILE = { "resource_type": "LbHttpProfile", "description": "my http profile", "id": FAKE_APPLICATION_PROFILE_UUID, "display_name": "httpprofile1", "ntlm": False, "request_header_size": 1024, "http_redirect_to_https": False, "idle_timeout": 1800, "x_forwarded_for": "INSERT", "_create_user": "admin", "_create_time": 1493834124218, "_last_modified_user": "admin", "_last_modified_time": 1493834124218, "_system_owned": False, "_revision": 0 } FAKE_PERSISTENCE_PROFILE_UUID = uuidutils.generate_uuid() FAKE_PERSISTENCE_PROFILE = { "resource_type": "LbCookiePersistenceProfile", "description": "cookie persistence", "id": FAKE_PERSISTENCE_PROFILE_UUID, "display_name": "cookiePersistence", "cookie_mode": "INSERT", "cookie_garble": True, "cookie_fallback": True, "cookie_name": "ABC", "_create_user": "admin", "_create_time": 1493837413804, "_last_modified_user": "admin", "_last_modified_time": 1493837413804, "_system_owned": False, "_revision": 0 } FAKE_RULE_UUID = uuidutils.generate_uuid() FAKE_RULE = { "resource_type": "LbRule", "description": "LbRule to route login requests to dedicated pool", "id": FAKE_RULE_UUID, "display_name": "LoginRouteRule", "phase": "HTTP_FORWARDING", "match_strategy": "ALL", "match_conditions": [ { "type": "LbHttpRequestUriCondition", "uri": "/login" } ], "actions": [ { "type": "LbSelectPoolAction", "pool_id": "54411c58-046c-4236-8ff1-e1e1aad3e873" } ] } FAKE_CLIENT_SSL_PROFILE_UUID = uuidutils.generate_uuid() FAKE_CLIENT_SSL_PROFILE = { "display_name": "clientSslProfile1", "description": "client ssl profile", "id": FAKE_CLIENT_SSL_PROFILE_UUID, "prefer_server_ciphers": False, "session_cache_enabled": False, "session_cache_timeout": 300 } FAKE_SERVER_SSL_PROFILE_UUID = uuidutils.generate_uuid() 
FAKE_SERVER_SSL_PROFILE = { "display_name": "serverSslProfile1", "description": "server ssl profile", "id": FAKE_SERVER_SSL_PROFILE_UUID, "session_cache_enabled": False } FAKE_MONITOR_UUID = uuidutils.generate_uuid() FAKE_MONITOR = { "display_name": "httpmonitor1", "description": "my http monitor", "id": FAKE_MONITOR_UUID, "resource_type": "LbHttpMonitor", "interval": 5, "rise_count": 3, "fall_count": 3, "timeout": 15, "request_url": "/", "request_method": "GET", "monitor_port": "80" } FAKE_POOL_UUID = uuidutils.generate_uuid() FAKE_POOL = { "display_name": "httppool1", "description": "my http pool", "id": FAKE_POOL_UUID, "algorithm": "ROUND_ROBIN", } FAKE_VIRTUAL_SERVER_UUID = uuidutils.generate_uuid() FAKE_VIRTUAL_SERVER = { "display_name": "httpvirtualserver1", "description": "my http virtual server", "id": FAKE_VIRTUAL_SERVER_UUID, "enabled": True, "port": "80", "ip_protocol": "TCP", } FAKE_SERVICE_UUID = uuidutils.generate_uuid() FAKE_SERVICE = { "display_name": "my LB web service1", "description": "my LB web service", "id": FAKE_SERVICE_UUID, "enabled": True, "attachment": { "target_id": FAKE_ROUTER_UUID, "target_type": "LogicalRouter" } } FAKE_TZ_UUID = uuidutils.generate_uuid() FAKE_TZ = { "resource_type": "TransportZone", "revision": 0, "id": FAKE_TZ_UUID, "display_name": FAKE_NAME, "transport_type": "OVERLAY", "host_switch_mode": "STANDARD" } FAKE_MD_UUID = uuidutils.generate_uuid() FAKE_URL = "http://7.7.7.70:3500/abc" FAKE_MD = { "resource_type": "MetadataProxy", "revision": 0, "id": FAKE_MD_UUID, "metadata_server_url": FAKE_URL } FAKE_RELAY_UUID = uuidutils.generate_uuid() FAKE_RELAY_SERVER = "6.6.6.6" FAKE_RELAY_PROFILE = { "id": FAKE_RELAY_UUID, "display_name": "dummy", "server_addresses": [FAKE_RELAY_SERVER], "resource_type": "DhcpRelayProfile" } FAKE_RELAY_SERVICE_UUID = uuidutils.generate_uuid() FAKE_RELAY_SERVICE = { "id": FAKE_RELAY_SERVICE_UUID, "display_name": "dummy", "dhcp_relay_profile_id": FAKE_RELAY_UUID, "resource_type": "DhcpRelayService" } FAKE_DEFAULT_CERTIFICATE_ID = uuidutils.generate_uuid() FAKE_CERT_LIST = [ {'pem_encoded': '-----BEGINCERTIFICATE-----\n' 'MIIDmzCCAoOgAwIBAgIGAV8Rg5RhMA0GCSqGSIb3DQEBCwUAMHoxJzA' 'lBgNVBAMM\nHlZNd2FyZSBOU1hBUEkgVHJ1c3QgTWFuYWdlbWVudDET' 'MBEGA1UECgwKVk13YXJl\nIEluYzEMMAoGA1UECwwDTlNYMQswCQYDV' 'QQGEwJVUzELMAkGA1UECAwCQ0ExEjAQ\nBgNVBAcMCVBhbG8gQWx0bz' 'AeFw0xNzEwMTIxNjU1NTZaFw0yNzEwMTAxNjU1NTZa\nMHoxJzAlBgN' 'VBAMMHlZNd2FyZSBOU1hBUEkgVHJ1c3QgTWFuYWdlbWVudDETMBEG\n' 'A1UECgwKVk13YXJlIEluYzEMMAoGA1UECwwDTlNYMQswCQYDVQQGEwJ' 'VUzELMAkG\nA1UECAwCQ0ExEjAQBgNVBAcMCVBhbG8gQWx0bzCCASIw' 'DQYJKoZIhvcNAQEBBQAD\nggEPADCCAQoCggEBAJuRUtmJLamkJyW3X' 'qpilC7o0dxp3l5vlWWCjnbz3cl+/5Fd\nnpd8dTco9UMeSv5bPBGvLm' 'qSPBZwTYCO3JAowF7aS3qPPWo8tNYWqlMfrZqo5Phc\nGRwtTkfK+GO' '2VN6EG7kTewjrNMW7EAA/68fsNk0QeYIkDJw4ozaX6MhyNDjR+20M\n' '0urN5DEt0ucNZfuQ0pfwYwZoAULHJJODRgUzQG7OT0u64m4ugjQ0uxD' '268aV2IFU\ntSln5HAw2IHXsSn+TVCxInDb+3Uj5E0gjANk5xH7yumi' 'mFXC5DGVvdi1vHdQwZzi\nEklX2Gj2+qEiLul9Jr6BjMM+cor3ediuL' 'KfC05kCAwEAAaMnMCUwDgYDVR0PAQH/\nBAQDAgeAMBMGA1UdJQQMMA' 'oGCCsGAQUFBwMCMA0GCSqGSIb3DQEBCwUAA4IBAQBb\nk498dN3Wid9' '0NIfEJOtTuPtMBSLbCuXgeAqmxGgAB1mYyXCSk50AzkzDZqdt7J9Z\n' 'm3LMe1mfyzfD5zboGiSbb6OrMac3RO9B3nFl2h2pkJtZQAqQDxrighQ' 'qodlbLCum\nw3juA9AIx+YveAOP8mwldo6XJX4ogIXiTol6m1EkOmJ/' '6YnFiVN/BloBhSbbv2zJ\nhk9LKwCjZ23hkWj74zQY94iknhcS3VxEt' 'FlEyk1VrRGkmFfn618JCOCt+8Zuw1M3\nlkn4tA81IVjbj/uWaRIDY1' 'gSfltVX14vNy5fbtCHlQiJgI/A4I4z8UNaktkLO/ie\ntiAwSni6x7S' 'ZWsf3Sy/P\n-----END CERTIFICATE-----\n', 'id': 'c863428e-bfce-4a93-9341-6c9b9ec07657', 
'resource_type': 'certificate_self_signed'}, {'pem_encoded': '-----BEGIN CERTIFICATE-----\n' 'MIIEgzCCAmsCCQCmkvlHE5M1KTANBgkqhkiG9w0BAQsFADB0MQswCQY' 'DVQQGEwJV\nUzETMBEGA1UECAwKQ2FsaWZvcm5pYTESMBAGA1UEBwwJ' 'UGFsbyBBbHRvMQ8wDQYD\nVQQKDAZWTXdhcmUxDTALBgNVBAsMBE5TQ' 'lUxHDAaBgNVBAMME1ZNd2FyZSBOU0JV\nIFJvb3QgQ0EwHhcNMTcxMD' 'EyMjI0NzU0WhcNMTgxMDA3MjI0NzU0WjCBkjELMAkG\nA1UEBhMCVVM' 'xEzARBgNVBAgMCkNhbGlmb3JuaWExEjAQBgNVBAcMCVBhbG8gQWx0\n' 'bzEPMA0GA1UECgwGVk13YXJlMQ0wCwYDVQQLDAROU0JVMRgwFgYDVQQ' 'DDA93d3cu\nZXhhbXBsZS5jb20xIDAeBgkqhkiG9w0BCQEWEWFkbWlu' 'QGV4YW1wbGUuY29tMIIB\nIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBC' 'gKCAQEA7F2TheIEy9g9CwVMlxlTuZqQ\n6QbJdymQw9RQwR0O09wsbS' 'jx4XJtzwDjCX7aZ1ON7eZBXXNkQx6nWlkYrS7zmR4T\npWmLiIYQWpV' 'H6oIzgEEaeabFOqfs5b0zbYZN868fcFsPVGGgizfKO6I+gJwp5sii\n' 'IQvBa9hCKlXRwbGYYeywThfMf4plxzj/YDIIBkM+4qck58sr7Nhjb5J' 'FD60LrOJK\nSdqzCSinsYlx5eZ4f5GjpMc7euAsS5UVdZFV13CysK83' '6h/KHYyz/LXTjGpGbDd7\n2wPSUZRkjY58I5FU0hVeH3zMoaVJBfXmj' 'X8TVjR2Jk+NcNr5Azmgn3BC8pTqowID\nAQABMA0GCSqGSIb3DQEBCw' 'UAA4ICAQBtGBazJXwQVtIqBeyzmoQDWNctBc5VSTEq\nGT3dAyy0LYJ' 'Tm+4aaCVAY4uiS6HTzb4MQR+EtGxN/1fLyFgs/V3oQ+bRh+aWS85u\n' 'J4sZL87EtO7VlXLt8mAjqrAAJwwywMhbw+PlGVjhJgp8vAjpbDiccmb' 'QRN/noSSF\nTCqUDFtsP4yyf+b8xbipVGvmTLrqTX1Dt9iQKKKD8QYi' 'GG0Bt2t38YVc8hEQg3TC\n8xjs1OcyYN+oCRHj+Nunib9fH8OGMjn3j' 'OpVAJGADpwmTc0rbwkTFtTUweT5HSCD\nrzLZNI0DwjLeR8mDZRMpjN' 'tYaCSERbpzhEUFWEIXuVT3GdrgsPGcNZi520cyeUyz\nTC9ixXgkiy4' 'yS8zqca0v2mryrf9MxhYKu2nek+0GB4WodHO904Tlbcdz9wHnCi4f\n' '6VdS7/lKncvj8yJrqE7yQtzLlNGjBUJNajp/jchzlHpsYLCiuIX7fyh' '6Z+cQVwjJ\nSWkf7yuOO+jEw45A0Jxtyl3aLf5aoptmzLOKLFznscSg' 'tkFvtdh4O/APxORxgPKc\n1WiQCpUecsmxc4qMRulh31tVBFi6uIsKY' 'vrUkP5JaxIxV/nKGBDJyzKbAZWLqdnm\nNd3coEUMwd16vr57QJatJb' 'To/wVMMbvW3vqVy0AuXReHCPVTDF5+vnsMGXK/IV7w\nLzulLswFmA=' '=\n-----END CERTIFICATE-----\n', 'id': 'e4b0ab75-ce14-456e-8f5f-071303dd6275', 'resource_type': 'certificate_signed'} ] FAKE_CERT_PEM = ( "-----BEGIN CERTIFICATE-----\n" "MIIEgzCCAmsCCQCmkvlHE5M1KTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV\n" "UzETMBEGA1UECAwKQ2FsaWZvcm5pYTESMBAGA1UEBwwJUGFsbyBBbHRvMQ8wDQYD\n" "VQQKDAZWTXdhcmUxDTALBgNVBAsMBE5TQlUxHDAaBgNVBAMME1ZNd2FyZSBOU0JV\n" "IFJvb3QgQ0EwHhcNMTcxMDEyMjI0NzU0WhcNMTgxMDA3MjI0NzU0WjCBkjELMAkG\n" "A1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExEjAQBgNVBAcMCVBhbG8gQWx0\n" "bzEPMA0GA1UECgwGVk13YXJlMQ0wCwYDVQQLDAROU0JVMRgwFgYDVQQDDA93d3cu\n" "ZXhhbXBsZS5jb20xIDAeBgkqhkiG9w0BCQEWEWFkbWluQGV4YW1wbGUuY29tMIIB\n" "IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA7F2TheIEy9g9CwVMlxlTuZqQ\n" "6QbJdymQw9RQwR0O09wsbSjx4XJtzwDjCX7aZ1ON7eZBXXNkQx6nWlkYrS7zmR4T\n" "pWmLiIYQWpVH6oIzgEEaeabFOqfs5b0zbYZN868fcFsPVGGgizfKO6I+gJwp5sii\n" "IQvBa9hCKlXRwbGYYeywThfMf4plxzj/YDIIBkM+4qck58sr7Nhjb5JFD60LrOJK\n" "SdqzCSinsYlx5eZ4f5GjpMc7euAsS5UVdZFV13CysK836h/KHYyz/LXTjGpGbDd7\n" "2wPSUZRkjY58I5FU0hVeH3zMoaVJBfXmjX8TVjR2Jk+NcNr5Azmgn3BC8pTqowID\n" "AQABMA0GCSqGSIb3DQEBCwUAA4ICAQBtGBazJXwQVtIqBeyzmoQDWNctBc5VSTEq\n" "GT3dAyy0LYJTm+4aaCVAY4uiS6HTzb4MQR+EtGxN/1fLyFgs/V3oQ+bRh+aWS85u\n" "J4sZL87EtO7VlXLt8mAjqrAAJwwywMhbw+PlGVjhJgp8vAjpbDiccmbQRN/noSSF\n" "TCqUDFtsP4yyf+b8xbipVGvmTLrqTX1Dt9iQKKKD8QYiGG0Bt2t38YVc8hEQg3TC\n" "8xjs1OcyYN+oCRHj+Nunib9fH8OGMjn3jOpVAJGADpwmTc0rbwkTFtTUweT5HSCD\n" "rzLZNI0DwjLeR8mDZRMpjNtYaCSERbpzhEUFWEIXuVT3GdrgsPGcNZi520cyeUyz\n" "TC9ixXgkiy4yS8zqca0v2mryrf9MxhYKu2nek+0GB4WodHO904Tlbcdz9wHnCi4f\n" "6VdS7/lKncvj8yJrqE7yQtzLlNGjBUJNajp/jchzlHpsYLCiuIX7fyh6Z+cQVwjJ\n" "SWkf7yuOO+jEw45A0Jxtyl3aLf5aoptmzLOKLFznscSgtkFvtdh4O/APxORxgPKc\n" "1WiQCpUecsmxc4qMRulh31tVBFi6uIsKYvrUkP5JaxIxV/nKGBDJyzKbAZWLqdnm\n" 
"Nd3coEUMwd16vr57QJatJbTo/wVMMbvW3vqVy0AuXReHCPVTDF5+vnsMGXK/IV7w\n" "LzulLswFmA==\n" "-----END CERTIFICATE-----\n") FAKE_DPD_ID = "c933402b-f111-4634-9d66-cc8fffde0f65" FAKE_DPD = { "resource_type": "IPSecVPNDPDProfile", "description": "neutron dpd profile", "id": FAKE_DPD_ID, "display_name": "con1-dpd-profile", "enabled": True, "timeout": 120, } FAKE_PEP_ID = "a7b2915c-2041-4a33-9ea7-9d22b67bf38e" FAKE_PEP = { "resource_type": "IPSecVPNPeerEndpoint", "id": FAKE_PEP_ID, "display_name": "con1", "connection_initiation_mode": "INITIATOR", "authentication_mode": "PSK", "ipsec_tunnel_profile_id": "76e3707d-22e5-4e36-a9ef-b568215e2481", "dpd_profile_id": "04191f5f-3bdd-4ec1-ae56-154b06778d4f", "ike_profile_id": "df386534-5cec-49b4-9c21-4c212cba3cbf", "peer_address": "172.24.4.233", "peer_id": "172.24.4.233" } FAKE_LEP_ID = "cb57de72-4adb-4dad-9abc-685f9f1d0265" FAKE_LEP = { "resource_type": "IPSecVPNLocalEndpoint", "description": "XXX", "id": FAKE_LEP_ID, "display_name": "XXX", "local_id": "1.1.1.1", "ipsec_vpn_service_id": {"target_id": "aca38a11-981b-46d8-9e2c-9bedc0d96794"}, "local_address": "1.1.1.1", "trust_ca_ids": [], "trust_crl_ids": [], } vmware-nsxlib-12.0.1/vmware_nsxlib/tests/unit/v3/test_qos_switching_profile.py0000666000175100017510000002736013244535763030006 0ustar zuulzuul00000000000000# Copyright (c) 2015 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# import copy import mock from oslo_log import log from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase from vmware_nsxlib.tests.unit.v3 import test_constants from vmware_nsxlib.v3 import nsx_constants LOG = log.getLogger(__name__) class NsxLibQosTestCase(nsxlib_testcase.NsxClientTestCase): def _body(self, qos_marking=None, dscp=None, description=test_constants.FAKE_NAME): body = { "resource_type": "QosSwitchingProfile", "tags": [] } if qos_marking: body = self.nsxlib.qos_switching_profile._update_dscp_in_args( body, qos_marking, dscp) body["display_name"] = test_constants.FAKE_NAME body["description"] = description return body def _body_with_shaping(self, shaping_enabled=False, burst_size=None, peak_bandwidth=None, average_bandwidth=None, description=test_constants.FAKE_NAME, qos_marking=None, dscp=0, direction=nsx_constants.EGRESS, body=None): if body is None: body = copy.deepcopy(test_constants.FAKE_QOS_PROFILE) body["display_name"] = test_constants.FAKE_NAME body["description"] = description resource_type = (nsx_constants.EGRESS_SHAPING if direction == nsx_constants.EGRESS else nsx_constants.INGRESS_SHAPING) for shaper in body["shaper_configuration"]: if shaper["resource_type"] == resource_type: shaper["enabled"] = shaping_enabled if burst_size: shaper["burst_size_bytes"] = burst_size if peak_bandwidth: shaper["peak_bandwidth_mbps"] = peak_bandwidth if average_bandwidth: shaper["average_bandwidth_mbps"] = average_bandwidth break if qos_marking: body = self.nsxlib.qos_switching_profile._update_dscp_in_args( body, qos_marking, dscp) return body def test_create_qos_switching_profile(self): """Test creating a qos-switching profile returns the correct response """ with mock.patch.object(self.nsxlib.client, 'create') as create: self.nsxlib.qos_switching_profile.create( tags=[], name=test_constants.FAKE_NAME, description=test_constants.FAKE_NAME) create.assert_called_with( 'switching-profiles', self._body()) def test_update_qos_switching_profile(self): """Test updating a qos-switching profile returns the correct response """ original_profile = self._body() new_description = "Test" with mock.patch.object(self.nsxlib.client, 'get', return_value=original_profile): with mock.patch.object(self.nsxlib.client, 'update') as update: # update the description of the profile self.nsxlib.qos_switching_profile.update( test_constants.FAKE_QOS_PROFILE['id'], tags=[], description=new_description) update.assert_called_with( 'switching-profiles/%s' % test_constants.FAKE_QOS_PROFILE['id'], self._body(description=new_description), headers=None) def _enable_qos_switching_profile_shaping( self, direction=nsx_constants.EGRESS, new_burst_size=100): """Test updating a qos-switching profile returns the correct response """ original_burst = 10 original_profile = self._body_with_shaping(direction=direction, burst_size=original_burst) peak_bandwidth = 200 average_bandwidth = 300 qos_marking = "untrusted" dscp = 10 with mock.patch.object(self.nsxlib.client, 'get', return_value=original_profile): with mock.patch.object(self.nsxlib.client, 'update') as update: # update the bw shaping of the profile self.nsxlib.qos_switching_profile.update_shaping( test_constants.FAKE_QOS_PROFILE['id'], shaping_enabled=True, burst_size=new_burst_size, peak_bandwidth=peak_bandwidth, average_bandwidth=average_bandwidth, qos_marking=qos_marking, dscp=dscp, direction=direction) actual_body = copy.deepcopy(update.call_args[0][1]) actual_path = update.call_args[0][0] expected_path = ('switching-profiles/%s' % 
test_constants.FAKE_QOS_PROFILE['id']) expected_burst = (new_burst_size if new_burst_size is not None else original_burst) expected_body = self._body_with_shaping( shaping_enabled=True, burst_size=expected_burst, peak_bandwidth=peak_bandwidth, average_bandwidth=average_bandwidth, qos_marking="untrusted", dscp=10, direction=direction) self.assertEqual(expected_path, actual_path) self.assertEqual(expected_body, actual_body) def test_enable_qos_switching_profile_egress_shaping(self): self._enable_qos_switching_profile_shaping( direction=nsx_constants.EGRESS) def test_enable_qos_switching_profile_ingress_shaping(self): self._enable_qos_switching_profile_shaping( direction=nsx_constants.INGRESS) def test_update_qos_switching_profile_with_burst_size(self): self._enable_qos_switching_profile_shaping( direction=nsx_constants.EGRESS, new_burst_size=101) def test_update_qos_switching_profile_without_burst_size(self): self._enable_qos_switching_profile_shaping( direction=nsx_constants.EGRESS, new_burst_size=None) def test_update_qos_switching_profile_zero_burst_size(self): self._enable_qos_switching_profile_shaping( direction=nsx_constants.EGRESS, new_burst_size=0) def _disable_qos_switching_profile_shaping( self, direction=nsx_constants.EGRESS): """Test updating a qos-switching profile. Returns the correct response """ burst_size = 100 peak_bandwidth = 200 average_bandwidth = 300 original_profile = self._body_with_shaping( shaping_enabled=True, burst_size=burst_size, peak_bandwidth=peak_bandwidth, average_bandwidth=average_bandwidth, qos_marking="untrusted", dscp=10, direction=direction) with mock.patch.object(self.nsxlib.client, 'get', return_value=original_profile): with mock.patch.object(self.nsxlib.client, 'update') as update: # update the bw shaping of the profile self.nsxlib.qos_switching_profile.update_shaping( test_constants.FAKE_QOS_PROFILE['id'], shaping_enabled=False, qos_marking="trusted", direction=direction) actual_body = copy.deepcopy(update.call_args[0][1]) actual_path = update.call_args[0][0] expected_path = ('switching-profiles/%s' % test_constants.FAKE_QOS_PROFILE['id']) expected_body = self._body_with_shaping(qos_marking="trusted", direction=direction) self.assertEqual(expected_path, actual_path) self.assertEqual(expected_body, actual_body) def test_disable_qos_switching_profile_egress_shaping(self): self._disable_qos_switching_profile_shaping( direction=nsx_constants.EGRESS) def test_disable_qos_switching_profile_ingress_shaping(self): self._disable_qos_switching_profile_shaping( direction=nsx_constants.INGRESS) def test_delete_qos_switching_profile(self): """Test deleting qos-switching-profile""" with mock.patch.object(self.nsxlib.client, 'delete') as delete: self.nsxlib.qos_switching_profile.delete( test_constants.FAKE_QOS_PROFILE['id']) delete.assert_called_with( 'switching-profiles/%s' % test_constants.FAKE_QOS_PROFILE['id']) def test_qos_switching_profile_set_shaping(self): """Test updating a qos-switching profile returns the correct response """ egress_peak_bandwidth = 200 egress_average_bandwidth = 300 egress_burst_size = 500 ingress_peak_bandwidth = 100 ingress_average_bandwidth = 400 ingress_burst_size = 600 qos_marking = "untrusted" dscp = 10 original_profile = self._body_with_shaping() with mock.patch.object(self.nsxlib.client, 'get', return_value=original_profile): with mock.patch.object(self.nsxlib.client, 'update') as update: # update the bw shaping of the profile self.nsxlib.qos_switching_profile.set_profile_shaping( test_constants.FAKE_QOS_PROFILE['id'], 
ingress_bw_enabled=True, ingress_burst_size=ingress_burst_size, ingress_peak_bandwidth=ingress_peak_bandwidth, ingress_average_bandwidth=ingress_average_bandwidth, egress_bw_enabled=True, egress_burst_size=egress_burst_size, egress_peak_bandwidth=egress_peak_bandwidth, egress_average_bandwidth=egress_average_bandwidth, qos_marking=qos_marking, dscp=dscp) actual_body = copy.deepcopy(update.call_args[0][1]) actual_path = update.call_args[0][0] expected_path = ('switching-profiles/%s' % test_constants.FAKE_QOS_PROFILE['id']) expected_body = self._body_with_shaping( shaping_enabled=True, burst_size=egress_burst_size, peak_bandwidth=egress_peak_bandwidth, average_bandwidth=egress_average_bandwidth, qos_marking="untrusted", dscp=10, direction=nsx_constants.EGRESS) # Add the other direction to the body expected_body = self._body_with_shaping( shaping_enabled=True, burst_size=ingress_burst_size, peak_bandwidth=ingress_peak_bandwidth, average_bandwidth=ingress_average_bandwidth, direction=nsx_constants.INGRESS, body=expected_body) self.assertEqual(expected_path, actual_path) self.assertEqual(expected_body, actual_body) vmware-nsxlib-12.0.1/vmware_nsxlib/tests/unit/v3/nsxlib_testcase.py0000666000175100017510000003532413244535763025537 0ustar zuulzuul00000000000000# Copyright (c) 2015 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # import copy import unittest import mock from oslo_serialization import jsonutils from oslo_utils import uuidutils from requests import exceptions as requests_exceptions from requests import models from vmware_nsxlib import v3 from vmware_nsxlib.v3 import client as nsx_client from vmware_nsxlib.v3 import client_cert from vmware_nsxlib.v3 import cluster as nsx_cluster from vmware_nsxlib.v3 import config from vmware_nsxlib.v3 import utils NSX_USER = 'admin' NSX_PASSWORD = 'default' NSX_MANAGER = '1.2.3.4' NSX_INSECURE = False NSX_CERT = '/opt/stack/certs/nsx.pem' CLIENT_CERT = '/opt/stack/certs/client.pem' NSX_HTTP_RETRIES = 10 NSX_HTTP_TIMEOUT = 10 NSX_HTTP_READ_TIMEOUT = 180 NSX_CONCURENT_CONN = 10 NSX_CONN_IDLE_TIME = 10 NSX_MAX_ATTEMPTS = 10 PLUGIN_SCOPE = "plugin scope" PLUGIN_TAG = "plugin tag" PLUGIN_VER = "plugin ver" DNS_NAMESERVERS = ['1.1.1.1'] DNS_DOMAIN = 'openstacklocal' JSESSIONID = 'my_sess_id' def _mock_nsxlib(): def _return_id_key(*args, **kwargs): return {'id': uuidutils.generate_uuid()} def _mock_add_rules_in_section(*args): # NOTE(arosen): the code in the neutron plugin expects the # neutron rule id as the display_name. 
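# A minimal, standalone sketch of the side_effect technique used throughout
# these mocking helpers: the patched call is routed to a local function so
# tests receive realistic, id-keyed return values instead of bare Mock
# objects.  The fake_add_rules name and its signature below are illustrative
# assumptions, not part of vmware-nsxlib.
import mock
from oslo_utils import uuidutils


def _sketch_side_effect_mock():
    def fake_add_rules(rules, *args):
        # echo each rule back with a generated id, like a real backend would
        return {'rules': [{'display_name': rule['display_name'],
                           'id': uuidutils.generate_uuid()}
                          for rule in rules]}

    add_rules = mock.Mock(side_effect=fake_add_rules)
    result = add_rules([{'display_name': 'rule-1'}], 'section-id')
    assert len(result['rules']) == 1
    assert result['rules'][0]['display_name'] == 'rule-1'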
rules = args[0] return { 'rules': [ {'display_name': rule['display_name'], 'id': uuidutils.generate_uuid()} for rule in rules ]} def _mock_limits(*args): return utils.TagLimits(20, 40, 15) mocking = [] mocking.append(mock.patch( "vmware_nsxlib.v3.cluster.NSXRequestsHTTPProvider" ".validate_connection")) mocking.append(mock.patch( "vmware_nsxlib.v3.security.NsxLibNsGroup.create", side_effect=_return_id_key )) mocking.append(mock.patch( "vmware_nsxlib.v3.security.NsxLibFirewallSection.create_empty", side_effect=_return_id_key)) mocking.append(mock.patch( "vmware_nsxlib.v3.security.NsxLibFirewallSection.init_default", side_effect=_return_id_key)) mocking.append(mock.patch( "vmware_nsxlib.v3.security.NsxLibNsGroup.list")) mocking.append(mock.patch( "vmware_nsxlib.v3.security.NsxLibFirewallSection.add_rules", side_effect=_mock_add_rules_in_section)) mocking.append(mock.patch( ("vmware_nsxlib.v3.core_resources." "NsxLibTransportZone.get_id_by_name_or_id"), return_value=uuidutils.generate_uuid())) mocking.append(mock.patch( "vmware_nsxlib.v3.NsxLib.get_tag_limits", side_effect=_mock_limits)) for m in mocking: m.start() return mocking def get_default_nsxlib_config(): return config.NsxLibConfig( username=NSX_USER, password=NSX_PASSWORD, retries=NSX_HTTP_RETRIES, insecure=NSX_INSECURE, ca_file=NSX_CERT, concurrent_connections=NSX_CONCURENT_CONN, http_timeout=NSX_HTTP_TIMEOUT, http_read_timeout=NSX_HTTP_READ_TIMEOUT, conn_idle_timeout=NSX_CONN_IDLE_TIME, http_provider=None, nsx_api_managers=[], plugin_scope=PLUGIN_SCOPE, plugin_tag=PLUGIN_TAG, plugin_ver=PLUGIN_VER, dns_nameservers=DNS_NAMESERVERS, dns_domain=DNS_DOMAIN ) def get_nsxlib_config_with_client_cert(): return config.NsxLibConfig( client_cert_provider=client_cert.ClientCertProvider(CLIENT_CERT), retries=NSX_HTTP_RETRIES, insecure=NSX_INSECURE, ca_file=NSX_CERT, concurrent_connections=NSX_CONCURENT_CONN, http_timeout=NSX_HTTP_TIMEOUT, http_read_timeout=NSX_HTTP_READ_TIMEOUT, conn_idle_timeout=NSX_CONN_IDLE_TIME, http_provider=None, nsx_api_managers=[], plugin_scope=PLUGIN_SCOPE, plugin_tag=PLUGIN_TAG, plugin_ver=PLUGIN_VER) class NsxLibTestCase(unittest.TestCase): def use_client_cert_auth(self): return False def setUp(self, *args, **kwargs): super(NsxLibTestCase, self).setUp() self.mocking = _mock_nsxlib() if self.use_client_cert_auth(): nsxlib_config = get_nsxlib_config_with_client_cert() else: nsxlib_config = get_default_nsxlib_config() self.nsxlib = v3.NsxLib(nsxlib_config) # print diffs when assert comparisons fail self.maxDiff = None def tearDown(self, *args, **kwargs): # stop the mocks for m in self.mocking: m.stop() super(NsxLibTestCase, self).tearDown() class MemoryMockAPIProvider(nsx_cluster.AbstractHTTPProvider): """Acts as a HTTP provider for mocking which is backed by a MockRequestSessionApi. 
""" def __init__(self, mock_session_api): self._store = mock_session_api @property def provider_id(self): return "Memory mock API" def validate_connection(self, cluster_api, endpoint, conn): return def new_connection(self, cluster_api, provider): # all callers use the same backing return self._store def is_connection_exception(self, exception): return isinstance(exception, requests_exceptions.ConnectionError) class NsxClientTestCase(NsxLibTestCase): class MockNSXClusteredAPI(nsx_cluster.NSXClusteredAPI): def __init__( self, session_response=None, username=None, password=None, retries=None, insecure=None, ca_file=None, concurrent_connections=None, http_timeout=None, http_read_timeout=None, conn_idle_timeout=None, nsx_api_managers=None): nsxlib_config = config.NsxLibConfig( username=username or NSX_USER, password=password or NSX_PASSWORD, retries=retries or NSX_HTTP_RETRIES, insecure=insecure if insecure is not None else NSX_INSECURE, ca_file=ca_file or NSX_CERT, concurrent_connections=(concurrent_connections or NSX_CONCURENT_CONN), http_timeout=http_timeout or NSX_HTTP_TIMEOUT, http_read_timeout=http_read_timeout or NSX_HTTP_READ_TIMEOUT, conn_idle_timeout=conn_idle_timeout or NSX_CONN_IDLE_TIME, http_provider=NsxClientTestCase.MockHTTPProvider( session_response=session_response), nsx_api_managers=nsx_api_managers or [NSX_MANAGER], plugin_scope=PLUGIN_SCOPE, plugin_tag=PLUGIN_TAG, plugin_ver=PLUGIN_VER) super(NsxClientTestCase.MockNSXClusteredAPI, self).__init__( nsxlib_config) self._record = mock.Mock() def record_call(self, request, **kwargs): verb = request.method.lower() # filter out requests specific attributes checked_kwargs = copy.copy(kwargs) del checked_kwargs['proxies'] del checked_kwargs['stream'] if 'allow_redirects' in checked_kwargs: del checked_kwargs['allow_redirects'] for attr in ['url', 'body']: checked_kwargs[attr] = getattr(request, attr, None) # remove headers we don't need to verify checked_kwargs['headers'] = copy.copy(request.headers) for header in ['Accept-Encoding', 'User-Agent', 'Connection', 'Authorization', 'Content-Length']: if header in checked_kwargs['headers']: del checked_kwargs['headers'][header] checked_kwargs['headers'] = request.headers # record the call in the mock object method = getattr(self._record, verb) method(**checked_kwargs) def assert_called_once(self, verb, **kwargs): mock_call = getattr(self._record, verb.lower()) mock_call.assert_called_once_with(**kwargs) def assert_any_call(self, verb, **kwargs): mock_call = getattr(self._record, verb.lower()) mock_call.assert_any_call(**kwargs) def call_count(self, verb): mock_call = getattr(self._record, verb.lower()) return mock_call.call_count @property def recorded_calls(self): return self._record class MockHTTPProvider(nsx_cluster.NSXRequestsHTTPProvider): def __init__(self, session_response=None): super(NsxClientTestCase.MockHTTPProvider, self).__init__() if isinstance(session_response, list): self._session_responses = session_response elif session_response: self._session_responses = [session_response] else: self._session_responses = None def new_connection(self, cluster_api, provider): # wrapper the session so we can intercept and record calls session = super(NsxClientTestCase.MockHTTPProvider, self).new_connection(cluster_api, provider) mock_adapter = mock.Mock() session_send = session.send def _adapter_send(request, **kwargs): # record calls at the requests HTTP adapter level mock_response = mock.Mock() mock_response.history = None mock_response.headers = {'location': ''} # needed to bypass 
requests internal checks for mock mock_response.raw._original_response = {} # record the request for later verification cluster_api.record_call(request, **kwargs) return mock_response def _session_send(request, **kwargs): # calls at the Session level if self._session_responses: # pop first response current_response = self._session_responses[0] del self._session_responses[0] # consumer has setup a response for the session cluster_api.record_call(request, **kwargs) return (current_response() if hasattr(current_response, '__call__') else current_response) # bypass requests redirect handling for mock kwargs['allow_redirects'] = False # session send will end up calling adapter send return session_send(request, **kwargs) mock_adapter.send = _adapter_send session.send = _session_send def _mock_adapter(*args, **kwargs): # use our mock adapter rather than requests adapter return mock_adapter session.get_adapter = _mock_adapter return session def validate_connection(self, cluster_api, endpoint, conn): assert conn is not None def mock_nsx_clustered_api(self, session_response=None, **kwargs): orig_request = nsx_cluster.TimeoutSession.request def mocked_request(*args, **kwargs): if args[2].endswith('api/session/create'): response = models.Response() response.status_code = 200 response.headers = { 'Set-Cookie': 'JSESSIONID=%s;junk' % JSESSIONID} return response return orig_request(*args, **kwargs) with mock.patch.object(nsx_cluster.TimeoutSession, 'request', new=mocked_request): cluster = NsxClientTestCase.MockNSXClusteredAPI( session_response=session_response, **kwargs) return cluster @staticmethod def default_headers(): return {'Content-Type': 'application/json', 'Accept': 'application/json', 'Cookie': 'JSESSIONID=%s;' % JSESSIONID} def mocked_resource(self, resource_class, mock_validate=True, session_response=None): mocked = resource_class(nsx_client.NSX3Client( self.mock_nsx_clustered_api(session_response=session_response), nsx_api_managers=[NSX_MANAGER], max_attempts=NSX_MAX_ATTEMPTS), nsxlib_config=get_default_nsxlib_config(), nsxlib=self.nsxlib) if mock_validate: mock.patch.object(mocked.client, '_validate_result').start() return mocked def new_mocked_client(self, client_class, mock_validate=True, session_response=None, mock_cluster=None, **kwargs): client = client_class(mock_cluster or self.mock_nsx_clustered_api( session_response=session_response), **kwargs) if mock_validate: mock.patch.object(client, '_validate_result').start() new_client_for = client.new_client_for def _new_client_for(*args, **kwargs): sub_client = new_client_for(*args, **kwargs) if mock_validate: mock.patch.object(sub_client, '_validate_result').start() return sub_client client.new_client_for = _new_client_for return client def new_mocked_cluster(self, conf_managers, validate_conn_func, concurrent_connections=None): mock_provider = mock.Mock() mock_provider.default_scheme = 'https' mock_provider.validate_connection = validate_conn_func nsxlib_config = get_default_nsxlib_config() if concurrent_connections: nsxlib_config.concurrent_connections = concurrent_connections nsxlib_config.http_provider = mock_provider nsxlib_config.nsx_api_managers = conf_managers return nsx_cluster.NSXClusteredAPI(nsxlib_config) def assert_json_call(self, method, client, url, headers=None, timeout=(NSX_HTTP_TIMEOUT, NSX_HTTP_READ_TIMEOUT), data=None): cluster = client._conn if data: data = jsonutils.dumps(data, sort_keys=True) if not headers: headers = self.default_headers() cluster.assert_called_once( method, **{'url': url, 'verify': NSX_CERT, 
'body': data, 'headers': headers, 'cert': None, 'timeout': timeout}) vmware-nsxlib-12.0.1/vmware_nsxlib/tests/unit/v3/test_utils.py0000666000175100017510000003312713244535763024543 0ustar zuulzuul00000000000000# Copyright (c) 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase from vmware_nsxlib.v3 import exceptions from vmware_nsxlib.v3 import nsx_constants from vmware_nsxlib.v3 import utils class TestNsxV3Utils(nsxlib_testcase.NsxClientTestCase): def test_build_v3_tags_payload(self): result = self.nsxlib.build_v3_tags_payload( {'id': 'fake_id', 'project_id': 'fake_proj_id'}, resource_type='os-net-id', project_name='fake_proj_name') expected = [{'scope': 'os-net-id', 'tag': 'fake_id'}, {'scope': 'os-project-id', 'tag': 'fake_proj_id'}, {'scope': 'os-project-name', 'tag': 'fake_proj_name'}, {'scope': 'os-api-version', 'tag': nsxlib_testcase.PLUGIN_VER}] self.assertEqual(expected, result) def test_build_v3_tags_payload_internal(self): result = self.nsxlib.build_v3_tags_payload( {'id': 'fake_id', 'project_id': 'fake_proj_id'}, resource_type='os-net-id', project_name=None) expected = [{'scope': 'os-net-id', 'tag': 'fake_id'}, {'scope': 'os-project-id', 'tag': 'fake_proj_id'}, {'scope': 'os-project-name', 'tag': nsxlib_testcase.PLUGIN_TAG}, {'scope': 'os-api-version', 'tag': nsxlib_testcase.PLUGIN_VER}] self.assertEqual(expected, result) def test_build_v3_tags_payload_invalid_length(self): self.assertRaises(exceptions.NsxLibInvalidInput, self.nsxlib.build_v3_tags_payload, {'id': 'fake_id', 'project_id': 'fake_proj_id'}, resource_type='os-longer-maldini-rocks-id', project_name='fake') def test_build_v3_api_version_tag(self): result = self.nsxlib.build_v3_api_version_tag() expected = [{'scope': nsxlib_testcase.PLUGIN_SCOPE, 'tag': nsxlib_testcase.PLUGIN_TAG}, {'scope': 'os-api-version', 'tag': nsxlib_testcase.PLUGIN_VER}] self.assertEqual(expected, result) def test_is_internal_resource(self): project_tag = self.nsxlib.build_v3_tags_payload( {'id': 'fake_id', 'project_id': 'fake_proj_id'}, resource_type='os-net-id', project_name=None) internal_tag = self.nsxlib.build_v3_api_version_tag() expect_false = self.nsxlib.is_internal_resource({'tags': project_tag}) self.assertFalse(expect_false) expect_true = self.nsxlib.is_internal_resource({'tags': internal_tag}) self.assertTrue(expect_true) def test_get_name_and_uuid(self): uuid = 'afc40f8a-4967-477e-a17a-9d560d1786c7' suffix = '_afc40...786c7' expected = 'maldini%s' % suffix short_name = utils.get_name_and_uuid('maldini', uuid) self.assertEqual(expected, short_name) name = 'X' * 255 expected = '%s%s' % ('X' * (80 - len(suffix)), suffix) short_name = utils.get_name_and_uuid(name, uuid) self.assertEqual(expected, short_name) def test_build_v3_tags_max_length_payload(self): result = self.nsxlib.build_v3_tags_payload( {'id': 'X' * 255, 'project_id': 'X' * 255}, resource_type='os-net-id', project_name='X' * 255) expected = [{'scope': 'os-net-id', 'tag': 'X' * 40}, {'scope': 
'os-project-id', 'tag': 'X' * 40}, {'scope': 'os-project-name', 'tag': 'X' * 40}, {'scope': 'os-api-version', 'tag': nsxlib_testcase.PLUGIN_VER}] self.assertEqual(expected, result) def test_add_v3_tag(self): result = utils.add_v3_tag([], 'fake-scope', 'fake-tag') expected = [{'scope': 'fake-scope', 'tag': 'fake-tag'}] self.assertEqual(expected, result) def test_add_v3_tag_max_length_payload(self): result = utils.add_v3_tag([], 'fake-scope', 'X' * 255) expected = [{'scope': 'fake-scope', 'tag': 'X' * 40}] self.assertEqual(expected, result) def test_add_v3_tag_invalid_scope_length(self): self.assertRaises(exceptions.NsxLibInvalidInput, utils.add_v3_tag, [], 'fake-scope-name-is-far-too-long', 'fake-tag') def test_update_v3_tags_addition(self): tags = [{'scope': 'os-net-id', 'tag': 'X' * 40}, {'scope': 'os-project-id', 'tag': 'Y' * 40}, {'scope': 'os-project-name', 'tag': 'Z' * 40}, {'scope': 'os-api-version', 'tag': nsxlib_testcase.PLUGIN_VER}] resources = [{'scope': 'os-instance-uuid', 'tag': 'A' * 40}] tags = utils.update_v3_tags(tags, resources) expected = [{'scope': 'os-net-id', 'tag': 'X' * 40}, {'scope': 'os-project-id', 'tag': 'Y' * 40}, {'scope': 'os-project-name', 'tag': 'Z' * 40}, {'scope': 'os-api-version', 'tag': nsxlib_testcase.PLUGIN_VER}, {'scope': 'os-instance-uuid', 'tag': 'A' * 40}] self.assertEqual(sorted(expected, key=lambda x: x.get('tag')), sorted(tags, key=lambda x: x.get('tag'))) def test_update_v3_tags_removal(self): tags = [{'scope': 'os-net-id', 'tag': 'X' * 40}, {'scope': 'os-project-id', 'tag': 'Y' * 40}, {'scope': 'os-project-name', 'tag': 'Z' * 40}, {'scope': 'os-api-version', 'tag': nsxlib_testcase.PLUGIN_VER}] resources = [{'scope': 'os-net-id', 'tag': ''}] tags = utils.update_v3_tags(tags, resources) expected = [{'scope': 'os-project-id', 'tag': 'Y' * 40}, {'scope': 'os-project-name', 'tag': 'Z' * 40}, {'scope': 'os-api-version', 'tag': nsxlib_testcase.PLUGIN_VER}] self.assertEqual(sorted(expected, key=lambda x: x.get('tag')), sorted(tags, key=lambda x: x.get('tag'))) def test_update_v3_tags_update(self): tags = [{'scope': 'os-net-id', 'tag': 'X' * 40}, {'scope': 'os-project-id', 'tag': 'Y' * 40}, {'scope': 'os-project-name', 'tag': 'Z' * 40}, {'scope': 'os-api-version', 'tag': nsxlib_testcase.PLUGIN_VER}] resources = [{'scope': 'os-project-id', 'tag': 'A' * 40}] tags = utils.update_v3_tags(tags, resources) expected = [{'scope': 'os-net-id', 'tag': 'X' * 40}, {'scope': 'os-project-id', 'tag': 'A' * 40}, {'scope': 'os-project-name', 'tag': 'Z' * 40}, {'scope': 'os-api-version', 'tag': nsxlib_testcase.PLUGIN_VER}] self.assertEqual(sorted(expected, key=lambda x: x.get('tag')), sorted(tags, key=lambda x: x.get('tag'))) def test_update_v3_tags_repetitive_scopes(self): tags = [{'scope': 'os-net-id', 'tag': 'X' * 40}, {'scope': 'os-project-id', 'tag': 'Y' * 40}, {'scope': 'os-project-name', 'tag': 'Z' * 40}, {'scope': 'os-security-group', 'tag': 'SG1'}, {'scope': 'os-security-group', 'tag': 'SG2'}] tags_update = [{'scope': 'os-security-group', 'tag': 'SG3'}, {'scope': 'os-security-group', 'tag': 'SG4'}] tags = utils.update_v3_tags(tags, tags_update) expected = [{'scope': 'os-net-id', 'tag': 'X' * 40}, {'scope': 'os-project-id', 'tag': 'Y' * 40}, {'scope': 'os-project-name', 'tag': 'Z' * 40}, {'scope': 'os-security-group', 'tag': 'SG3'}, {'scope': 'os-security-group', 'tag': 'SG4'}] self.assertEqual(sorted(expected, key=lambda x: x.get('tag')), sorted(tags, key=lambda x: x.get('tag'))) def test_update_v3_tags_repetitive_scopes_remove(self): tags = [{'scope': 
'os-net-id', 'tag': 'X' * 40}, {'scope': 'os-project-id', 'tag': 'Y' * 40}, {'scope': 'os-project-name', 'tag': 'Z' * 40}, {'scope': 'os-security-group', 'tag': 'SG1'}, {'scope': 'os-security-group', 'tag': 'SG2'}] tags_update = [{'scope': 'os-security-group', 'tag': None}] tags = utils.update_v3_tags(tags, tags_update) expected = [{'scope': 'os-net-id', 'tag': 'X' * 40}, {'scope': 'os-project-id', 'tag': 'Y' * 40}, {'scope': 'os-project-name', 'tag': 'Z' * 40}] self.assertEqual(sorted(expected, key=lambda x: x.get('tag')), sorted(tags, key=lambda x: x.get('tag'))) def test_build_extra_args_positive(self): extra_args = ['fall_count', 'interval', 'monitor_port', 'request_body', 'request_method', 'request_url', 'request_version', 'response_body', 'response_status', 'rise_count', 'timeout'] body = {'display_name': 'httpmonitor1', 'description': 'my http monitor'} expected = {'display_name': 'httpmonitor1', 'description': 'my http monitor', 'interval': 5, 'rise_count': 3, 'fall_count': 3} resp = utils.build_extra_args(body, extra_args, interval=5, rise_count=3, fall_count=3) self.assertEqual(resp, expected) def test_build_extra_args_negative(self): extra_args = ['cookie_domain', 'cookie_fallback', 'cookie_garble', 'cookie_mode', 'cookie_name', 'cookie_path', 'cookie_time'] body = {'display_name': 'persistenceprofile1', 'description': 'my persistence profile', 'resource_type': 'LoadBalancerCookiePersistenceProfile'} expected = {'display_name': 'persistenceprofile1', 'description': 'my persistence profile', 'resource_type': 'LoadBalancerCookiePersistenceProfile', 'cookie_mode': 'INSERT', 'cookie_name': 'ABC', 'cookie_fallback': True} resp = utils.build_extra_args(body, extra_args, cookie_mode='INSERT', cookie_name='ABC', cookie_fallback=True, bogus='bogus') self.assertEqual(resp, expected) def test_retry(self): max_retries = 5 total_count = {'val': 0} @utils.retry_upon_exception(exceptions.NsxLibInvalidInput, max_attempts=max_retries) def func_to_fail(x): total_count['val'] = total_count['val'] + 1 raise exceptions.NsxLibInvalidInput(error_message='foo') self.assertRaises(exceptions.NsxLibInvalidInput, func_to_fail, 99) self.assertEqual(max_retries, total_count['val']) def test_retry_random(self): max_retries = 5 total_count = {'val': 0} @utils.retry_random_upon_exception(exceptions.NsxLibInvalidInput, max_attempts=max_retries) def func_to_fail(x): total_count['val'] = total_count['val'] + 1 raise exceptions.NsxLibInvalidInput(error_message='foo') self.assertRaises(exceptions.NsxLibInvalidInput, func_to_fail, 99) self.assertEqual(max_retries, total_count['val']) @mock.patch.object(utils, '_update_max_tags') @mock.patch.object(utils, '_update_tag_length') @mock.patch.object(utils, '_update_resource_length') def test_update_limits(self, _update_resource_length, _update_tag_length, _update_max_tags): limits = utils.TagLimits(1, 2, 3) utils.update_tag_limits(limits) _update_resource_length.assert_called_with(1) _update_tag_length.assert_called_with(2) _update_max_tags.assert_called_with(3) class NsxFeaturesTestCase(nsxlib_testcase.NsxLibTestCase): def test_v2_features(self, current_version='2.0.0'): self.nsxlib.nsx_version = current_version self.assertTrue(self.nsxlib.feature_supported( nsx_constants.FEATURE_ROUTER_FIREWALL)) self.assertTrue(self.nsxlib.feature_supported( nsx_constants.FEATURE_EXCLUDE_PORT_BY_TAG)) def test_v2_features_plus(self): self.test_v2_features(current_version='2.0.1') def test_v2_features_minus(self): self.nsxlib.nsx_version = '1.9.9' 
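# A standalone sketch of the version-gating idea these feature tests rely on:
# a feature is reported as supported only when the backend version is at
# least the version that introduced it.  The helpers below are simplified
# assumptions for illustration, not the library's own implementation.
def _version_tuple(version_str):
    # '2.0.1' -> (2, 0, 1) so versions compare numerically, not lexically
    return tuple(int(part) for part in version_str.split('.'))


def _feature_supported(current_version, introduced_in):
    return _version_tuple(current_version) >= _version_tuple(introduced_in)


assert _feature_supported('2.0.1', '2.0.0')
assert not _feature_supported('1.9.9', '2.0.0')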
self.assertFalse(self.nsxlib.feature_supported( nsx_constants.FEATURE_ROUTER_FIREWALL)) self.assertFalse(self.nsxlib.feature_supported( nsx_constants.FEATURE_EXCLUDE_PORT_BY_TAG)) self.assertTrue(self.nsxlib.feature_supported( nsx_constants.FEATURE_MAC_LEARNING)) vmware-nsxlib-12.0.1/vmware_nsxlib/tests/unit/v3/__init__.py0000666000175100017510000000000013244535763024063 0ustar zuulzuul00000000000000vmware-nsxlib-12.0.1/vmware_nsxlib/tests/unit/v3/test_policy_api.py0000666000175100017510000003403413244535763025531 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase from vmware_nsxlib.v3 import client from vmware_nsxlib.v3 import policy_constants from vmware_nsxlib.v3 import policy_defs as policy BASE_POLICY_URI = "https://1.2.3.4/policy/api/v1/" class TestPolicyApi(nsxlib_testcase.NsxClientTestCase): def setUp(self): self.client = self.new_mocked_client(client.NSX3Client, url_prefix='policy/api/v1/') self.policy_api = policy.NsxPolicyApi(self.client) super(TestPolicyApi, self).setUp() def assert_json_call(self, method, client, url, data=None): url = BASE_POLICY_URI + url return super(TestPolicyApi, self).assert_json_call( method, client, url, data=data) class TestPolicyDomain(TestPolicyApi): def test_create(self): domain_def = policy.DomainDef( 'archaea', 'prokaryotic cells', 'typically characterized by membrane lipids') self.policy_api.create_or_update(domain_def) self.assert_json_call('POST', self.client, 'infra/domains/archaea', data=domain_def.get_obj_dict()) def test_delete(self): domain_def = policy.DomainDef('bacteria') self.policy_api.delete(domain_def) self.assert_json_call('DELETE', self.client, 'infra/domains/bacteria') def test_get(self): domain_def = policy.DomainDef('eukarya') self.policy_api.get(domain_def) self.assert_json_call('GET', self.client, 'infra/domains/eukarya') def test_list(self): domain_def = policy.DomainDef() self.policy_api.list(domain_def) self.assert_json_call('GET', self.client, 'infra/domains') class TestPolicyGroup(TestPolicyApi): def test_create(self): group_def = policy.GroupDef( 'eukarya', 'cats', 'felis catus') self.policy_api.create_or_update(group_def) self.assert_json_call('POST', self.client, 'infra/domains/eukarya/groups/cats', data=group_def.get_obj_dict()) def test_create_with_domain(self): domain_def = policy.DomainDef('eukarya', 'eukarya', 'dude with cell membranes') group_def = policy.GroupDef('eukarya', 'cats', 'Ailuropoda melanoleuca') self.policy_api.create_with_parent(domain_def, group_def) data = domain_def.get_obj_dict() data['groups'] = [group_def.get_obj_dict()] self.assert_json_call('POST', self.client, 'infra/domains/eukarya', data=data) def test_create_with_single_tag(self): domain_def = policy.DomainDef('eukarya') group_def = policy.GroupDef('eukarya', 'dogs', conditions=policy.Condition('spaniel')) self.policy_api.create_with_parent(domain_def, group_def) data = domain_def.get_obj_dict() data['groups'] = 
[group_def.get_obj_dict()] # validate body structure and defaults expected_condition = {'value': 'spaniel', 'operator': 'EQUALS', 'member_type': 'LogicalPort', 'resource_type': 'Condition', 'key': 'Tag'} expected_group = {'id': 'dogs', 'display_name': None, 'description': None, 'expression': [expected_condition]} expected_data = {'id': 'eukarya', 'display_name': None, 'description': None, 'groups': [expected_group]} self.assert_json_call('POST', self.client, 'infra/domains/eukarya', data=expected_data) def test_create_with_multi_tag(self): domain_def = policy.DomainDef('eukarya') pines = policy.Condition( 'pine', operator=policy_constants.CONDITION_OP_CONTAINS) maples = policy.Condition( 'maple', operator=policy_constants.CONDITION_OP_STARTS_WITH) group_def = policy.GroupDef('eukarya', 'trees', conditions=[pines, maples]) self.policy_api.create_with_parent(domain_def, group_def) data = domain_def.get_obj_dict() data['groups'] = [group_def.get_obj_dict()] self.assert_json_call('POST', self.client, 'infra/domains/eukarya', data=data) def test_delete(self): group_def = policy.GroupDef(domain_id='eukarya', group_id='giraffe') self.policy_api.delete(group_def) self.assert_json_call('DELETE', self.client, 'infra/domains/eukarya/groups/giraffe') class TestPolicyService(TestPolicyApi): def test_create(self): service_def = policy.ServiceDef('roomservice') self.policy_api.create_or_update(service_def) self.assert_json_call('POST', self.client, 'infra/services/roomservice', data=service_def.get_obj_dict()) def test_create_l4_with_parent(self): service_def = policy.ServiceDef('roomservice') entry_def = policy.L4ServiceEntryDef('roomservice', 'http', name='room http', dest_ports=[80, 8080]) self.policy_api.create_with_parent(service_def, entry_def) expected_entry = {'id': 'http', 'resource_type': 'L4PortSetServiceEntry', 'display_name': 'room http', 'description': None, 'l4_protocol': 'TCP', 'destination_ports': [80, 8080]} expected_data = {'id': 'roomservice', 'display_name': None, 'description': None, 'service_entries': [expected_entry]} self.assert_json_call('POST', self.client, 'infra/services/roomservice', data=expected_data) def test_create_icmp_with_parent(self): service_def = policy.ServiceDef('icmpservice') entry_def = policy.IcmpServiceEntryDef('icmpservice', 'icmp', name='icmpv4') self.policy_api.create_with_parent(service_def, entry_def) expected_entry = {'id': 'icmp', 'resource_type': 'ICMPTypeServiceEntry', 'display_name': 'icmpv4', 'description': None, 'protocol': 'ICMPv4'} expected_data = {'id': 'icmpservice', 'display_name': None, 'description': None, 'service_entries': [expected_entry]} self.assert_json_call('POST', self.client, 'infra/services/icmpservice', data=expected_data) class TestPolicyCommunicationProfile(TestPolicyApi): def test_create(self): profile_def = policy.CommunicationProfileDef('rental') self.policy_api.create_or_update(profile_def) self.assert_json_call('POST', self.client, 'infra/communication-profiles/rental', data=profile_def.get_obj_dict()) def test_create_with_parent(self): profile_def = policy.CommunicationProfileDef('rental') entry_def = policy.CommunicationProfileEntryDef( 'rental', 'room1', description='includes roomservice', services=["roomservice"]) self.policy_api.create_with_parent(profile_def, entry_def) expected_entry = {'id': 'room1', 'display_name': None, 'description': 'includes roomservice', 'services': ["roomservice"], 'action': 'ALLOW'} expected_data = {'id': 'rental', 'display_name': None, 'description': None, 'communication_profile_entries': 
[expected_entry]} self.assert_json_call('POST', self.client, 'infra/communication-profiles/rental', data=expected_data) class TestPolicyCommunicationMap(TestPolicyApi): def setUp(self): super(TestPolicyCommunicationMap, self).setUp() self.entry1 = policy.CommunicationMapEntryDef( 'd1', 'cm1', sequence_number=12, source_groups=["group1", "group2"], dest_groups=["group1"], profile_id="profile1") self.entry2 = policy.CommunicationMapEntryDef( 'd1', 'cm2', sequence_number=13, source_groups=["group1", "group2"], dest_groups=["group3"], profile_id="profile2") self.expected_data1 = {'id': 'cm1', 'display_name': None, 'description': None, 'sequence_number': 12, 'source_groups': ['/infra/domains/d1/groups/group1', '/infra/domains/d1/groups/group2'], 'destination_groups': ['/infra/domains/d1/groups/group1'], 'communication_profile_path': '/infra/communication-profiles/profile1'} self.expected_data2 = {'id': 'cm2', 'display_name': None, 'description': None, 'sequence_number': 13, 'source_groups': ['/infra/domains/d1/groups/group1', '/infra/domains/d1/groups/group2'], 'destination_groups': ['/infra/domains/d1/groups/group3'], 'communication_profile_path': '/infra/communication-profiles/profile2'} def test_create_with_one_entry(self): map_def = policy.CommunicationMapDef('d1') self.policy_api.create_with_parent(map_def, self.entry1) expected_data = map_def.get_obj_dict() expected_data['communication_entries'] = [self.expected_data1] self.assert_json_call('POST', self.client, 'infra/domains/d1/communication-map', data=expected_data) def test_create_with_two_entries(self): map_def = policy.CommunicationMapDef('d1') self.policy_api.create_with_parent(map_def, [self.entry1, self.entry2]) expected_data = map_def.get_obj_dict() expected_data['communication_entries'] = [self.expected_data1, self.expected_data2] self.assert_json_call('POST', self.client, 'infra/domains/d1/communication-map', data=expected_data) def test_update_entry(self): self.policy_api.create_or_update(self.entry1) self.assert_json_call('POST', self.client, 'infra/domains/d1/communication-map/' 'communication-entries/cm1', data=self.expected_data1) def test_delete_entry(self): self.policy_api.delete(self.entry2) self.assert_json_call('DELETE', self.client, 'infra/domains/d1/communication-map/' 'communication-entries/cm2') class TestPolicyEnforcementPoint(TestPolicyApi): def test_create(self): ep_def = policy.EnforcementPointDef('ep1', name='The Point', ip_address='1.1.1.1', username='admin', password='a') self.policy_api.create_or_update(ep_def) ep_path = policy.EnforcementPointDef('ep1').get_resource_path() self.assert_json_call('POST', self.client, ep_path, data=ep_def.get_obj_dict()) class TestPolicyDeploymentMap(TestPolicyApi): def test_create(self): map_def = policy.DeploymentMapDef('dm1', domain_id='d1', ep_id='ep1') self.policy_api.create_or_update(map_def) ep_path = policy.EnforcementPointDef('ep1').get_resource_full_path() expected_data = {'id': 'dm1', 'display_name': None, 'description': None, 'enforcement_point_path': ep_path} self.assert_json_call('POST', self.client, 'infra/domains/d1/domain-deployment-maps/dm1', data=expected_data) vmware-nsxlib-12.0.1/vmware_nsxlib/tests/unit/v3/test_ns_group_manager.py0000666000175100017510000001774313244535763026737 0ustar zuulzuul00000000000000# Copyright (c) 2015 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
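# The communication-map tests above compare request bodies against fully
# qualified policy paths such as '/infra/domains/d1/groups/group1' and
# '/infra/communication-profiles/profile1'.  Below is a minimal sketch of
# composing those paths from ids; the helper names are hypothetical and not
# part of the library API.
def _group_path(domain_id, group_id):
    return '/infra/domains/%s/groups/%s' % (domain_id, group_id)


def _profile_path(profile_id):
    return '/infra/communication-profiles/%s' % profile_id


assert _group_path('d1', 'group1') == '/infra/domains/d1/groups/group1'
assert _profile_path('profile1') == '/infra/communication-profiles/profile1'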
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3 import ns_group_manager from vmware_nsxlib.v3 import nsx_constants as consts # Pool of fake ns-groups uuids NSG_IDS = ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222', '33333333-3333-3333-3333-333333333333', '44444444-4444-4444-4444-444444444444', '55555555-5555-5555-5555-555555555555'] def _mock_create_and_list_nsgroups(test_method): nsgroups = [] def _create_nsgroup_mock(name, desc, tags, membership_criteria=None): nsgroup = {'id': NSG_IDS[len(nsgroups)], 'display_name': name, 'description': desc, 'tags': tags} nsgroups.append(nsgroup) return nsgroup def wrap(*args, **kwargs): with mock.patch( 'vmware_nsxlib.v3.security.NsxLibNsGroup.create' ) as create_nsgroup_mock: create_nsgroup_mock.side_effect = _create_nsgroup_mock with mock.patch( "vmware_nsxlib.v3.security.NsxLibNsGroup.list" ) as list_nsgroups_mock: list_nsgroups_mock.side_effect = lambda: nsgroups test_method(*args, **kwargs) return wrap class TestNSGroupManager(nsxlib_testcase.NsxLibTestCase): """Tests for vmware_nsxlib.v3.ns_group_manager.NSGroupManager.""" @_mock_create_and_list_nsgroups def test_first_initialization(self): size = 5 cont_manager = ns_group_manager.NSGroupManager(self.nsxlib, size) nested_groups = cont_manager.nested_groups self.assertEqual({i: NSG_IDS[i] for i in range(size)}, nested_groups) @_mock_create_and_list_nsgroups def test_reconfigure_number_of_nested_groups(self): # We need to test that when changing the number of nested groups then # the NSGroupManager picks the ones which were previously created # and create the ones which are missing, which also verifies that it # also recognizes existing nested groups. size = 2 # Creates 2 nested groups. ns_group_manager.NSGroupManager(self.nsxlib, size) size = 5 # Creates another 3 nested groups. nested_groups = ns_group_manager.NSGroupManager( self.nsxlib, size).nested_groups self.assertEqual({i: NSG_IDS[i] for i in range(size)}, nested_groups) @_mock_create_and_list_nsgroups @mock.patch('vmware_nsxlib.v3.security.NsxLibNsGroup.remove_member') @mock.patch('vmware_nsxlib.v3.security.NsxLibNsGroup.add_members') def test_add_and_remove_nsgroups(self, add_member_mock, remove_member_mock): # We verify that when adding a new nsgroup the properly placed # according to its id and the number of nested groups. size = 5 cont_manager = ns_group_manager.NSGroupManager(self.nsxlib, size) nsgroup_id = 'nsgroup_id' with mock.patch.object(cont_manager, '_hash_uuid', return_value=7): cont_manager.add_nsgroup(nsgroup_id) cont_manager.remove_nsgroup(nsgroup_id) # There are 5 nested groups, the hash function will return 7, therefore # we expect that the nsgroup will be placed in the 3rd group. 
add_member_mock.assert_called_once_with( NSG_IDS[2], consts.NSGROUP, [nsgroup_id]) remove_member_mock.assert_called_once_with( NSG_IDS[2], consts.NSGROUP, nsgroup_id, verify=True) @_mock_create_and_list_nsgroups @mock.patch('vmware_nsxlib.v3.security.NsxLibNsGroup.remove_member') @mock.patch('vmware_nsxlib.v3.security.NsxLibNsGroup.add_members') def test_when_nested_group_is_full(self, add_member_mock, remove_member_mock): def _add_member_mock(nsgroup, target_type, target_id): if nsgroup == NSG_IDS[2]: raise nsxlib_exc.NSGroupIsFull(nsgroup_id=nsgroup) def _remove_member_mock(nsgroup, target_type, target_id, verify=False): if nsgroup == NSG_IDS[2]: raise nsxlib_exc.NSGroupMemberNotFound(nsgroup_id=nsgroup, member_id=target_id) add_member_mock.side_effect = _add_member_mock remove_member_mock.side_effect = _remove_member_mock size = 5 cont_manager = ns_group_manager.NSGroupManager(self.nsxlib, size) nsgroup_id = 'nsgroup_id' with mock.patch.object(cont_manager, '_hash_uuid', return_value=7): cont_manager.add_nsgroup(nsgroup_id) cont_manager.remove_nsgroup(nsgroup_id) # Trying to add nsgroup to the nested group at index 2 will raise # NSGroupIsFull exception, we expect that the nsgroup will be added to # the nested group at index 3. calls = [mock.call(NSG_IDS[2], consts.NSGROUP, [nsgroup_id]), mock.call(NSG_IDS[3], consts.NSGROUP, [nsgroup_id])] add_member_mock.assert_has_calls(calls) # Since the nsgroup was added to the nested group at index 3, it will # fail to remove it from the group at index 2, and then will try to # remove it from the group at index 3. calls = [ mock.call( NSG_IDS[2], consts.NSGROUP, nsgroup_id, verify=True), mock.call( NSG_IDS[3], consts.NSGROUP, nsgroup_id, verify=True)] remove_member_mock.assert_has_calls(calls) @_mock_create_and_list_nsgroups @mock.patch('vmware_nsxlib.v3.security.NsxLibNsGroup.remove_member') @mock.patch('vmware_nsxlib.v3.security.NsxLibNsGroup.add_members') def test_initialize_with_absent_nested_groups(self, add_member_mock, remove_member_mock): size = 3 cont_manager = ns_group_manager.NSGroupManager(self.nsxlib, size) # list_nsgroups will return nested group 1 and 3, but not group 2. nsgroups = cont_manager.nsxlib_nsgroup.list() with mock.patch("vmware_nsxlib.v3.security.NsxLibNsGroup.list", side_effect=lambda: nsgroups[::2]): # invoking the initialization process again, it should process # groups 1 and 3 and create group 2. cont_manager = ns_group_manager.NSGroupManager(self.nsxlib, size) self.assertEqual({0: NSG_IDS[0], 1: NSG_IDS[3], 2: NSG_IDS[2]}, cont_manager.nested_groups) @_mock_create_and_list_nsgroups def test_suggest_nested_group(self): size = 5 cont_manager = ns_group_manager.NSGroupManager(self.nsxlib, size) # We expect that the first suggested index is 2 expected_suggested_groups = NSG_IDS[2:5] + NSG_IDS[:2] suggest_group = lambda: cont_manager._suggest_nested_group('fake-id') with mock.patch.object(cont_manager, '_hash_uuid', return_value=7): for i, suggested in enumerate(suggest_group()): self.assertEqual(expected_suggested_groups[i], suggested) vmware-nsxlib-12.0.1/vmware_nsxlib/tests/unit/v3/test_client.py0000666000175100017510000003102613244535763024655 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
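# A standalone sketch of the nested-group placement order the tests above
# exercise: the target index is the uuid hash modulo the number of nested
# groups, and when a group is full the remaining groups are suggested in
# round-robin order starting from that index.  This is an illustrative
# simplification, not the NSGroupManager implementation itself.
def _suggest_order(group_ids, uuid_hash):
    start = uuid_hash % len(group_ids)
    # e.g. hash 7 over 5 groups -> start at index 2, then 3, 4, 0, 1
    return group_ids[start:] + group_ids[:start]


groups = ['g0', 'g1', 'g2', 'g3', 'g4']
assert _suggest_order(groups, 7) == ['g2', 'g3', 'g4', 'g0', 'g1']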
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import copy from oslo_log import log from oslo_serialization import jsonutils import requests from vmware_nsxlib.tests.unit.v3 import mocks from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase from vmware_nsxlib.v3 import client from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3 import utils LOG = log.getLogger(__name__) DFT_ACCEPT_HEADERS = { 'Accept': '*/*', 'Cookie': 'JSESSIONID=%s;' % nsxlib_testcase.JSESSIONID } JSON_DFT_ACCEPT_HEADERS = { 'Accept': 'application/json', 'Content-Type': 'application/json', 'Cookie': 'JSESSIONID=%s;' % nsxlib_testcase.JSESSIONID } def _headers(**kwargs): headers = copy.copy(DFT_ACCEPT_HEADERS) headers.update(kwargs) return headers def assert_call(verb, client_or_resource, url, verify=nsxlib_testcase.NSX_CERT, data=None, headers=DFT_ACCEPT_HEADERS, timeout=(nsxlib_testcase.NSX_HTTP_TIMEOUT, nsxlib_testcase.NSX_HTTP_READ_TIMEOUT), single_call=True): nsx_client = client_or_resource if getattr(nsx_client, 'client', None) is not None: nsx_client = nsx_client.client cluster = nsx_client._conn if single_call: cluster.assert_called_once( verb, **{'url': url, 'verify': verify, 'body': data, 'headers': headers, 'cert': None, 'timeout': timeout}) else: cluster.assert_any_call( verb, **{'url': url, 'verify': verify, 'body': data, 'headers': headers, 'cert': None, 'timeout': timeout}) def mock_calls_count(verb, client_or_resource): nsx_client = client_or_resource if getattr(nsx_client, 'client', None) is not None: nsx_client = nsx_client.client cluster = nsx_client._conn return cluster.call_count(verb) def assert_json_call(verb, client_or_resource, url, verify=nsxlib_testcase.NSX_CERT, data=None, headers=JSON_DFT_ACCEPT_HEADERS, single_call=True): return assert_call(verb, client_or_resource, url, verify=verify, data=data, headers=headers, single_call=single_call) class NsxV3RESTClientTestCase(nsxlib_testcase.NsxClientTestCase): def test_client_url_prefix(self): api = self.new_mocked_client(client.RESTClient, url_prefix='/cloud/api') api.list() assert_call( 'get', api, 'https://1.2.3.4/cloud/api') api = self.new_mocked_client(client.RESTClient, url_prefix='/cloud/api') api.url_list('v1/ports') assert_call( 'get', api, 'https://1.2.3.4/cloud/api/v1/ports') def test_client_headers(self): default_headers = {'Content-Type': 'application/golang'} api = self.new_mocked_client( client.RESTClient, default_headers=default_headers, url_prefix='/v1/api') api.list() assert_call( 'get', api, 'https://1.2.3.4/v1/api', headers=_headers(**default_headers)) api = self.new_mocked_client( client.RESTClient, default_headers=default_headers, url_prefix='/v1/api') method_headers = {'X-API-Key': 'strong-crypt'} api.url_list('ports/33', headers=method_headers) method_headers.update(default_headers) assert_call( 'get', api, 'https://1.2.3.4/v1/api/ports/33', headers=_headers(**method_headers)) def test_client_for(self): api = self.new_mocked_client(client.RESTClient, url_prefix='api/v1/') sub_api = api.new_client_for('switch/ports') sub_api.get('11a2b') assert_call( 'get', sub_api, 'https://1.2.3.4/api/v1/switch/ports/11a2b') def test_client_list(self): api = 
self.new_mocked_client(client.RESTClient, url_prefix='api/v1/ports') api.list() assert_call( 'get', api, 'https://1.2.3.4/api/v1/ports') def test_client_get(self): api = self.new_mocked_client(client.RESTClient, url_prefix='api/v1/ports') api.get('unique-id') assert_call( 'get', api, 'https://1.2.3.4/api/v1/ports/unique-id') def test_client_delete(self): api = self.new_mocked_client(client.RESTClient, url_prefix='api/v1/ports') api.delete('unique-id') assert_call( 'delete', api, 'https://1.2.3.4/api/v1/ports/unique-id') def test_client_update(self): api = self.new_mocked_client(client.RESTClient, url_prefix='api/v1/ports') api.update('unique-id', jsonutils.dumps({'name': 'a-new-name'})) assert_call( 'put', api, 'https://1.2.3.4/api/v1/ports/unique-id', data=jsonutils.dumps({'name': 'a-new-name'})) def test_client_create(self): api = self.new_mocked_client(client.RESTClient, url_prefix='api/v1/ports') api.create(body=jsonutils.dumps({'resource-name': 'port1'})) assert_call( 'post', api, 'https://1.2.3.4/api/v1/ports', data=jsonutils.dumps({'resource-name': 'port1'})) def test_client_url_list(self): api = self.new_mocked_client(client.RESTClient, url_prefix='api/v1/ports') json_headers = {'Content-Type': 'application/json'} api.url_list('/connections', json_headers) assert_call( 'get', api, 'https://1.2.3.4/api/v1/ports/connections', headers=_headers(**json_headers)) def test_client_url_get(self): api = self.new_mocked_client(client.RESTClient, url_prefix='api/v1/ports') api.url_get('connections/1') assert_call( 'get', api, 'https://1.2.3.4/api/v1/ports/connections/1') def test_client_url_delete(self): api = self.new_mocked_client(client.RESTClient, url_prefix='api/v1/ports') api.url_delete('1') assert_call( 'delete', api, 'https://1.2.3.4/api/v1/ports/1') def test_client_url_put(self): api = self.new_mocked_client(client.RESTClient, url_prefix='api/v1/ports') api.url_put('connections/1', jsonutils.dumps({'name': 'conn1'})) assert_call( 'put', api, 'https://1.2.3.4/api/v1/ports/connections/1', data=jsonutils.dumps({'name': 'conn1'})) def test_client_url_post(self): api = self.new_mocked_client(client.RESTClient, url_prefix='api/v1/ports') api.url_post('1/connections', jsonutils.dumps({'name': 'conn1'})) assert_call( 'post', api, 'https://1.2.3.4/api/v1/ports/1/connections', data=jsonutils.dumps({'name': 'conn1'})) def test_client_validate_result(self): def _verb_response_code(http_verb, status_code, error_code=None): content = None if error_code: content = jsonutils.dumps({'httpStatus': 'dummy', 'error_code': error_code, 'module_name': 'dummy', 'error_message': 'bad'}) response = mocks.MockRequestsResponse( status_code, content) client_api = self.new_mocked_client( client.RESTClient, mock_validate=False, session_response=response) client_call = getattr(client_api, "url_%s" % http_verb) client_call('', None) for verb in ['get', 'post', 'put', 'delete']: for code in client.RESTClient._VERB_RESP_CODES.get(verb): _verb_response_code(verb, code) self.assertRaises( nsxlib_exc.ManagerError, _verb_response_code, verb, requests.codes.INTERNAL_SERVER_ERROR) self.assertRaises( nsxlib_exc.ResourceNotFound, _verb_response_code, verb, requests.codes.NOT_FOUND) self.assertRaises( nsxlib_exc.BackendResourceNotFound, _verb_response_code, verb, requests.codes.NOT_FOUND, 202) def test_inject_headers_callback(self): self.injected = None def inject_header(): self.injected = True return {} utils.set_inject_headers_callback(inject_header) api = self.new_mocked_client( client.RESTClient, url_prefix='/v1/api') 
api.list() injected_headers = {} assert_call( 'get', api, 'https://1.2.3.4/v1/api', headers=_headers(**injected_headers)) api = self.new_mocked_client( client.RESTClient, url_prefix='/v1/api') utils.set_inject_headers_callback(None) self.assertIsNotNone(self.injected) class NsxV3JSONClientTestCase(nsxlib_testcase.NsxClientTestCase): def test_json_request(self): resp = mocks.MockRequestsResponse( 200, jsonutils.dumps({'result': {'ok': 200}})) api = self.new_mocked_client(client.JSONRESTClient, session_response=resp, url_prefix='api/v2/nat') resp = api.create(body={'name': 'mgmt-egress'}) assert_json_call( 'post', api, 'https://1.2.3.4/api/v2/nat', data=jsonutils.dumps({'name': 'mgmt-egress'})) self.assertEqual(resp, {'result': {'ok': 200}}) def test_mask_password(self): pwds = ('my!pwd0#', 'some0therlong$pwd', 'pwd') body = {'name_pwd': 'name1', 'password': pwds[0], 'some_list': {'name_password': 'name2', 'password': pwds[1]}, 'password': pwds[2]} cl = client.RESTClient(None) json_body = jsonutils.dumps(body) masked_body = cl._mask_password(json_body) for pwd in pwds: json_body = json_body.replace('"' + pwd + '"', '"********"') self.assertEqual(json_body, masked_body) class NsxV3APIClientTestCase(nsxlib_testcase.NsxClientTestCase): def test_api_call(self): api = self.new_mocked_client(client.NSX3Client) api.get('ports') assert_json_call( 'get', api, 'https://1.2.3.4/api/v1/ports') # NOTE(boden): remove this when tmp brigding removed class NsxV3APIClientBridgeTestCase(nsxlib_testcase.NsxClientTestCase): def test_get_resource(self): api = self.new_mocked_client(client.NSX3Client) api.get('ports') assert_json_call( 'get', api, 'https://1.2.3.4/api/v1/ports') def test_create_resource(self): api = self.new_mocked_client(client.NSX3Client) api.create('ports', {'resource-name': 'port1'}) assert_json_call( 'post', api, 'https://1.2.3.4/api/v1/ports', data=jsonutils.dumps({'resource-name': 'port1'})) def test_update_resource(self): api = self.new_mocked_client(client.NSX3Client) api.update('ports/1', {'name': 'a-new-name'}) assert_json_call( 'put', api, 'https://1.2.3.4/api/v1/ports/1', data=jsonutils.dumps({'name': 'a-new-name'})) def test_delete_resource(self): api = self.new_mocked_client(client.NSX3Client) api.delete('ports/11') assert_json_call( 'delete', api, 'https://1.2.3.4/api/v1/ports/11') vmware-nsxlib-12.0.1/vmware_nsxlib/tests/unit/v3/test_security.py0000666000175100017510000002013013244535763025240 0ustar zuulzuul00000000000000# Copyright (c) 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
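# The test_mask_password case above expects any JSON value stored under a
# password-like key to be replaced with "********" before the body is
# logged.  A minimal, standalone sketch of that idea using a regular
# expression; this is an assumption for illustration and the library's own
# masking logic may differ in detail.
import re

_PASSWORD_RE = re.compile(r'("\w*password\w*"\s*:\s*)"[^"]*"', re.IGNORECASE)


def _mask_password_sketch(json_body):
    # replace only the quoted value, keeping the key itself untouched
    return _PASSWORD_RE.sub(r'\1"********"', json_body)


assert _mask_password_sketch('{"password": "my!pwd0#"}') == \
    '{"password": "********"}'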
import mock import six from oslo_utils import uuidutils from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase from vmware_nsxlib.tests.unit.v3 import test_constants from vmware_nsxlib.v3 import nsx_constants as const class TestNsxLibFirewallSection(nsxlib_testcase.NsxLibTestCase): """Tests for vmware_nsxlib.v3.security.NsxLibFirewallSection""" def test_get_logicalport_reference(self): mock_port = '3ed55c9f-f879-4048-bdd3-eded92465252' result = self.nsxlib.firewall_section.get_logicalport_reference( mock_port) expected = { 'target_id': '3ed55c9f-f879-4048-bdd3-eded92465252', 'target_type': 'LogicalPort' } self.assertEqual(expected, result) def test_get_rule_address(self): result = self.nsxlib.firewall_section.get_rule_address( 'target-id', 'display-name') expected = { 'target_display_name': 'display-name', 'target_id': 'target-id', 'is_valid': True, 'target_type': 'IPv4Address' } self.assertEqual(expected, result) def test_get_l4portset_nsservice(self): result = self.nsxlib.firewall_section.get_l4portset_nsservice() expected = { 'service': { 'resource_type': 'L4PortSetNSService', 'source_ports': [], 'destination_ports': [], 'l4_protocol': 'TCP' } } self.assertEqual(expected, result) def test_create_with_rules(self): expected_body = { 'display_name': 'display-name', 'description': 'section-description', 'stateful': True, 'section_type': "LAYER3", 'applied_tos': [], 'rules': [{ 'display_name': 'rule-name', 'direction': 'IN_OUT', 'ip_protocol': "IPV4_IPV6", 'action': "ALLOW", 'logged': False, 'disabled': False, 'sources': [], 'destinations': [], 'services': [] }], 'tags': [] } with mock.patch.object(self.nsxlib.client, 'create') as create: rule = self.nsxlib.firewall_section.get_rule_dict('rule-name') self.nsxlib.firewall_section.create_with_rules( 'display-name', 'section-description', rules=[rule]) resource = 'firewall/sections?operation=insert_bottom' \ '&action=create_with_rules' create.assert_called_with(resource, expected_body, headers=None) def test_get_excludelist(self): with mock.patch.object(self.nsxlib.client, 'list') as clist: self.nsxlib.firewall_section.get_excludelist() clist.assert_called_with('firewall/excludelist') def test_update(self): fws_tags = [{"scope": "name", "tag": "new_name"}] with mock.patch.object(self.nsxlib.client, 'update') as update: with mock.patch.object(self.nsxlib.client, 'get') as get: get.return_value = {} self.nsxlib.firewall_section.update('fw_section_id', tags_update=fws_tags) resource = 'firewall/sections/%s' % 'fw_section_id' data = {'tags': fws_tags} update.assert_called_with(resource, data, headers=None) class TestNsxLibIPSet(nsxlib_testcase.NsxClientTestCase): """Tests for vmware_nsxlib.v3.security.NsxLibIPSet""" def test_get_ipset_reference(self): mock_ip_set = uuidutils.generate_uuid() result = self.nsxlib.ip_set.get_ipset_reference( mock_ip_set) expected = { 'target_id': mock_ip_set, 'target_type': const.IP_SET } self.assertEqual(expected, result) def test_create_ip_set(self): fake_ip_set = test_constants.FAKE_IP_SET.copy() data = { 'display_name': fake_ip_set['display_name'], 'ip_addresses': fake_ip_set['ip_addresses'], 'description': 'ipset-desc', 'tags': [] } with mock.patch.object(self.nsxlib.client, 'create') as create: self.nsxlib.ip_set.create( fake_ip_set['display_name'], 'ipset-desc', ip_addresses=fake_ip_set['ip_addresses']) resource = 'ip-sets' create.assert_called_with(resource, data) def test_delete_ip_set(self): with mock.patch.object(self.nsxlib.client, 'delete') as delete: fake_ip_set = test_constants.FAKE_IP_SET.copy() 
self.nsxlib.ip_set.delete(fake_ip_set['id']) delete.assert_called_with('ip-sets/%s' % fake_ip_set['id']) def test_update_ip_set(self): fake_ip_set = test_constants.FAKE_IP_SET.copy() new_ip_addresses = ['10.0.0.0'] data = { 'id': fake_ip_set['id'], 'display_name': fake_ip_set['display_name'], 'ip_addresses': new_ip_addresses, 'resource_type': 'IPSet' } with mock.patch.object(self.nsxlib.client, 'get', return_value=fake_ip_set): with mock.patch.object(self.nsxlib.client, 'update') as update: self.nsxlib.ip_set.update( fake_ip_set['id'], ip_addresses=new_ip_addresses) resource = 'ip-sets/%s' % fake_ip_set['id'] update.assert_called_with(resource, data, headers=None) def test_update_ip_set_empty_ip_addresses(self): fake_ip_set = test_constants.FAKE_IP_SET.copy() new_ip_addresses = [] data = { 'id': fake_ip_set['id'], 'display_name': fake_ip_set['display_name'], 'ip_addresses': new_ip_addresses, 'resource_type': 'IPSet' } with mock.patch.object(self.nsxlib.client, 'get', return_value=fake_ip_set): with mock.patch.object(self.nsxlib.client, 'update') as update: self.nsxlib.ip_set.update( fake_ip_set['id'], ip_addresses=new_ip_addresses) resource = 'ip-sets/%s' % fake_ip_set['id'] update.assert_called_with(resource, data, headers=None) class TestNsxLibNSGroup(nsxlib_testcase.NsxClientTestCase): """Tests for vmware_nsxlib.v3.security.NsxLibNSGroup""" def test_get_nsgroup_complex_expression(self): port_tags = {'app': 'foo', 'project': 'myproject'} port_exp = [self.nsxlib.ns_group.get_port_tag_expression(k, v) for k, v in six.iteritems(port_tags)] complex_exp = self.nsxlib.ns_group.get_nsgroup_complex_expression( expressions=port_exp) expected_exp = {'resource_type': const.NSGROUP_COMPLEX_EXP, 'expressions': port_exp} self.assertEqual(expected_exp, complex_exp) def test_update(self): nsg_tags = [{"scope": "name", "tag": "new_name"}] membership_criteria = [] with mock.patch.object(self.nsxlib.client, 'update') as update: with mock.patch.object(self.nsxlib.client, 'get') as get: get.return_value = {} self.nsxlib.ns_group.update( 'nsgroupid', tags_update=nsg_tags, membership_criteria=membership_criteria) resource = 'ns-groups/nsgroupid' data = {'tags': nsg_tags, 'membership_criteria': membership_criteria} update.assert_called_with(resource, data, headers=None) vmware-nsxlib-12.0.1/vmware_nsxlib/tests/unit/v3/test_vpn_ipsec.py0000666000175100017510000002747713244535763025404 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
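# The nsgroup complex-expression test above expects a payload of the form
# {'resource_type': <complex expression type>, 'expressions': [<per-tag
# expressions>]}.  A minimal standalone sketch of composing that structure;
# the helper names and the literal resource_type strings here are
# illustrative assumptions rather than the library's constants.
def _port_tag_expression(scope, tag):
    return {'resource_type': 'NSGroupTagExpression',
            'target_type': 'LogicalPort',
            'scope': scope,
            'tag': tag}


def _complex_expression(expressions):
    return {'resource_type': 'NSGroupComplexExpression',
            'expressions': expressions}


_exp = [_port_tag_expression('app', 'foo'),
        _port_tag_expression('project', 'myproject')]
assert _complex_expression(_exp)['expressions'] == _exp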
# from oslo_serialization import jsonutils from vmware_nsxlib.tests.unit.v3 import test_client from vmware_nsxlib.tests.unit.v3 import test_constants from vmware_nsxlib.tests.unit.v3 import test_resources from vmware_nsxlib.v3 import vpn_ipsec class TestIkeProfile(test_resources.BaseTestResource): def setUp(self): super(TestIkeProfile, self).setUp( vpn_ipsec.IkeProfile) def test_ike_profile_create(self): mocked_resource = self.get_mocked_resource() name = 'ike_profile' description = 'desc' enc_alg = vpn_ipsec.EncryptionAlgorithmTypes.ENCRYPTION_ALGORITHM_128 dig_alg = vpn_ipsec.DigestAlgorithmTypes.DIGEST_ALGORITHM_SHA1 ike_ver = vpn_ipsec.IkeVersionTypes.IKE_VERSION_V1 dh_group = vpn_ipsec.DHGroupTypes.DH_GROUP_14 lifetime = 100 mocked_resource.create(name, description=description, encryption_algorithm=enc_alg, digest_algorithm=dig_alg, ike_version=ike_ver, dh_group=dh_group, sa_life_time=lifetime) test_client.assert_json_call( 'post', mocked_resource, 'https://1.2.3.4/api/v1/%s' % mocked_resource.uri_segment, data=jsonutils.dumps({ 'display_name': name, 'description': description, 'encryption_algorithms': [enc_alg], 'digest_algorithms': [dig_alg], 'ike_version': ike_ver, 'dh_groups': [dh_group], 'sa_life_time': lifetime }, sort_keys=True), headers=self.default_headers()) class TestIPSecTunnelProfile(test_resources.BaseTestResource): def setUp(self): super(TestIPSecTunnelProfile, self).setUp( vpn_ipsec.IPSecTunnelProfile) def test_ipsec_profile_create(self): mocked_resource = self.get_mocked_resource() name = 'ipsec_profile' description = 'desc' enc_alg = vpn_ipsec.EncryptionAlgorithmTypes.ENCRYPTION_ALGORITHM_128 dig_alg = vpn_ipsec.DigestAlgorithmTypes.DIGEST_ALGORITHM_SHA1 dh_group = vpn_ipsec.DHGroupTypes.DH_GROUP_14 lifetime = 100 mocked_resource.create(name, description=description, encryption_algorithm=enc_alg, digest_algorithm=dig_alg, pfs=True, dh_group=dh_group, sa_life_time=lifetime) test_client.assert_json_call( 'post', mocked_resource, 'https://1.2.3.4/api/v1/%s' % mocked_resource.uri_segment, data=jsonutils.dumps({ 'display_name': name, 'description': description, 'encryption_algorithms': [enc_alg], 'digest_algorithms': [dig_alg], 'enable_perfect_forward_secrecy': True, 'dh_groups': [dh_group], 'sa_life_time': lifetime }, sort_keys=True), headers=self.default_headers()) class TestIPSecDpdProfile(test_resources.BaseTestResource): def setUp(self): super(TestIPSecDpdProfile, self).setUp( vpn_ipsec.IPSecDpdProfile) def test_dpd_profile_create(self): mocked_resource = self.get_mocked_resource() name = 'dpd_profile' description = 'desc' timeout = 100 enabled = True mocked_resource.create(name, description=description, timeout=timeout, enabled=enabled) test_client.assert_json_call( 'post', mocked_resource, 'https://1.2.3.4/api/v1/%s' % mocked_resource.uri_segment, data=jsonutils.dumps({ 'display_name': name, 'description': description, 'dpd_probe_interval': timeout, 'enabled': enabled }, sort_keys=True), headers=self.default_headers()) def test_dpd_profile_update(self): fake_dpd = test_constants.FAKE_DPD.copy() new_timeout = 1000 uuid = test_constants.FAKE_DPD_ID mocked_resource = self.get_mocked_resource(response=fake_dpd) mocked_resource.update(uuid, timeout=new_timeout) fake_dpd['dpd_probe_interval'] = new_timeout test_client.assert_json_call( 'put', mocked_resource, 'https://1.2.3.4/api/v1/%s/%s' % (mocked_resource.uri_segment, uuid), data=jsonutils.dumps(fake_dpd, sort_keys=True), headers=self.default_headers()) class TestIPSecPeerEndpoint(test_resources.BaseTestResource): def 
setUp(self): super(TestIPSecPeerEndpoint, self).setUp( vpn_ipsec.IPSecPeerEndpoint) def test_peer_endpoint_create(self): mocked_resource = self.get_mocked_resource() name = 'peerep' description = 'desc' peer_address = peer_id = '1.1.1.1' authentication_mode = 'PSK' dpd_profile_id = 'uuid1' ike_profile_id = 'uuid2' ipsec_profile_id = 'uuid3' initiation_mode = 'INITIATOR' psk = 'secret' mocked_resource.create(name, peer_address, peer_id, description=description, authentication_mode=authentication_mode, dpd_profile_id=dpd_profile_id, ike_profile_id=ike_profile_id, ipsec_tunnel_profile_id=ipsec_profile_id, connection_initiation_mode=initiation_mode, psk=psk) test_client.assert_json_call( 'post', mocked_resource, 'https://1.2.3.4/api/v1/%s' % mocked_resource.uri_segment, data=jsonutils.dumps({ 'display_name': name, 'peer_address': peer_address, 'peer_id': peer_id, 'description': description, 'authentication_mode': authentication_mode, 'dpd_profile_id': dpd_profile_id, 'ike_profile_id': ike_profile_id, 'ipsec_tunnel_profile_id': ipsec_profile_id, 'connection_initiation_mode': initiation_mode, 'psk': psk }, sort_keys=True), headers=self.default_headers()) def test_peer_endpoint_update(self): fake_pep = test_constants.FAKE_PEP.copy() new_desc = 'updated' new_name = 'new' new_psk = 'psk12' uuid = test_constants.FAKE_PEP_ID mocked_resource = self.get_mocked_resource(response=fake_pep) mocked_resource.update(uuid, name=new_name, description=new_desc, psk=new_psk) fake_pep['description'] = new_desc fake_pep['display_name'] = new_name fake_pep['psk'] = new_psk test_client.assert_json_call( 'put', mocked_resource, 'https://1.2.3.4/api/v1/%s/%s' % (mocked_resource.uri_segment, uuid), data=jsonutils.dumps(fake_pep, sort_keys=True), headers=self.default_headers()) class TestLocalEndpoint(test_resources.BaseTestResource): def setUp(self): super(TestLocalEndpoint, self).setUp( vpn_ipsec.LocalEndpoint) def test_local_endpoint_create(self): mocked_resource = self.get_mocked_resource() name = 'localep' description = 'desc' local_address = local_id = '1.1.1.1' ipsec_vpn_service_id = 'uuid1' mocked_resource.create(name, local_address, ipsec_vpn_service_id, description=description, local_id=local_id) test_client.assert_json_call( 'post', mocked_resource, 'https://1.2.3.4/api/v1/%s' % mocked_resource.uri_segment, data=jsonutils.dumps({ 'display_name': name, 'local_address': local_address, 'local_id': local_id, 'description': description, 'ipsec_vpn_service_id': {'target_id': ipsec_vpn_service_id} }, sort_keys=True), headers=self.default_headers()) def test_local_endpoint_update(self): fake_pep = test_constants.FAKE_LEP.copy() new_desc = 'updated' new_name = 'new' new_addr = '2.2.2.2' uuid = test_constants.FAKE_LEP_ID mocked_resource = self.get_mocked_resource(response=fake_pep) mocked_resource.update(uuid, name=new_name, description=new_desc, local_address=new_addr, local_id=new_addr) fake_pep['description'] = new_desc fake_pep['display_name'] = new_name fake_pep['local_address'] = new_addr fake_pep['local_id'] = new_addr test_client.assert_json_call( 'put', mocked_resource, 'https://1.2.3.4/api/v1/%s/%s' % (mocked_resource.uri_segment, uuid), data=jsonutils.dumps(fake_pep, sort_keys=True), headers=self.default_headers()) class TestSession(test_resources.BaseTestResource): def setUp(self): super(TestSession, self).setUp( vpn_ipsec.Session) def test_session_create(self): mocked_resource = self.get_mocked_resource() name = 'session' description = 'desc' local_ep_id = 'uuid1' peer_ep_id = 'uuid2' policy_rules = [] 
mocked_resource.create(name, local_ep_id, peer_ep_id, policy_rules, description=description) test_client.assert_json_call( 'post', mocked_resource, 'https://1.2.3.4/api/v1/%s' % mocked_resource.uri_segment, data=jsonutils.dumps({ 'display_name': name, 'description': description, 'local_endpoint_id': local_ep_id, 'peer_endpoint_id': peer_ep_id, 'enabled': True, 'resource_type': mocked_resource.resource_type, 'policy_rules': policy_rules, }, sort_keys=True), headers=self.default_headers()) # TODO(asarfaty): add tests for update & rules class TestService(test_resources.BaseTestResource): def setUp(self): super(TestService, self).setUp( vpn_ipsec.Service) def test_service_create(self): mocked_resource = self.get_mocked_resource() router_id = 'abcd' enabled = True log_level = "DEBUG" name = 'service' mocked_resource.create(name, router_id, ike_log_level=log_level, enabled=enabled) test_client.assert_json_call( 'post', mocked_resource, 'https://1.2.3.4/api/v1/%s' % mocked_resource.uri_segment, data=jsonutils.dumps({ 'display_name': name, 'logical_router_id': router_id, 'ike_log_level': log_level, 'enabled': enabled }, sort_keys=True), headers=self.default_headers()) vmware-nsxlib-12.0.1/vmware_nsxlib/tests/unit/v3/test_resources.py0000666000175100017510000021032313244535763025410 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
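#
# All resource test cases in this module follow the same pattern, built on
# BaseTestResource below: setUp() passes in the resource class under test,
# get_mocked_resource() returns that resource wired to a mocked client
# (optionally seeded with a canned JSON response), and each test invokes an
# API method and then uses test_client.assert_json_call() to verify the HTTP
# verb, URI and JSON body sent to the mocked backend.  A minimal sketch of a
# new test case written against this pattern (MyResourceTestCase and
# resources.MyResource are hypothetical names, shown for illustration only):
#
#     class MyResourceTestCase(BaseTestResource):
#         def setUp(self):
#             super(MyResourceTestCase, self).setUp(resources.MyResource)
#
#         def test_create(self):
#             mocked = self.get_mocked_resource()
#             mocked.create('fake-name')
#             test_client.assert_json_call(
#                 'post', mocked,
#                 'https://1.2.3.4/api/v1/%s' % mocked.uri_segment,
#                 data=jsonutils.dumps({'display_name': 'fake-name'},
#                                      sort_keys=True),
#                 headers=self.default_headers())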
# import copy import eventlet import mock from oslo_serialization import jsonutils from oslo_utils import uuidutils from vmware_nsxlib.tests.unit.v3 import mocks from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase from vmware_nsxlib.tests.unit.v3 import test_client from vmware_nsxlib.tests.unit.v3 import test_constants from vmware_nsxlib.v3 import core_resources from vmware_nsxlib.v3 import exceptions from vmware_nsxlib.v3 import nsx_constants from vmware_nsxlib.v3 import resources from vmware_nsxlib.v3 import utils class BaseTestResource(nsxlib_testcase.NsxClientTestCase): """Base class for resources tests Contains tests for the simple get/list/delete apis and an api to get the mocked resource """ def setUp(self, resource=None): self.resource = resource super(BaseTestResource, self).setUp() def get_mocked_resource(self, mock_validate=True, response=None, response_repeat=1): session_response = None if response: session_response = mocks.MockRequestsResponse( 200, jsonutils.dumps(response)) if response_repeat > 1: session_response = [session_response] * response_repeat return self.mocked_resource( self.resource, mock_validate=mock_validate, session_response=session_response) def test_get_resource(self): if not self.resource: return mocked_resource = self.get_mocked_resource() fake_uuid = uuidutils.generate_uuid() mocked_resource.get(fake_uuid) test_client.assert_json_call( 'get', mocked_resource, 'https://1.2.3.4/api/v1/%s/%s' % (mocked_resource.uri_segment, fake_uuid), headers=self.default_headers()) def test_list_all(self): if not self.resource: return mocked_resource = self.get_mocked_resource() mocked_resource.list() test_client.assert_json_call( 'get', mocked_resource, 'https://1.2.3.4/api/v1/%s' % mocked_resource.uri_segment, headers=self.default_headers()) def test_delete_resource(self, extra_params=None): if not self.resource: return mocked_resource = self.get_mocked_resource() fake_uuid = uuidutils.generate_uuid() mocked_resource.delete(fake_uuid) uri = 'https://1.2.3.4/api/v1/%s/%s' % (mocked_resource.uri_segment, fake_uuid) if extra_params: uri = uri + '?' 
+ extra_params test_client.assert_json_call( 'delete', mocked_resource, uri, headers=self.default_headers()) class TestSwitchingProfileTestCase(BaseTestResource): def setUp(self): self.types = resources.SwitchingProfileTypes super(TestSwitchingProfileTestCase, self).setUp( resources.SwitchingProfile) def test_switching_profile_create(self): mocked_resource = self.get_mocked_resource() mocked_resource.create(self.types.PORT_MIRRORING, 'pm-profile', 'port mirror prof') test_client.assert_json_call( 'post', mocked_resource, 'https://1.2.3.4/api/v1/switching-profiles', data=jsonutils.dumps({ 'resource_type': self.types.PORT_MIRRORING, 'display_name': 'pm-profile', 'description': 'port mirror prof' }, sort_keys=True), headers=self.default_headers()) def test_switching_profile_update(self): tags = [ { 'scope': 'os-project-id', 'tag': 'project-1' }, { 'scope': 'os-api-version', 'tag': '2.1.1.0' } ] mocked_resource = self.get_mocked_resource() fake_uuid = uuidutils.generate_uuid() mocked_resource.update( fake_uuid, self.types.PORT_MIRRORING, tags=tags) test_client.assert_json_call( 'put', mocked_resource, 'https://1.2.3.4/api/v1/switching-profiles/%s' % fake_uuid, data=jsonutils.dumps({ 'resource_type': self.types.PORT_MIRRORING, 'tags': tags }, sort_keys=True), headers=self.default_headers()) def test_spoofgaurd_profile_create(self): tags = [ { 'scope': 'os-project-id', 'tag': 'project-1' }, { 'scope': 'os-api-version', 'tag': '2.1.1.0' } ] mocked_resource = self.get_mocked_resource() mocked_resource.create_spoofguard_profile( 'plugin-spoof', 'spoofguard-for-plugin', whitelist_ports=True, tags=tags) test_client.assert_json_call( 'post', mocked_resource, 'https://1.2.3.4/api/v1/switching-profiles', data=jsonutils.dumps({ 'resource_type': self.types.SPOOF_GUARD, 'display_name': 'plugin-spoof', 'description': 'spoofguard-for-plugin', 'white_list_providers': ['LPORT_BINDINGS'], 'tags': tags }, sort_keys=True), headers=self.default_headers()) def test_create_dhcp_profile(self): tags = [ { 'scope': 'os-project-id', 'tag': 'project-1' }, { 'scope': 'os-api-version', 'tag': '2.1.1.0' } ] mocked_resource = self.get_mocked_resource() mocked_resource.create_dhcp_profile( 'plugin-dhcp', 'dhcp-for-plugin', tags=tags) test_client.assert_json_call( 'post', mocked_resource, 'https://1.2.3.4/api/v1/switching-profiles', data=jsonutils.dumps({ 'bpdu_filter': { 'enabled': True, 'white_list': [] }, 'resource_type': self.types.SWITCH_SECURITY, 'display_name': 'plugin-dhcp', 'description': 'dhcp-for-plugin', 'tags': tags, 'dhcp_filter': { 'client_block_enabled': True, 'server_block_enabled': False }, 'rate_limits': { 'enabled': False, 'rx_broadcast': 0, 'tx_broadcast': 0, 'rx_multicast': 0, 'tx_multicast': 0 }, 'block_non_ip_traffic': True }, sort_keys=True), headers=self.default_headers()) def test_create_mac_learning_profile(self): tags = [ { 'scope': 'os-project-id', 'tag': 'project-1' }, { 'scope': 'os-api-version', 'tag': '2.1.1.0' } ] mocked_resource = self.get_mocked_resource() mocked_resource.create_mac_learning_profile( 'plugin-mac-learning', 'mac-learning-for-plugin', tags=tags) test_client.assert_json_call( 'post', mocked_resource, 'https://1.2.3.4/api/v1/switching-profiles', data=jsonutils.dumps({ 'mac_learning': { 'enabled': True, }, 'resource_type': self.types.MAC_LEARNING, 'display_name': 'plugin-mac-learning', 'description': 'mac-learning-for-plugin', 'tags': tags, 'mac_change_allowed': True, }, sort_keys=True), headers=self.default_headers()) def test_find_by_display_name(self): resp_resources = { 
'results': [ {'display_name': 'resource-1'}, {'display_name': 'resource-2'}, {'display_name': 'resource-3'} ] } mocked_resource = self.get_mocked_resource(response=resp_resources, response_repeat=3) self.assertEqual([{'display_name': 'resource-1'}], mocked_resource.find_by_display_name('resource-1')) self.assertEqual([{'display_name': 'resource-2'}], mocked_resource.find_by_display_name('resource-2')) self.assertEqual([{'display_name': 'resource-3'}], mocked_resource.find_by_display_name('resource-3')) resp_resources = { 'results': [ {'display_name': 'resource-1'}, {'display_name': 'resource-1'}, {'display_name': 'resource-1'} ] } mocked_resource = self.get_mocked_resource(response=resp_resources) self.assertEqual(resp_resources['results'], mocked_resource.find_by_display_name('resource-1')) def test_list_all(self): mocked_resource = self.get_mocked_resource() mocked_resource.list() test_client.assert_json_call( 'get', mocked_resource, 'https://1.2.3.4/api/v1/switching-profiles/' '?include_system_owned=True', data=None, headers=self.default_headers()) class LogicalPortTestCase(BaseTestResource): def setUp(self): super(LogicalPortTestCase, self).setUp(resources.LogicalPort) def _get_profile_dicts(self, fake_port): fake_profile_dicts = [] for profile_id in fake_port['switching_profile_ids']: fake_profile_dicts.append({'resource_type': profile_id['key'], 'id': profile_id['value']}) return fake_profile_dicts def _get_pktcls_bindings(self): fake_pkt_classifiers = [] fake_binding_repr = [] for i in range(0, 3): ip = "9.10.11.%s" % i mac = "00:0c:29:35:4a:%sc" % i fake_pkt_classifiers.append(resources.PacketAddressClassifier( ip, mac, None)) fake_binding_repr.append({ 'ip_address': ip, 'mac_address': mac }) return fake_pkt_classifiers, fake_binding_repr def test_create_logical_port(self): """Test creating a port. 
returns the correct response and 200 status """ fake_port = test_constants.FAKE_PORT.copy() profile_dicts = self._get_profile_dicts(fake_port) pkt_classifiers, binding_repr = self._get_pktcls_bindings() mocked_resource = self.get_mocked_resource() description = 'dummy' switch_profile = resources.SwitchingProfile mocked_resource.create( fake_port['logical_switch_id'], fake_port['attachment']['id'], address_bindings=pkt_classifiers, switch_profile_ids=switch_profile.build_switch_profile_ids( mock.Mock(), *profile_dicts), description=description) resp_body = { 'logical_switch_id': fake_port['logical_switch_id'], 'switching_profile_ids': fake_port['switching_profile_ids'], 'attachment': { 'attachment_type': 'VIF', 'id': fake_port['attachment']['id'] }, 'admin_state': 'UP', 'address_bindings': binding_repr, 'description': description } test_client.assert_json_call( 'post', mocked_resource, 'https://1.2.3.4/api/v1/logical-ports', data=jsonutils.dumps(resp_body, sort_keys=True), headers=self.default_headers()) def test_create_logical_port_with_attachtype_cif(self): """Test creating a port returns the correct response and 200 status """ fake_port = copy.deepcopy(test_constants.FAKE_CONTAINER_PORT) profile_dicts = self._get_profile_dicts(fake_port) pkt_classifiers, binding_repr = self._get_pktcls_bindings() fake_port['address_bindings'] = binding_repr mocked_resource = self.get_mocked_resource() switch_profile = resources.SwitchingProfile fake_port_ctx = fake_port['attachment']['context'] fake_container_host_vif_id = fake_port_ctx['container_host_vif_id'] mocked_resource.create( fake_port['logical_switch_id'], fake_port['attachment']['id'], parent_vif_id=fake_container_host_vif_id, traffic_tag=fake_port_ctx['vlan_tag'], address_bindings=pkt_classifiers, switch_profile_ids=switch_profile.build_switch_profile_ids( mock.Mock(), *profile_dicts), vif_type=fake_port_ctx['vif_type'], app_id=fake_port_ctx['app_id'], allocate_addresses=fake_port_ctx['allocate_addresses']) resp_body = { 'logical_switch_id': fake_port['logical_switch_id'], 'switching_profile_ids': fake_port['switching_profile_ids'], 'attachment': { 'attachment_type': 'VIF', 'id': fake_port['attachment']['id'], 'context': { 'resource_type': 'VifAttachmentContext', 'allocate_addresses': 'Both', 'parent_vif_id': fake_container_host_vif_id, 'traffic_tag': fake_port_ctx['vlan_tag'], 'app_id': fake_port_ctx['app_id'], 'vif_type': 'CHILD', } }, 'admin_state': 'UP', 'address_bindings': fake_port['address_bindings'] } test_client.assert_json_call( 'post', mocked_resource, 'https://1.2.3.4/api/v1/logical-ports', data=jsonutils.dumps(resp_body, sort_keys=True), headers=self.default_headers()) def test_create_logical_port_admin_down(self): """Test creating port with admin_state down.""" fake_port = test_constants.FAKE_PORT fake_port['admin_state'] = "DOWN" mocked_resource = self.get_mocked_resource(response=fake_port) result = mocked_resource.create( test_constants.FAKE_PORT['logical_switch_id'], test_constants.FAKE_PORT['attachment']['id'], tags={}, admin_state=False) self.assertEqual(fake_port, result) def test_delete_resource(self): """Test deleting port.""" super(LogicalPortTestCase, self).test_delete_resource( extra_params='detach=true') def test_get_logical_port_by_attachment(self): """Test deleting port.""" mocked_resource = self.get_mocked_resource() attachment_type = nsx_constants.ATTACHMENT_DHCP attachment_id = '1234' mocked_resource.get_by_attachment(attachment_type, attachment_id) test_client.assert_json_call( 'get', mocked_resource, 
"https://1.2.3.4/api/v1/logical-ports/?attachment_type=%s" "&attachment_id=%s" % (attachment_type, attachment_id), headers=self.default_headers()) def test_clear_port_bindings(self): fake_port = copy.copy(test_constants.FAKE_PORT) fake_port['address_bindings'] = ['a', 'b'] mocked_resource = self.get_mocked_resource() def get_fake_port(*args, **kwargs): return copy.copy(fake_port) mocked_resource.client.get = get_fake_port mocked_resource.update( fake_port['id'], fake_port['attachment']['id'], address_bindings=[]) fake_port['address_bindings'] = [] test_client.assert_json_call( 'put', mocked_resource, 'https://1.2.3.4/api/v1/logical-ports/%s' % fake_port['id'], data=jsonutils.dumps(fake_port, sort_keys=True), headers=self.default_headers()) def test_create_logical_port_fail(self): """Test the failure of port creation.""" fake_port = test_constants.FAKE_PORT.copy() profile_dicts = self._get_profile_dicts(fake_port) pkt_classifiers, binding_repr = self._get_pktcls_bindings() fake_port['address_bindings'] = binding_repr mocked_resource = self.get_mocked_resource(mock_validate=False) switch_profile = resources.SwitchingProfile try: mocked_resource.create( fake_port['logical_switch_id'], fake_port['attachment']['id'], address_bindings=pkt_classifiers, switch_profile_ids=switch_profile.build_switch_profile_ids( mock.Mock(), *profile_dicts)) except exceptions.ManagerError as e: self.assertIn(nsxlib_testcase.NSX_MANAGER, e.msg) def test_update_logical_port_no_addr_binding(self): fake_port = copy.deepcopy(test_constants.FAKE_CONTAINER_PORT) mocked_resource = self.get_mocked_resource() new_name = 'updated_port' new_desc = 'updated' fake_port_ctx = fake_port['attachment']['context'] fake_container_host_vif_id = fake_port_ctx['container_host_vif_id'] def get_fake_port(*args, **kwargs): return copy.copy(fake_port) mocked_resource.client.get = get_fake_port mocked_resource.update( fake_port['id'], fake_port['attachment']['id'], name=new_name, description=new_desc, parent_vif_id=fake_container_host_vif_id, traffic_tag=fake_port_ctx['vlan_tag'], vif_type=fake_port_ctx['vif_type'], app_id=fake_port_ctx['app_id'], allocate_addresses=fake_port_ctx['allocate_addresses']) fake_port['display_name'] = new_name fake_port['description'] = new_desc fake_port['attachment'] = { 'attachment_type': 'VIF', 'id': fake_port['attachment']['id'], 'context': { 'resource_type': 'VifAttachmentContext', 'allocate_addresses': 'Both', 'parent_vif_id': fake_container_host_vif_id, 'traffic_tag': fake_port_ctx['vlan_tag'], 'app_id': fake_port_ctx['app_id'], 'vif_type': 'CHILD', } } test_client.assert_json_call( 'put', mocked_resource, 'https://1.2.3.4/api/v1/logical-ports/%s' % fake_port['id'], data=jsonutils.dumps(fake_port, sort_keys=True), headers=self.default_headers()) def test_update_logical_port_with_addr_binding(self): fake_port = copy.deepcopy(test_constants.FAKE_CONTAINER_PORT) mocked_resource = self.get_mocked_resource() new_name = 'updated_port' new_desc = 'updated' fake_port_ctx = fake_port['attachment']['context'] fake_container_host_vif_id = fake_port_ctx['container_host_vif_id'] pkt_classifiers, binding_repr = self._get_pktcls_bindings() def get_fake_port(*args, **kwargs): return copy.copy(fake_port) mocked_resource.client.get = get_fake_port mocked_resource.update( fake_port['id'], fake_port['attachment']['id'], name=new_name, description=new_desc, parent_vif_id=fake_container_host_vif_id, traffic_tag=fake_port_ctx['vlan_tag'], vif_type=fake_port_ctx['vif_type'], app_id=fake_port_ctx['app_id'], 
allocate_addresses=fake_port_ctx['allocate_addresses'], address_bindings=pkt_classifiers) fake_port['display_name'] = new_name fake_port['description'] = new_desc fake_port['attachment'] = { 'attachment_type': 'VIF', 'id': fake_port['attachment']['id'], 'context': { 'resource_type': 'VifAttachmentContext', 'allocate_addresses': 'Both', 'parent_vif_id': fake_container_host_vif_id, 'traffic_tag': fake_port_ctx['vlan_tag'], 'app_id': fake_port_ctx['app_id'], 'vif_type': 'CHILD', } } fake_port['address_bindings'] = binding_repr test_client.assert_json_call( 'put', mocked_resource, 'https://1.2.3.4/api/v1/logical-ports/%s' % fake_port['id'], data=jsonutils.dumps(fake_port, sort_keys=True), headers=self.default_headers()) class LogicalRouterTestCase(BaseTestResource): def setUp(self): super(LogicalRouterTestCase, self).setUp( core_resources.NsxLibLogicalRouter) def test_create_logical_router(self): """Test creating a router returns the correct response and 201 status. """ fake_router = test_constants.FAKE_ROUTER.copy() router = self.get_mocked_resource() tier0_router = True description = 'dummy' router.create(fake_router['display_name'], None, None, tier0_router, description=description) data = { 'display_name': fake_router['display_name'], 'router_type': 'TIER0' if tier0_router else 'TIER1', 'tags': None, 'description': description } test_client.assert_json_call( 'post', router, 'https://1.2.3.4/api/v1/logical-routers', data=jsonutils.dumps(data, sort_keys=True), headers=self.default_headers()) def test_force_delete_logical_router(self): """Test force deleting router""" router = self.get_mocked_resource() uuid = test_constants.FAKE_ROUTER['id'] router.delete(uuid, True) test_client.assert_json_call( 'delete', router, 'https://1.2.3.4/api/v1/logical-routers/%s?force=True' % uuid, headers=self.default_headers()) def test_list_logical_router_by_type(self): router = self.get_mocked_resource() router_type = 'TIER0' router.list(router_type=router_type) test_client.assert_json_call( 'get', router, 'https://1.2.3.4/api/v1/logical-routers?router_type=%s' % router_type) def test_get_logical_router_fw_section(self): fake_router = test_constants.FAKE_ROUTER.copy() router = self.get_mocked_resource() section_id = router.get_firewall_section_id( test_constants.FAKE_ROUTER_UUID, router_body=fake_router) self.assertEqual(test_constants.FAKE_ROUTER_FW_SEC_UUID, section_id) def _test_nat_rule_create(self, nsx_version, add_bypas_arg): router = self.get_mocked_resource() action = 'SNAT' translated_net = '1.1.1.1' priority = 10 data = { 'action': action, 'enabled': True, 'translated_network': translated_net, 'rule_priority': priority } if add_bypas_arg: # Expect nat_pass to be sent to the backend data['nat_pass'] = False # Ignoring 'bypass_firewall' with version 1.1 with mock.patch("vmware_nsxlib.v3.NsxLib.get_version", return_value=nsx_version): router.add_nat_rule(test_constants.FAKE_ROUTER_UUID, action=action, translated_network=translated_net, rule_priority=priority, bypass_firewall=False) test_client.assert_json_call( 'post', router, ('https://1.2.3.4/api/v1/logical-routers/%s/nat/rules' % test_constants.FAKE_ROUTER_UUID), data=jsonutils.dumps(data, sort_keys=True), headers=self.default_headers()) def test_nat_rule_create_v1(self): # Ignoring 'bypass_firewall' with version 1.1 self._test_nat_rule_create('1.1.0', False) def test_nat_rule_create_v2(self): # Sending 'bypass_firewall' with version 1.1 self._test_nat_rule_create('2.0.0', True) def test_nat_rule_list(self): router = self.get_mocked_resource() 
router.list_nat_rules(test_constants.FAKE_ROUTER_UUID) test_client.assert_json_call( 'get', router, ('https://1.2.3.4/api/v1/logical-routers/%s/nat/rules' % test_constants.FAKE_ROUTER_UUID), headers=self.default_headers()) def test_nat_rule_update(self): router = self.get_mocked_resource() rule_id = '123' with mock.patch.object(router.client, 'get', return_value={'id': rule_id}): router.update_nat_rule(test_constants.FAKE_ROUTER_UUID, rule_id, nat_pass=False) data = {'id': rule_id, 'nat_pass': False} test_client.assert_json_call( 'put', router, ('https://1.2.3.4/api/v1/logical-routers/%s/nat/rules/%s' % (test_constants.FAKE_ROUTER_UUID, rule_id)), data=jsonutils.dumps(data, sort_keys=True), headers=self.default_headers()) def test_delete_nat_rule_by_gw(self): router = self.get_mocked_resource() rule_id = '123' router_id = test_constants.FAKE_ROUTER_UUID gw_ip = '3.3.3.3' existing_rules = [{ 'translated_network': gw_ip, 'logical_router_id': router_id, 'id': rule_id, 'action': 'SNAT', 'resource_type': 'NatRule'}] with mock.patch.object(router.client, 'list', return_value={'results': existing_rules}): router.delete_nat_rule_by_values(router_id, translated_network=gw_ip) test_client.assert_json_call( 'delete', router, ('https://1.2.3.4/api/v1/logical-routers/%s/nat/rules/%s' % (router_id, rule_id)), headers=self.default_headers()) def test_delete_nat_rule_by_gw_and_source(self): router = self.get_mocked_resource() rule_id = '123' router_id = test_constants.FAKE_ROUTER_UUID gw_ip = '3.3.3.3' source_net = '4.4.4.4' existing_rules = [{ 'translated_network': gw_ip, 'logical_router_id': router_id, 'id': rule_id, 'match_source_network': source_net, 'action': 'SNAT', 'resource_type': 'NatRule'}] with mock.patch.object(router.client, 'list', return_value={'results': existing_rules}): router.delete_nat_rule_by_values(router_id, translated_network=gw_ip, match_source_network=source_net) test_client.assert_json_call( 'delete', router, ('https://1.2.3.4/api/v1/logical-routers/%s/nat/rules/%s' % (router_id, rule_id)), headers=self.default_headers()) def test_update_advertisement(self): router = self.get_mocked_resource() router_id = test_constants.FAKE_ROUTER_UUID data = {'advertise_nat_routes': 'a', 'advertise_nsx_connected_routes': 'b', 'advertise_static_routes': False, 'enabled': True, 'advertise_lb_vip': False, 'advertise_lb_snat_ip': False} with mock.patch("vmware_nsxlib.v3.NsxLib.get_version", return_value='2.1.0'), \ mock.patch.object(router.client, 'get', return_value={}): router.update_advertisement( router_id, **data) test_client.assert_json_call( 'put', router, ('https://1.2.3.4/api/v1/logical-routers/%s/routing/' 'advertisement' % router_id), data=jsonutils.dumps(data, sort_keys=True), headers=self.default_headers()) def test_update_advertisement_no_lb(self): router = self.get_mocked_resource() router_id = test_constants.FAKE_ROUTER_UUID data = {'advertise_nat_routes': 'a', 'advertise_nsx_connected_routes': 'b', 'advertise_static_routes': False, 'enabled': True} with mock.patch("vmware_nsxlib.v3.NsxLib.get_version", return_value='1.1.0'), \ mock.patch.object(router.client, 'get', return_value={}): # lb args will be ignored on this nsx version router.update_advertisement( router_id, advertise_lb_vip=False, advertise_lb_snat_ip=False, **data) test_client.assert_json_call( 'put', router, ('https://1.2.3.4/api/v1/logical-routers/%s/routing/' 'advertisement' % router_id), data=jsonutils.dumps(data, sort_keys=True), headers=self.default_headers()) def test_update_advertisement_rules(self): router = 
self.get_mocked_resource() router_id = test_constants.FAKE_ROUTER_UUID rules = [{"action": "ALLOW", "networks": ["44.0.0.0/20"], "display_name": "rule1"}, {"action": "ALLOW", "networks": ["6.60.0.0/20"], "display_name": "rule2"}] with mock.patch.object(router.client, 'get', return_value={}): router.update_advertisement_rules(router_id, rules) test_client.assert_json_call( 'put', router, ('https://1.2.3.4/api/v1/logical-routers/%s/routing/' 'advertisement/rules' % router_id), data=jsonutils.dumps({'rules': rules}, sort_keys=True), headers=self.default_headers()) def test_get_advertisement_rules(self): router = self.get_mocked_resource() router_id = test_constants.FAKE_ROUTER_UUID router.get_advertisement_rules(router_id) test_client.assert_json_call( 'get', router, ('https://1.2.3.4/api/v1/logical-routers/%s/routing/' 'advertisement/rules' % router_id), headers=self.default_headers()) class LogicalRouterPortTestCase(BaseTestResource): def setUp(self): super(LogicalRouterPortTestCase, self).setUp( resources.LogicalRouterPort) def test_create_logical_router_port(self): """Test creating a router port. returns the correct response and 201 status """ fake_router_port = test_constants.FAKE_ROUTER_PORT.copy() fake_relay_uuid = uuidutils.generate_uuid() lrport = self.get_mocked_resource() data = { 'display_name': fake_router_port['display_name'], 'logical_router_id': fake_router_port['logical_router_id'], 'resource_type': fake_router_port['resource_type'], 'tags': [], 'service_bindings': [{'service_id': { 'target_type': 'LogicalService', 'target_id': fake_relay_uuid}}] } with mock.patch("vmware_nsxlib.v3.NsxLib.get_version", return_value='2.0.0'): lrport.create(fake_router_port['logical_router_id'], fake_router_port['display_name'], None, fake_router_port['resource_type'], None, None, None, relay_service_uuid=fake_relay_uuid) test_client.assert_json_call( 'post', lrport, 'https://1.2.3.4/api/v1/logical-router-ports', data=jsonutils.dumps(data, sort_keys=True), headers=self.default_headers()) def test_logical_router_port_max_attempts(self): """Test a router port api has the configured retries.""" lrport = self.get_mocked_resource() self.assertEqual(nsxlib_testcase.NSX_MAX_ATTEMPTS, lrport.client.max_attempts) def test_update_logical_router_port(self): fake_router_port = test_constants.FAKE_ROUTER_PORT.copy() uuid = fake_router_port['id'] fake_relay_uuid = uuidutils.generate_uuid() lrport = self.get_mocked_resource() with mock.patch.object(lrport.client, 'get', return_value=fake_router_port),\ mock.patch("vmware_nsxlib.v3.NsxLib.get_version", return_value='2.0.0'): lrport.update(uuid, relay_service_uuid=fake_relay_uuid) data = { 'id': uuid, 'display_name': fake_router_port['display_name'], 'logical_router_id': fake_router_port['logical_router_id'], 'resource_type': fake_router_port['resource_type'], "revision": 0, 'service_bindings': [{'service_id': { 'target_type': 'LogicalService', 'target_id': fake_relay_uuid}}] } test_client.assert_json_call( 'put', lrport, 'https://1.2.3.4/api/v1/logical-router-ports/%s' % uuid, data=jsonutils.dumps(data, sort_keys=True), headers=self.default_headers()) def test_get_logical_router_port_by_router_id(self): """Test getting a router port by router id.""" fake_router_port = test_constants.FAKE_ROUTER_PORT.copy() resp_resources = {'results': [fake_router_port]} lrport = self.get_mocked_resource(response=resp_resources) router_id = fake_router_port['logical_router_id'] result = lrport.get_by_router_id(router_id) self.assertEqual(fake_router_port, result[0]) 
test_client.assert_json_call( 'get', lrport, 'https://1.2.3.4/api/v1/logical-router-ports/?' 'logical_router_id=%s' % router_id, headers=self.default_headers()) def test_get_logical_router_port_by_switch_id(self): """Test getting a router port by switch id.""" fake_router_port = test_constants.FAKE_ROUTER_PORT.copy() resp_resources = { 'result_count': 1, 'results': [fake_router_port] } lrport = self.get_mocked_resource(response=resp_resources) switch_id = test_constants.FAKE_SWITCH_UUID lrport.get_by_lswitch_id(switch_id) test_client.assert_json_call( 'get', lrport, 'https://1.2.3.4/api/v1/logical-router-ports/?' 'logical_switch_id=%s' % switch_id, headers=self.default_headers()) class IpPoolTestCase(BaseTestResource): def setUp(self): super(IpPoolTestCase, self).setUp(resources.IpPool) def test_create_ip_pool_all_args(self): """Test creating an IP pool returns the correct response and 201 status """ pool = self.get_mocked_resource() display_name = 'dummy' gateway_ip = '1.1.1.1' ranges = [{'start': '2.2.2.0', 'end': '2.2.2.255'}, {'start': '3.2.2.0', 'end': '3.2.2.255'}] cidr = '2.2.2.0/24' description = 'desc' dns_nameserver = '7.7.7.7' pool.create(cidr, allocation_ranges=ranges, display_name=display_name, gateway_ip=gateway_ip, description=description, dns_nameservers=[dns_nameserver]) data = { 'display_name': display_name, 'description': description, 'subnets': [{ 'gateway_ip': gateway_ip, 'allocation_ranges': ranges, 'cidr': cidr, 'dns_nameservers': [dns_nameserver] }] } test_client.assert_json_call( 'post', pool, 'https://1.2.3.4/api/v1/pools/ip-pools', data=jsonutils.dumps(data, sort_keys=True), headers=self.default_headers()) def test_create_ip_pool_minimal_args(self): pool = self.get_mocked_resource() ranges = [{'start': '2.2.2.0', 'end': '2.2.2.255'}, {'start': '3.2.2.0', 'end': '3.2.2.255'}] cidr = '2.2.2.0/24' pool.create(cidr, allocation_ranges=ranges) data = { 'subnets': [{ 'allocation_ranges': ranges, 'cidr': cidr, }] } test_client.assert_json_call( 'post', pool, 'https://1.2.3.4/api/v1/pools/ip-pools', data=jsonutils.dumps(data, sort_keys=True), headers=self.default_headers()) def test_create_ip_pool_no_ranges_with_gateway(self): pool = self.get_mocked_resource() cidr = '2.2.2.0/30' gateway_ip = '2.2.2.1' pool.create(cidr, allocation_ranges=None, gateway_ip=gateway_ip) exp_ranges = [{'start': '2.2.2.0', 'end': '2.2.2.0'}, {'start': '2.2.2.2', 'end': '2.2.2.3'}] data = { 'subnets': [{ 'gateway_ip': gateway_ip, 'allocation_ranges': exp_ranges, 'cidr': cidr, }] } test_client.assert_json_call( 'post', pool, 'https://1.2.3.4/api/v1/pools/ip-pools', data=jsonutils.dumps(data, sort_keys=True), headers=self.default_headers()) def test_create_ip_pool_no_ranges_no_gateway(self): pool = self.get_mocked_resource() cidr = '2.2.2.0/30' pool.create(cidr, allocation_ranges=None) exp_ranges = [{'start': '2.2.2.0', 'end': '2.2.2.3'}] data = { 'subnets': [{ 'allocation_ranges': exp_ranges, 'cidr': cidr, }] } test_client.assert_json_call( 'post', pool, 'https://1.2.3.4/api/v1/pools/ip-pools', data=jsonutils.dumps(data, sort_keys=True), headers=self.default_headers()) def test_create_ip_pool_no_cidr(self): pool = self.get_mocked_resource() gateway_ip = '1.1.1.1' ranges = [{'start': '2.2.2.0', 'end': '2.2.2.255'}, {'start': '3.2.2.0', 'end': '3.2.2.255'}] cidr = None try: pool.create(cidr, allocation_ranges=ranges, gateway_ip=gateway_ip) except exceptions.InvalidInput: # This call should fail pass else: self.fail("shouldn't happen") def test_update_ip_pool_name(self): fake_ip_pool = 
test_constants.FAKE_IP_POOL.copy() pool = self.get_mocked_resource(response=fake_ip_pool) uuid = fake_ip_pool['id'] new_name = 'new_name' pool.update(uuid, display_name=new_name) fake_ip_pool['display_name'] = new_name test_client.assert_json_call( 'put', pool, 'https://1.2.3.4/api/v1/pools/ip-pools/%s' % uuid, data=jsonutils.dumps(fake_ip_pool, sort_keys=True), headers=self.default_headers()) def test_update_ip_pool_gateway(self): fake_ip_pool = test_constants.FAKE_IP_POOL.copy() pool = self.get_mocked_resource(response=fake_ip_pool) uuid = fake_ip_pool['id'] new_gateway = '1.0.0.1' pool.update(uuid, gateway_ip=new_gateway) fake_ip_pool["subnets"][0]['gateway_ip'] = new_gateway test_client.assert_json_call( 'put', pool, 'https://1.2.3.4/api/v1/pools/ip-pools/%s' % uuid, data=jsonutils.dumps(fake_ip_pool, sort_keys=True), headers=self.default_headers()) def test_update_ip_pool_delete_gateway(self): fake_ip_pool = test_constants.FAKE_IP_POOL.copy() pool = self.get_mocked_resource(response=fake_ip_pool) uuid = fake_ip_pool['id'] pool.update(uuid, gateway_ip=None) del fake_ip_pool["subnets"][0]['gateway_ip'] test_client.assert_json_call( 'put', pool, 'https://1.2.3.4/api/v1/pools/ip-pools/%s' % uuid, data=jsonutils.dumps(fake_ip_pool, sort_keys=True), headers=self.default_headers()) def test_allocate_ip_from_pool(self): pool = self.get_mocked_resource() uuid = test_constants.FAKE_IP_POOL['id'] addr = '1.1.1.1' pool.allocate(uuid, ip_addr=addr) data = {'allocation_id': addr} test_client.assert_json_call( 'post', pool, 'https://1.2.3.4/api/v1/pools/ip-pools/%s?action=ALLOCATE' % uuid, data=jsonutils.dumps(data, sort_keys=True), headers=self.default_headers()) def test_release_ip_to_pool(self): pool = self.get_mocked_resource() uuid = test_constants.FAKE_IP_POOL['id'] addr = '1.1.1.1' pool.release(uuid, addr) data = {'allocation_id': addr} test_client.assert_json_call( 'post', pool, 'https://1.2.3.4/api/v1/pools/ip-pools/%s?action=RELEASE' % uuid, data=jsonutils.dumps(data, sort_keys=True), headers=self.default_headers()) def test_get_ip_pool_allocations(self): """Test getting a router port by router id""" fake_ip_pool = test_constants.FAKE_IP_POOL.copy() pool = self.get_mocked_resource(response=fake_ip_pool) uuid = fake_ip_pool['id'] result = pool.get_allocations(uuid) self.assertEqual(fake_ip_pool, result) test_client.assert_json_call( 'get', pool, 'https://1.2.3.4/api/v1/pools/ip-pools/%s/allocations' % uuid, headers=self.default_headers()) class TestNsxSearch(nsxlib_testcase.NsxClientTestCase): def test_nsx_search_tags(self): """Test search of resources with the specified tag.""" with mock.patch.object(self.nsxlib.client, 'url_get') as search: user_tags = [{'scope': 'user', 'tag': 'k8s'}] query = self.nsxlib._build_query(tags=user_tags) self.nsxlib.search_by_tags(tags=user_tags) search.assert_called_with('search?query=%s' % query) def test_nsx_search_tags_scope_only(self): """Test search of resources with the specified tag.""" with mock.patch.object(self.nsxlib.client, 'url_get') as search: user_tags = [{'scope': 'user'}] query = self.nsxlib._build_query(tags=user_tags) self.nsxlib.search_by_tags(tags=user_tags) search.assert_called_with('search?query=%s' % query) def test_nsx_search_tags_tag_only(self): """Test search of resources with the specified tag.""" with mock.patch.object(self.nsxlib.client, 'url_get') as search: user_tags = [{'tag': 'k8s'}] query = self.nsxlib._build_query(tags=user_tags) self.nsxlib.search_by_tags(tags=user_tags) search.assert_called_with('search?query=%s' % 
query) def test_nsx_search_tags_tag_and_scope(self): """Test search of resources with the specified tag.""" with mock.patch.object(self.nsxlib.client, 'url_get') as search: user_tags = [{'tag': 'k8s'}, {'scope': 'user'}] query = self.nsxlib._build_query(tags=user_tags) self.nsxlib.search_by_tags(tags=user_tags) search.assert_called_with('search?query=%s' % query) def test_nsx_search_tags_and_resource_type(self): """Test search of specified resource with the specified tag.""" with mock.patch.object(self.nsxlib.client, 'url_get') as search: user_tags = [{'scope': 'user', 'tag': 'k8s'}] res_type = 'LogicalPort' query = self.nsxlib._build_query(tags=user_tags) # Add resource_type to the query query = "resource_type:%s AND %s" % (res_type, query) self.nsxlib.search_by_tags(tags=user_tags, resource_type=res_type) search.assert_called_with('search?query=%s' % query) def test_nsx_search_tags_and_cursor(self): """Test search of resources with the specified tag and cursor.""" with mock.patch.object(self.nsxlib.client, 'url_get') as search: user_tags = [{'scope': 'user', 'tag': 'k8s'}] query = self.nsxlib._build_query(tags=user_tags) self.nsxlib.search_by_tags(tags=user_tags, cursor=50) search.assert_called_with('search?query=%s&cursor=50' % query) def test_nsx_search_tags_and_page_size(self): """Test search of resources with the specified tag and page size.""" with mock.patch.object(self.nsxlib.client, 'url_get') as search: user_tags = [{'scope': 'user', 'tag': 'k8s'}] query = self.nsxlib._build_query(tags=user_tags) self.nsxlib.search_by_tags(tags=user_tags, page_size=100) search.assert_called_with('search?query=%s&page_size=100' % query) def test_nsx_search_invalid_query_fail(self): """Test search query failure for missing tag argument.""" self.assertRaises(exceptions.NsxSearchInvalidQuery, self.nsxlib.search_by_tags, tags=None, resource_type=None) def test_nsx_search_invalid_tags_fail(self): """Test search of resources with the invalid tag.""" user_tags = [{'scope': 'user', 'invalid_tag_key': 'k8s'}] self.assertRaises(exceptions.NsxSearchInvalidQuery, self.nsxlib._build_query, tags=user_tags) def test_nsx_search_all_by_tags(self): """Test search all of resources with the specified tag.""" with mock.patch.object(self.nsxlib.client, 'url_get') as search: search.side_effect = [ {"cursor": "2", "result_count": 3, "results": [{"id": "s1"}, {"id": "s2"}]}, {"cursor": "3", "result_count": 3, "results": [{"id": "s3"}]}] user_tags = [{'scope': 'user', 'tag': 'k8s'}] query = self.nsxlib._build_query(tags=user_tags) results = self.nsxlib.search_all_by_tags(tags=user_tags) search.assert_has_calls([ mock.call('search?query=%s' % query), mock.call('search?query=%s&cursor=2' % query)]) self.assertEqual(3, len(results)) def test_get_id_by_resource_and_tag(self): id = 'test' scope = 'user' tag = 'k8s' res_type = 'LogicalPort' results = {'result_count': 1, 'results': [{'id': id}]} with mock.patch.object(self.nsxlib.client, 'url_get', return_value=results): actual_id = self.nsxlib.get_id_by_resource_and_tag( res_type, scope, tag) self.assertEqual(id, actual_id) def test_get_id_by_resource_and_tag_not_found(self): scope = 'user' tag = 'k8s' res_type = 'LogicalPort' results = {'result_count': 0, 'results': []} with mock.patch.object(self.nsxlib.client, 'url_get', return_value=results): self.assertRaises(exceptions.ResourceNotFound, self.nsxlib.get_id_by_resource_and_tag, res_type, scope, tag, alert_not_found=True) def test_get_id_by_resource_and_tag_multiple(self): scope = 'user' tag = 'k8s' res_type = 'LogicalPort' 
results = {'result_count': 2, 'results': [{'id': '1'}, {'id': '2'}]} with mock.patch.object(self.nsxlib.client, 'url_get', return_value=results): self.assertRaises(exceptions.ManagerError, self.nsxlib.get_id_by_resource_and_tag, res_type, scope, tag, alert_multiple=True) class TransportZone(BaseTestResource): def setUp(self): super(TransportZone, self).setUp(core_resources.NsxLibTransportZone) def test_get_transport_zone_type(self): fake_tz = test_constants.FAKE_TZ.copy() tz = self.get_mocked_resource() with mock.patch.object(tz.client, 'url_get', return_value=fake_tz): tz_type = tz.get_transport_type(fake_tz['id']) self.assertEqual(tz.TRANSPORT_TYPE_OVERLAY, tz_type) # call it again to test it when cached tz_type = tz.get_transport_type(fake_tz['id']) self.assertEqual(tz.TRANSPORT_TYPE_OVERLAY, tz_type) def test_get_host_switch_mode(self): fake_tz = test_constants.FAKE_TZ.copy() tz = self.get_mocked_resource() with mock.patch.object(tz.client, 'url_get', return_value=fake_tz): tz_mode = tz.get_host_switch_mode(fake_tz['id']) self.assertEqual(tz.HOST_SWITCH_MODE_STANDARD, tz_mode) class MetadataProxy(BaseTestResource): def setUp(self): super(MetadataProxy, self).setUp(core_resources.NsxLibMetadataProxy) def test_update_metadata_proxy(self): fake_md = test_constants.FAKE_MD.copy() md = self.get_mocked_resource() new_url = "http://2.2.2.20:3500/xyz" new_secret = 'abc' new_edge = uuidutils.generate_uuid() with mock.patch.object(md.client, 'url_get', return_value=fake_md): md.update(fake_md['id'], server_url=new_url, secret=new_secret, edge_cluster_id=new_edge) fake_md.update({'metadata_server_url': new_url, 'secret': new_secret, 'edge_cluster_id': new_edge}) test_client.assert_json_call( 'put', md, 'https://1.2.3.4/api/v1/md-proxies/%s' % fake_md['id'], data=jsonutils.dumps(fake_md, sort_keys=True), headers=self.default_headers()) class NsxLibSwitchTestCase(BaseTestResource): def setUp(self): super(NsxLibSwitchTestCase, self).setUp( core_resources.NsxLibLogicalSwitch) self._tz_id = uuidutils.generate_uuid() def _create_body(self, admin_state=nsx_constants.ADMIN_STATE_UP, vlan_id=None, description=None, trunk_vlan=None): body = { "transport_zone_id": self._tz_id, "replication_mode": "MTEP", "display_name": "fake_name", "tags": [], "admin_state": admin_state } if vlan_id: body['vlan'] = vlan_id if description is not None: body['description'] = description if trunk_vlan: body['vlan_trunk_spec'] = { 'vlan_ranges': [{'start': trunk_vlan[0], 'end': trunk_vlan[1]}]} return body def test_create_logical_switch(self): """Test creating a switch returns the correct response and 200 status """ desc = 'dummy' ls = self.get_mocked_resource() ls.create(mocks.FAKE_NAME, self._tz_id, [], description=desc) data = self._create_body(description=desc) test_client.assert_json_call( 'post', ls, 'https://1.2.3.4/api/v1/logical-switches', data=jsonutils.dumps(data, sort_keys=True), headers=self.default_headers()) def test_create_logical_switch_admin_down(self): """Test creating switch with admin_state down""" ls = self.get_mocked_resource() ls.create(mocks.FAKE_NAME, self._tz_id, [], admin_state=False) data = self._create_body(admin_state=nsx_constants.ADMIN_STATE_DOWN) test_client.assert_json_call( 'post', ls, 'https://1.2.3.4/api/v1/logical-switches', data=jsonutils.dumps(data, sort_keys=True), headers=self.default_headers()) def test_create_logical_switch_vlan(self): """Test creating switch with provider:network_type VLAN""" ls = self.get_mocked_resource() vlan_id = '123' ls.create(mocks.FAKE_NAME, self._tz_id, [], 
vlan_id=vlan_id) data = self._create_body(vlan_id=vlan_id) test_client.assert_json_call( 'post', ls, 'https://1.2.3.4/api/v1/logical-switches', data=jsonutils.dumps(data, sort_keys=True), headers=self.default_headers()) def test_create_logical_switch_trunk(self): """Test creating switch with trunk vlan""" ls = self.get_mocked_resource() trunk_vlan = [10, 20] with mock.patch("vmware_nsxlib.v3.NsxLib.get_version", return_value='2.2.0'): ls.create(mocks.FAKE_NAME, self._tz_id, [], trunk_vlan_range=trunk_vlan) data = self._create_body(trunk_vlan=trunk_vlan) test_client.assert_json_call( 'post', ls, 'https://1.2.3.4/api/v1/logical-switches', data=jsonutils.dumps(data, sort_keys=True), headers=self.default_headers()) def test_create_logical_switch_trunk_not_supported(self): """Test creating switch with trunk vlan without the support""" ls = self.get_mocked_resource() trunk_vlan = [10, 20] with mock.patch("vmware_nsxlib.v3.NsxLib.get_version", return_value='2.0.0'): self.assertRaises(exceptions.InvalidInput, ls.create, mocks.FAKE_NAME, self._tz_id, [], trunk_vlan_range=trunk_vlan) def test_create_logical_switch_trunk_with_vlan(self): """Test creating switch with trunk vlan and vlan tag""" ls = self.get_mocked_resource() trunk_vlan = [10, 20] with mock.patch("vmware_nsxlib.v3.NsxLib.get_version", return_value='2.2.0'): self.assertRaises(exceptions.InvalidInput, ls.create, mocks.FAKE_NAME, self._tz_id, [], trunk_vlan_range=trunk_vlan, vlan_id='111') def test_create_logical_switch_illegal_trunk(self): """Test creating switch with illegal trunk vlan""" ls = self.get_mocked_resource() trunk_vlan = [10] with mock.patch("vmware_nsxlib.v3.NsxLib.get_version", return_value='2.2.0'): self.assertRaises(exceptions.InvalidInput, ls.create, mocks.FAKE_NAME, self._tz_id, [], trunk_vlan_range=trunk_vlan) def test_delete_resource(self): """Test deleting switch""" super(NsxLibSwitchTestCase, self).test_delete_resource( extra_params='detach=true&cascade=true') class NsxLibPortMirrorTestCase(BaseTestResource): def setUp(self): super(NsxLibPortMirrorTestCase, self).setUp( core_resources.NsxLibPortMirror) class NsxLibBridgeEndpointTestCase(BaseTestResource): def setUp(self): super(NsxLibBridgeEndpointTestCase, self).setUp( core_resources.NsxLibBridgeEndpoint) class NsxLibEdgeClusterTestCase(BaseTestResource): def setUp(self): super(NsxLibEdgeClusterTestCase, self).setUp( core_resources.NsxLibEdgeCluster) class NsxLibDhcpProfileTestCase(BaseTestResource): def setUp(self): super(NsxLibDhcpProfileTestCase, self).setUp( core_resources.NsxLibDhcpProfile) class NsxLibDhcpRelayServiceTestCase(BaseTestResource): def setUp(self): super(NsxLibDhcpRelayServiceTestCase, self).setUp( core_resources.NsxLibDhcpRelayService) def test_server_ips(self): fake_srv = test_constants.FAKE_RELAY_SERVICE.copy() relay_service = self.get_mocked_resource() with mock.patch.object(relay_service.client, 'url_get', return_value=fake_srv), \ mock.patch.object(self.nsxlib.client, 'url_get', return_value=test_constants.FAKE_RELAY_PROFILE): server_ips = relay_service.get_server_ips(fake_srv['id']) self.assertEqual(1, len(server_ips)) self.assertEqual(test_constants.FAKE_RELAY_SERVER, server_ips[0]) class NsxLibDhcpRelayProfileTestCase(BaseTestResource): def setUp(self): super(NsxLibDhcpRelayProfileTestCase, self).setUp( core_resources.NsxLibDhcpRelayProfile) def test_server_ips(self): fake_prf = test_constants.FAKE_RELAY_PROFILE.copy() relay_profile = self.get_mocked_resource() with mock.patch.object(relay_profile.client, 'url_get', 
return_value=fake_prf): server_ips = relay_profile.get_server_ips(fake_prf['id']) self.assertEqual(1, len(server_ips)) self.assertEqual(test_constants.FAKE_RELAY_SERVER, server_ips[0]) class NsxLibBridgeClusterTestCase(BaseTestResource): def setUp(self): super(NsxLibBridgeClusterTestCase, self).setUp( core_resources.NsxLibBridgeCluster) class NsxLibIpBlockSubnetTestCase(BaseTestResource): def setUp(self): super(NsxLibIpBlockSubnetTestCase, self).setUp( core_resources.NsxLibIpBlockSubnet) def test_list_all(self): if not self.resource: return mocked_resource = self.get_mocked_resource() block_id = '7' mocked_resource.list(block_id) test_client.assert_json_call( 'get', mocked_resource, 'https://1.2.3.4/api/v1/%s?block_id=%s' % (mocked_resource.uri_segment, block_id), headers=self.default_headers()) class NsxLibIpBlockTestCase(BaseTestResource): def setUp(self): super(NsxLibIpBlockTestCase, self).setUp( core_resources.NsxLibIpBlock) class NsxLibFabricVirtualInterfaceTestCase(BaseTestResource): def setUp(self): super(NsxLibFabricVirtualInterfaceTestCase, self).setUp( core_resources.NsxLibFabricVirtualInterface) def test_get_by_owner_vm_id(self): mocked_resource = self.get_mocked_resource() vm_id = uuidutils.generate_uuid() mocked_resource.get_by_owner_vm_id(vm_id) test_client.assert_json_call( 'get', mocked_resource, 'https://1.2.3.4/api/v1/%s?owner_vm_id=%s' % (mocked_resource.uri_segment, vm_id), headers=self.default_headers()) class NsxLibFabricVirtualMachineTestCase(BaseTestResource): def setUp(self): super(NsxLibFabricVirtualMachineTestCase, self).setUp( core_resources.NsxLibFabricVirtualMachine) def test_get_by_display_name(self): mocked_resource = self.get_mocked_resource() display_name = 'some-vm-name' mocked_resource.get_by_display_name(display_name) test_client.assert_json_call( 'get', mocked_resource, 'https://1.2.3.4/api/v1/%s?display_name=%s' % (mocked_resource.uri_segment, display_name), headers=self.default_headers()) class LogicalDhcpServerTestCase(BaseTestResource): def setUp(self): super(LogicalDhcpServerTestCase, self).setUp( resources.LogicalDhcpServer) def test_update_empty_dhcp_server(self): mocked_resource = self.get_mocked_resource() server_uuid = 'server-uuid' ip = '1.1.1.1' with mock.patch.object(mocked_resource.client, "get", return_value={}): mocked_resource.update(server_uuid, server_ip=ip) body = {'ipv4_dhcp_server': {'dhcp_server_ip': ip}} test_client.assert_json_call( 'put', mocked_resource, 'https://1.2.3.4/api/v1/%s/%s' % (mocked_resource.uri_segment, server_uuid), data=jsonutils.dumps(body, sort_keys=True), headers=self.default_headers()) def test_update_dhcp_server_new_val(self): mocked_resource = self.get_mocked_resource() server_uuid = 'server-uuid' ip = '1.1.1.1' domain_name = 'dummy' existing_server = {'ipv4_dhcp_server': {'domain_name': domain_name}} # add the server ip with mock.patch.object(mocked_resource.client, "get", return_value=existing_server): mocked_resource.update(server_uuid, server_ip=ip) existing_server['ipv4_dhcp_server']['dhcp_server_ip'] = ip test_client.assert_json_call( 'put', mocked_resource, 'https://1.2.3.4/api/v1/%s/%s' % (mocked_resource.uri_segment, server_uuid), data=jsonutils.dumps(existing_server, sort_keys=True), headers=self.default_headers()) def test_update_dhcp_server_replace_val(self): mocked_resource = self.get_mocked_resource() server_uuid = 'server-uuid' ip = '1.1.1.1' domain_name = 'dummy' existing_server = {'ipv4_dhcp_server': {'domain_name': domain_name, 'dhcp_server_ip': ip}} # replace the server ip new_ip = 
'2.2.2.2' with mock.patch.object(mocked_resource.client, "get", return_value=existing_server): mocked_resource.update(server_uuid, server_ip=new_ip) existing_server['ipv4_dhcp_server']['dhcp_server_ip'] = new_ip test_client.assert_json_call( 'put', mocked_resource, 'https://1.2.3.4/api/v1/%s/%s' % (mocked_resource.uri_segment, server_uuid), data=jsonutils.dumps(existing_server, sort_keys=True), headers=self.default_headers()) def test_create_binding(self): mocked_resource = self.get_mocked_resource() server_uuid = 'server-uuid' mac = 'aa:bb:cc:dd:ee:ff' ip = '1.1.1.1' host = 'host' mocked_resource.create_binding(server_uuid, mac, ip, hostname=host) body = { 'mac_address': mac, 'ip_address': ip, 'host_name': host, } test_client.assert_json_call( 'post', mocked_resource, 'https://1.2.3.4/api/v1/%s/%s/static-bindings' % (mocked_resource.uri_segment, server_uuid), data=jsonutils.dumps(body, sort_keys=True), headers=self.default_headers()) def test_get_binding(self): mocked_resource = self.get_mocked_resource() server_uuid = 'server-uuid' binding_uuid = 'binding-uuid' mocked_resource.get_binding(server_uuid, binding_uuid) test_client.assert_json_call( 'get', mocked_resource, 'https://1.2.3.4/api/v1/%s/%s/static-bindings/%s' % (mocked_resource.uri_segment, server_uuid, binding_uuid), headers=self.default_headers()) def test_update_binding(self): mocked_resource = self.get_mocked_resource() server_uuid = 'server-uuid' binding_uuid = 'binding-uuid' mac = 'aa:bb:cc:dd:ee:ff' new_mac = 'dd:bb:cc:dd:ee:ff' ip = '1.1.1.1' host = 'host' body = { 'mac_address': mac, 'ip_address': ip, 'host_name': host, } with mock.patch.object(mocked_resource.client, "get", return_value=body): mocked_resource.update_binding(server_uuid, binding_uuid, mac_address=new_mac) body['mac_address'] = new_mac test_client.assert_json_call( 'put', mocked_resource, 'https://1.2.3.4/api/v1/%s/%s/static-bindings/%s' % (mocked_resource.uri_segment, server_uuid, binding_uuid), data=jsonutils.dumps(body, sort_keys=True), headers=self.default_headers()) class NodeHttpServicePropertiesTestCase(BaseTestResource): def setUp(self): super(NodeHttpServicePropertiesTestCase, self).setUp( resources.NodeHttpServiceProperties) def test_get_resource(self): self.skipTest("The action is not supported by this resource") def test_list_all(self): self.skipTest("The action is not supported by this resource") def test_delete_resource(self): self.skipTest("The action is not supported by this resource") def test_get_rate_limit(self): mocked_resource = self.get_mocked_resource() rate_limit = 40 body = {'service_properties': {'client_api_rate_limit': rate_limit}} with mock.patch("vmware_nsxlib.v3.NsxLib.get_version", return_value='2.2.0'),\ mock.patch.object(mocked_resource.client, "url_get", return_value=body): result = mocked_resource.get_rate_limit() self.assertEqual(rate_limit, result) def test_update_rate_limit(self): mocked_resource = self.get_mocked_resource() old_rate_limit = 40 new_rate_limit = 50 body = {'service_properties': { 'client_api_rate_limit': old_rate_limit}} with mock.patch("vmware_nsxlib.v3.NsxLib.get_version", return_value='2.2.0'),\ mock.patch.object(mocked_resource.client, "url_get", return_value=body): mocked_resource.update_rate_limit(new_rate_limit) body['service_properties'][ 'client_api_rate_limit'] = new_rate_limit test_client.assert_json_call( 'put', mocked_resource, 'https://1.2.3.4/api/v1/node/services/http', data=jsonutils.dumps(body, sort_keys=True), headers=self.default_headers()) test_client.assert_json_call( 'post', 
mocked_resource, 'https://1.2.3.4/api/v1/node/services/http?action=restart', headers=self.default_headers()) class DummyCachedResource(utils.NsxLibApiBase): @property def uri_segment(self): return 'XXX' @property def resource_type(self): return 'xxx' @property def use_cache_for_get(self): return True @property def cache_timeout(self): return 2 class ResourceCache(BaseTestResource): def setUp(self): super(ResourceCache, self).setUp(DummyCachedResource) def test_get_with_cache(self): mocked_resource = self.get_mocked_resource() fake_uuid = uuidutils.generate_uuid() # first call -> goes to the client mocked_resource.get(fake_uuid) self.assertEqual(1, test_client.mock_calls_count( 'get', mocked_resource)) # second call -> goes to cache mocked_resource.get(fake_uuid) self.assertEqual(1, test_client.mock_calls_count( 'get', mocked_resource)) # a different call -> goes to the client fake_uuid2 = uuidutils.generate_uuid() mocked_resource.get(fake_uuid2) self.assertEqual(2, test_client.mock_calls_count( 'get', mocked_resource)) # third call -> still goes to cache mocked_resource.get(fake_uuid) self.assertEqual(2, test_client.mock_calls_count( 'get', mocked_resource)) # after timeout -> goes to the client eventlet.sleep(2) mocked_resource.get(fake_uuid) self.assertEqual(3, test_client.mock_calls_count( 'get', mocked_resource)) # after delete -> goes to the client mocked_resource.delete(fake_uuid) mocked_resource.get(fake_uuid) self.assertEqual(4, test_client.mock_calls_count( 'get', mocked_resource)) # And from cache again mocked_resource.get(fake_uuid) self.assertEqual(4, test_client.mock_calls_count( 'get', mocked_resource)) # Update the entry. The get inside the update is from # the client too, because it must be current) mocked_resource._update_with_retry(fake_uuid, {}) self.assertEqual(5, test_client.mock_calls_count( 'get', mocked_resource)) # after update -> goes to client mocked_resource.get(fake_uuid) self.assertEqual(6, test_client.mock_calls_count( 'get', mocked_resource)) vmware-nsxlib-12.0.1/vmware_nsxlib/tests/unit/v3/mocks.py0000666000175100017510000001723413244535763023461 0ustar zuulzuul00000000000000# Copyright (c) 2015 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
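# The helpers in this module provide an in-memory fake of the NSX manager
# REST API for the unit tests: MockRequestsResponse mimics the small subset of
# requests.Response that the client code relies on, and MockRequestSessionApi
# emulates a requests-style session (get/post/put/delete) backed by a plain
# dict keyed by URI.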
import requests import six.moves.urllib.parse as urlparse from oslo_serialization import jsonutils from oslo_utils import uuidutils from vmware_nsxlib.v3 import nsx_constants FAKE_NAME = "fake_name" DEFAULT_TIER0_ROUTER_UUID = "efad0078-9204-4b46-a2d8-d4dd31ed448f" NSX_BRIDGE_CLUSTER_NAME = 'default bridge cluster' FAKE_MANAGER = "fake_manager_ip" def make_fake_switch(switch_uuid=None, tz_uuid=None, name=FAKE_NAME): if not switch_uuid: switch_uuid = uuidutils.generate_uuid() if not tz_uuid: tz_uuid = uuidutils.generate_uuid() fake_switch = { "id": switch_uuid, "display_name": name, "resource_type": "LogicalSwitch", "address_bindings": [], "transport_zone_id": tz_uuid, "replication_mode": nsx_constants.MTEP, "admin_state": nsx_constants.ADMIN_STATE_UP, "vni": 50056, "switching_profile_ids": [ { "value": "64814784-7896-3901-9741-badeff705639", "key": "IpDiscoverySwitchingProfile" }, { "value": "fad98876-d7ff-11e4-b9d6-1681e6b88ec1", "key": "SpoofGuardSwitchingProfile" }, { "value": "93b4b7e8-f116-415d-a50c-3364611b5d09", "key": "PortMirroringSwitchingProfile" }, { "value": "fbc4fb17-83d9-4b53-a286-ccdf04301888", "key": "SwitchSecuritySwitchingProfile" }, { "value": "f313290b-eba8-4262-bd93-fab5026e9495", "key": "QosSwitchingProfile" } ], } return fake_switch def make_fake_dhcp_profile(): return {"id": uuidutils.generate_uuid(), "edge_cluster_id": uuidutils.generate_uuid(), "edge_cluster_member_indexes": [0, 1]} def make_fake_metadata_proxy(): return {"id": uuidutils.generate_uuid(), "metadata_server_url": "http://1.2.3.4", "secret": "my secret", "edge_cluster_id": uuidutils.generate_uuid(), "edge_cluster_member_indexes": [0, 1]} class MockRequestsResponse(object): def __init__(self, status_code, content=None): self.status_code = status_code self.content = content def json(self): return jsonutils.loads(self.content) class MockRequestSessionApi(object): def __init__(self): self._store = {} def _format_uri(self, uri): uri = urlparse.urlparse(uri).path while uri.endswith('/'): uri = uri[:-1] while uri.startswith('/'): uri = uri[1:] if not self._is_uuid_uri(uri): uri = "%s/" % uri return uri def _is_uuid_uri(self, uri): return uuidutils.is_uuid_like( urlparse.urlparse(uri).path.split('/')[-1]) def _query(self, search_key, copy=True): items = [] for uri, obj in self._store.items(): if uri.startswith(search_key): items.append(obj.copy() if copy else obj) return items def _build_response(self, url, content=None, status=requests.codes.ok, **kwargs): if isinstance(content, list): content = { 'result_count': len(content), 'results': content } if (content is not None and kwargs.get('headers', {}).get( 'Content-Type') == 'application/json'): content = jsonutils.dumps(content) return MockRequestsResponse(status, content=content) def _get_content(self, **kwargs): content = kwargs.get('data', None) if content and kwargs.get('headers', {}).get( 'Content-Type') == 'application/json': content = jsonutils.loads(content) return content def get(self, url, **kwargs): url = self._format_uri(url) if self._is_uuid_uri(url): item = self._store.get(url) code = requests.codes.ok if item else requests.codes.not_found return self._build_response( url, content=item, status=code, **kwargs) return self._build_response( url, content=self._query(url), status=requests.codes.ok, **kwargs) def _create(self, url, content, **kwargs): resource_id = content.get('id') if resource_id and self._store.get("%s%s" % (url, resource_id)): return self._build_response( url, content=None, status=requests.codes.bad, **kwargs) resource_id = 
resource_id or uuidutils.generate_uuid() content['id'] = resource_id self._store["%s%s" % (url, resource_id)] = content.copy() return content def post(self, url, **kwargs): parsed_url = urlparse.urlparse(url) url = self._format_uri(url) if self._is_uuid_uri(url): if self._store.get(url) is None: return self._build_response( url, content=None, status=requests.codes.bad, **kwargs) body = self._get_content(**kwargs) if body is None: return self._build_response( url, content=None, status=requests.codes.bad, **kwargs) response_content = None url_queries = urlparse.parse_qs(parsed_url.query) if 'create_multiple' in url_queries.get('action', []): response_content = {} for resource_name, resource_body in body.items(): for new_resource in resource_body: created_resource = self._create( url, new_resource, **kwargs) if response_content.get(resource_name, None) is None: response_content[resource_name] = [] response_content[resource_name].append(created_resource) else: response_content = self._create(url, body, **kwargs) if isinstance(response_content, MockRequestsResponse): return response_content return self._build_response( url, content=response_content, status=requests.codes.created, **kwargs) def put(self, url, **kwargs): url = self._format_uri(url) item = {} if self._is_uuid_uri(url): item = self._store.get(url, None) if item is None: return self._build_response( url, content=None, status=requests.codes.not_found, **kwargs) body = self._get_content(**kwargs) if body is None: return self._build_response( url, content=None, status=requests.codes.bad, **kwargs) item.update(body) self._store[url] = item return self._build_response( url, content=item, status=requests.codes.ok, **kwargs) def delete(self, url, **kwargs): url = self._format_uri(url) if not self._store.get(url): return self._build_response( url, content=None, status=requests.codes.not_found, **kwargs) del self._store[url] return self._build_response( url, content=None, status=requests.codes.ok, **kwargs) vmware-nsxlib-12.0.1/vmware_nsxlib/tests/base.py0000666000175100017510000000143213244535763021741 0ustar zuulzuul00000000000000# -*- coding: utf-8 -*- # Copyright 2010-2011 OpenStack Foundation # Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslotest import base class TestCase(base.BaseTestCase): """Test case base class for all unit tests.""" vmware-nsxlib-12.0.1/vmware_nsxlib/_i18n.py0000666000175100017510000000210113244535763020575 0ustar zuulzuul00000000000000# Copyright (c) 2015 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import oslo_i18n DOMAIN = "vmware_nsxlib" _translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) # The primary translation function using the well-known name "_" _ = _translators.primary # The contextual translation function using the name "_C" _C = _translators.contextual_form # The plural translation function using the name "_P" _P = _translators.plural_form def get_available_languages(): return oslo_i18n.get_available_languages(DOMAIN) vmware-nsxlib-12.0.1/vmware_nsxlib/__init__.py0000666000175100017510000000123513244535763021425 0ustar zuulzuul00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pbr.version __version__ = pbr.version.VersionInfo( 'vmware_nsxlib').version_string() vmware-nsxlib-12.0.1/vmware_nsxlib/v3/0000775000175100017510000000000013244536266017640 5ustar zuulzuul00000000000000vmware-nsxlib-12.0.1/vmware_nsxlib/v3/policy_resources.py0000666000175100017510000010364713244535763023621 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import abc import uuid from oslo_log import log as logging import six from vmware_nsxlib._i18n import _ from vmware_nsxlib.v3 import exceptions from vmware_nsxlib.v3 import policy_constants from vmware_nsxlib.v3 import policy_defs LOG = logging.getLogger(__name__) # TODO(asarfaty): support retries? 
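# The NsxPolicy*Api classes below wrap the declarative resource definitions
# from policy_defs.py with CRUD-style helpers (list/get/create_or_overwrite/
# update/delete) issued through the policy API client, one class per NSX
# Policy resource type.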
# TODO(asarfaty): In future versions PATCH may be supported for update @six.add_metaclass(abc.ABCMeta) class NsxPolicyResourceBase(object): """Abstract class for NSX policy resources declaring the basic apis each policy resource should support, and implement some common apis and utilities """ def __init__(self, policy_api): self.policy_api = policy_api @abc.abstractmethod def list(self, *args, **kwargs): pass @abc.abstractmethod def get(self, uuid, *args, **kwargs): pass @abc.abstractmethod def delete(self, uuid, *args, **kwargs): pass @abc.abstractmethod def create_or_overwrite(self, *args, **kwargs): pass @abc.abstractmethod def update(self, uuid, *args, **kwargs): pass @staticmethod def _init_obj_uuid(obj_uuid): if not obj_uuid: # generate a random id obj_uuid = str(uuid.uuid4()) return obj_uuid def get_by_name(self, name, *args, **kwargs): # Return first match by name resources_list = self.list(*args, **kwargs) for obj in resources_list: if obj.get('display_name') == name: return obj def _get_realized_state(self, path): try: result = self.policy_api.get_by_path(path) if result and result.get('state'): return result['state'] except exceptions.BackendResourceNotFound: # resource not deployed yet LOG.warning("No realized state found for %s", path) class NsxPolicyDomainApi(NsxPolicyResourceBase): """NSX Policy Domain.""" def create_or_overwrite(self, name, domain_id=None, description=None, tenant=policy_constants.POLICY_INFRA_TENANT): domain_id = self._init_obj_uuid(domain_id) domain_def = policy_defs.DomainDef(domain_id=domain_id, name=name, description=description, tenant=tenant) return self.policy_api.create_or_update(domain_def) def delete(self, domain_id, tenant=policy_constants.POLICY_INFRA_TENANT): domain_def = policy_defs.DomainDef(domain_id, tenant=tenant) self.policy_api.delete(domain_def) def get(self, domain_id, tenant=policy_constants.POLICY_INFRA_TENANT): domain_def = policy_defs.DomainDef(domain_id, tenant=tenant) return self.policy_api.get(domain_def) def list(self, tenant=policy_constants.POLICY_INFRA_TENANT): domain_def = policy_defs.DomainDef(tenant=tenant) return self.policy_api.list(domain_def)['results'] def update(self, domain_id, name=None, description=None, tenant=policy_constants.POLICY_INFRA_TENANT): domain_def = policy_defs.DomainDef(domain_id=domain_id, tenant=tenant) domain_def.update_attributes_in_body(name=name, description=description) # update the backend return self.policy_api.create_or_update(domain_def) class NsxPolicyGroupApi(NsxPolicyResourceBase): """NSX Policy Group (under a Domain) with a single condition.""" def create_or_overwrite( self, name, domain_id, group_id=None, description=None, cond_val=None, cond_key=policy_constants.CONDITION_KEY_TAG, cond_op=policy_constants.CONDITION_OP_EQUALS, cond_member_type=policy_constants.CONDITION_MEMBER_PORT, tenant=policy_constants.POLICY_INFRA_TENANT): """Create a group with/without a condition. Empty condition value will result a group with no condition. 
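        For example (illustrative values only): cond_val='prod' with the
        default key/operator/member type creates a group whose condition
        matches logical ports tagged 'prod'.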
""" group_id = self._init_obj_uuid(group_id) # Prepare the condition if cond_val is not None: condition = policy_defs.Condition(value=cond_val, key=cond_key, operator=cond_op, member_type=cond_member_type) conditions = [condition] else: conditions = [] group_def = policy_defs.GroupDef(domain_id=domain_id, group_id=group_id, name=name, description=description, conditions=conditions, tenant=tenant) return self.policy_api.create_or_update(group_def) def delete(self, domain_id, group_id, tenant=policy_constants.POLICY_INFRA_TENANT): group_def = policy_defs.GroupDef(domain_id=domain_id, group_id=group_id, tenant=tenant) self.policy_api.delete(group_def) def get(self, domain_id, group_id, tenant=policy_constants.POLICY_INFRA_TENANT): group_def = policy_defs.GroupDef(domain_id=domain_id, group_id=group_id, tenant=tenant) return self.policy_api.get(group_def) def list(self, domain_id, tenant=policy_constants.POLICY_INFRA_TENANT): """List all the groups of a specific domain.""" group_def = policy_defs.GroupDef(domain_id=domain_id, tenant=tenant) return self.policy_api.list(group_def)['results'] def get_by_name(self, domain_id, name, tenant=policy_constants.POLICY_INFRA_TENANT): """Return first group matched by name of this domain""" return super(NsxPolicyGroupApi, self).get_by_name(name, domain_id, tenant=tenant) def update(self, domain_id, group_id, name=None, description=None, tenant=policy_constants.POLICY_INFRA_TENANT): """Update the general data of the group. Without changing the conditions """ group_def = policy_defs.GroupDef(domain_id=domain_id, group_id=group_id, tenant=tenant) group_def.update_attributes_in_body(name=name, description=description) # update the backend return self.policy_api.create_or_update(group_def) def update_condition( self, domain_id, group_id, cond_val=None, cond_key=policy_constants.CONDITION_KEY_TAG, cond_op=policy_constants.CONDITION_OP_EQUALS, cond_member_type=policy_constants.CONDITION_MEMBER_PORT, tenant=policy_constants.POLICY_INFRA_TENANT): """Update/Remove the condition of a group. Empty condition value will result a group with no condition. """ group_def = policy_defs.GroupDef(domain_id=domain_id, group_id=group_id, tenant=tenant) # Prepare the condition if cond_val is not None: condition = policy_defs.Condition(value=cond_val, key=cond_key, operator=cond_op, member_type=cond_member_type) conditions = [condition] else: conditions = [] # Get the current data, and update it with the new values # We need to do that here because of the conditions data group = self.get(domain_id, group_id, tenant=tenant) group_def.update_attributes_in_body(body=group, conditions=conditions) # update the backend return self.policy_api.create_or_update(group_def) def get_realized_state(self, domain_id, group_id, ep_id, tenant=policy_constants.POLICY_INFRA_TENANT): group_def = policy_defs.GroupDef(domain_id=domain_id, group_id=group_id, tenant=tenant) path = group_def.get_realized_state_path(ep_id) return self._get_realized_state(path) class NsxPolicyServiceBase(NsxPolicyResourceBase): """Base class for NSX Policy Service with a single entry. Note the nsx-policy backend supports multiple service entries per service. At this point this is not supported here. 
""" def delete(self, service_id, tenant=policy_constants.POLICY_INFRA_TENANT): """Delete the service with all its entries""" service_def = policy_defs.ServiceDef(service_id=service_id, tenant=tenant) service = self.policy_api.get(service_def) # first delete all the service entries if 'service_entries' in service: for entry in service['service_entries']: entry_def = self.entry_def( service_id=service_id, service_entry_id=entry['id'], tenant=tenant) self.policy_api.delete(entry_def) self.policy_api.delete(service_def) def get(self, service_id, tenant=policy_constants.POLICY_INFRA_TENANT): service_def = policy_defs.ServiceDef(service_id=service_id, tenant=tenant) return self.policy_api.get(service_def) def list(self, tenant=policy_constants.POLICY_INFRA_TENANT): service_def = policy_defs.ServiceDef(tenant=tenant) return self.policy_api.list(service_def)['results'] def get_realized_state(self, service_id, ep_id, tenant=policy_constants.POLICY_INFRA_TENANT): service_def = policy_defs.ServiceDef(service_id=service_id, tenant=tenant) path = service_def.get_realized_state_path(ep_id) return self._get_realized_state(path) @property def entry_def(self): pass class NsxPolicyL4ServiceApi(NsxPolicyServiceBase): """NSX Policy Service with a single L4 service entry. Note the nsx-policy backend supports multiple service entries per service. At this point this is not supported here. """ @property def entry_def(self): return policy_defs.L4ServiceEntryDef def create_or_overwrite(self, name, service_id=None, description=None, protocol=policy_constants.TCP, dest_ports=None, tenant=policy_constants.POLICY_INFRA_TENANT): service_id = self._init_obj_uuid(service_id) service_def = policy_defs.ServiceDef(service_id=service_id, name=name, description=description, tenant=tenant) # NOTE(asarfaty) We set the service entry display name (which is also # used as the id) to be the same as the service name. In case we # support multiple service entries, we need the name to be unique. 
entry_def = policy_defs.L4ServiceEntryDef( service_id=service_id, name=name, description=description, protocol=protocol, dest_ports=dest_ports, tenant=tenant) return self.policy_api.create_with_parent(service_def, entry_def) def _update_service_entry(self, service_id, srv_entry, name=None, description=None, protocol=None, dest_ports=None, tenant=policy_constants.POLICY_INFRA_TENANT): entry_id = srv_entry['id'] entry_def = policy_defs.L4ServiceEntryDef(service_id=service_id, service_entry_id=entry_id, tenant=tenant) entry_def.update_attributes_in_body(body=srv_entry, name=name, description=description, protocol=protocol, dest_ports=dest_ports) self.policy_api.create_or_update(entry_def) def update(self, service_id, name=None, description=None, protocol=None, dest_ports=None, tenant=policy_constants.POLICY_INFRA_TENANT): # Get the current data of service & its' service entry service = self.get(service_id, tenant=tenant) # update the service itself: if name is not None or description is not None: # update the service itself service_def = policy_defs.ServiceDef(service_id=service_id, tenant=tenant) service_def.update_attributes_in_body(name=name, description=description) # update the backend updated_service = self.policy_api.create_or_update(service_def) else: updated_service = service # update the service entry if it exists service_entry = policy_defs.ServiceDef.get_single_entry(service) if not service_entry: LOG.error("Cannot update service %s - expected 1 service " "entry", service_id) return updated_service self._update_service_entry( service_id, service_entry, name=name, description=description, protocol=protocol, dest_ports=dest_ports, tenant=tenant) # re-read the service from the backend to return the current data return self.get(service_id, tenant=tenant) class NsxPolicyIcmpServiceApi(NsxPolicyServiceBase): """NSX Policy Service with a single ICMP service entry. Note the nsx-policy backend supports multiple service entries per service. At this point this is not supported here. """ @property def entry_def(self): return policy_defs.IcmpServiceEntryDef def create_or_overwrite(self, name, service_id=None, description=None, version=4, icmp_type=None, icmp_code=None, tenant=policy_constants.POLICY_INFRA_TENANT): service_id = self._init_obj_uuid(service_id) service_def = policy_defs.ServiceDef(service_id=service_id, name=name, description=description, tenant=tenant) # NOTE(asarfaty) We set the service entry display name (which is also # used as the id) to be the same as the service name. In case we # support multiple service entries, we need the name to be unique. 
entry_def = policy_defs.IcmpServiceEntryDef( service_id=service_id, name=name, description=description, version=version, icmp_type=icmp_type, icmp_code=icmp_code, tenant=tenant) return self.policy_api.create_with_parent(service_def, entry_def) def _update_service_entry(self, service_id, srv_entry, name=None, description=None, version=None, icmp_type=None, icmp_code=None, tenant=policy_constants.POLICY_INFRA_TENANT): entry_id = srv_entry['id'] entry_def = policy_defs.IcmpServiceEntryDef(service_id=service_id, service_entry_id=entry_id, tenant=tenant) entry_def.update_attributes_in_body(body=srv_entry, name=name, description=description, version=version, icmp_type=icmp_type, icmp_code=icmp_code) self.policy_api.create_or_update(entry_def) def update(self, service_id, name=None, description=None, version=None, icmp_type=None, icmp_code=None, tenant=policy_constants.POLICY_INFRA_TENANT): # Get the current data of service & its' service entry service = self.get(service_id, tenant=tenant) # update the service itself: if name is not None or description is not None: # update the service itself service_def = policy_defs.ServiceDef(service_id=service_id, tenant=tenant) service_def.update_attributes_in_body(name=name, description=description) # update the backend updated_service = self.policy_api.create_or_update(service_def) else: updated_service = service # update the service entry if it exists service_entry = policy_defs.ServiceDef.get_single_entry(service) if not service_entry: LOG.error("Cannot update service %s - expected 1 service " "entry", service_id) return updated_service self._update_service_entry( service_id, service_entry, name=name, description=description, version=version, icmp_type=icmp_type, icmp_code=icmp_code, tenant=tenant) # re-read the service from the backend to return the current data return self.get(service_id, tenant=tenant) class NsxPolicyCommunicationProfileApi(NsxPolicyResourceBase): """NSX Policy Communication profile (with a single entry). Note the nsx-policy backend supports multiple entries per communication profile. At this point this is not supported here. """ def create_or_overwrite(self, name, profile_id=None, description=None, services=None, action=policy_constants.ACTION_ALLOW, tenant=policy_constants.POLICY_INFRA_TENANT): """Create a Communication profile with a single entry. Services should be a list of service ids """ profile_id = self._init_obj_uuid(profile_id) profile_def = policy_defs.CommunicationProfileDef( profile_id=profile_id, name=name, description=description, tenant=tenant) # NOTE(asarfaty) We set the profile entry display name (which is also # used as the id) to be the same as the profile name. In case we # support multiple entries, we need the name to be unique. 
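        # Illustrative usage (ids below are placeholders): passing
        # services=[some_service_id] with action=policy_constants.ACTION_ALLOW
        # yields a profile whose single entry allows the listed services.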
entry_def = policy_defs.CommunicationProfileEntryDef( profile_id=profile_id, name=name, description=description, services=services, action=action, tenant=tenant) return self.policy_api.create_with_parent(profile_def, entry_def) def delete(self, profile_id, tenant=policy_constants.POLICY_INFRA_TENANT): """Delete the Communication profile with all the entries""" # first delete the entries, or else the profile deletion will fail profile_def = policy_defs.CommunicationProfileDef( profile_id=profile_id, tenant=tenant) prof = self.policy_api.get(profile_def) if 'communication_profile_entries' in prof: for entry in prof['communication_profile_entries']: entry_def = policy_defs.CommunicationProfileEntryDef( profile_id=profile_id, profile_entry_id=entry['id'], tenant=tenant) self.policy_api.delete(entry_def) self.policy_api.delete(profile_def) def get(self, profile_id, tenant=policy_constants.POLICY_INFRA_TENANT): profile_def = policy_defs.CommunicationProfileDef( profile_id=profile_id, tenant=tenant) return self.policy_api.get(profile_def) def list(self, tenant=policy_constants.POLICY_INFRA_TENANT): profile_def = policy_defs.CommunicationProfileDef(tenant=tenant) return self.policy_api.list(profile_def)['results'] def _update_profile_entry(self, profile_id, profile_entry, name=None, description=None, services=None, action=None, tenant=policy_constants.POLICY_INFRA_TENANT): entry_id = profile_entry['id'] entry_def = policy_defs.CommunicationProfileEntryDef( profile_id=profile_id, profile_entry_id=entry_id, tenant=tenant) entry_def.update_attributes_in_body(body=profile_entry, name=name, description=description, services=services, action=action) self.policy_api.create_or_update(entry_def) def update(self, profile_id, name=None, description=None, services=None, action=None, tenant=policy_constants.POLICY_INFRA_TENANT): # Get the current data of the profile & its' entry profile = self.get(profile_id, tenant=tenant) if name is not None or description is not None: # update the profile itself profile_def = policy_defs.CommunicationProfileDef( profile_id=profile_id, tenant=tenant) profile_def.update_attributes_in_body(name=name, description=description) # update the backend updated_profile = self.policy_api.create_or_update(profile_def) else: updated_profile = profile # update the profile entry if it exists profile_entry = policy_defs.CommunicationProfileDef.get_single_entry( profile) if not profile_entry: LOG.error("Cannot update communication profile %s - expected 1 " "profile entry", profile_id) return updated_profile self._update_profile_entry( profile_id, profile_entry, name=name, description=description, services=services, action=action, tenant=tenant) # re-read the profile from the backend to return the current data return self.get(profile_id, tenant=tenant) class NsxPolicyCommunicationMapApi(NsxPolicyResourceBase): """NSX Policy CommunicationMap (Under a Domain).""" def _get_last_seq_num(self, domain_id, tenant=policy_constants.POLICY_INFRA_TENANT): # get the current entries, and choose the next unused sequence number try: com_entries = self.list(domain_id, tenant=tenant) except exceptions.ResourceNotFound: return -1 if not com_entries: return 0 seq_nums = [int(cm['sequence_number']) for cm in com_entries] seq_nums.sort() return seq_nums[-1] def create_or_overwrite(self, name, domain_id, map_id=None, description=None, sequence_number=None, profile_id=None, source_groups=None, dest_groups=None, tenant=policy_constants.POLICY_INFRA_TENANT): """Create CommunicationMapEntry. 
source_groups/dest_groups should be a list of group ids belonging to the domain. NOTE: In multi-connection environment, it is recommended to execute this call under lock to prevent race condition where two entries end up with same sequence number. """ # Validate and convert inputs map_id = self._init_obj_uuid(map_id) if not profile_id: # profile-id must be provided err_msg = (_("Cannot create a communication map %(name)s without " "communication profile id") % {'name': name}) raise exceptions.ManagerError(details=err_msg) # get the next available sequence number last_sequence = self._get_last_seq_num(domain_id, tenant=tenant) if not sequence_number: if last_sequence < 0: sequence_number = 1 else: sequence_number = last_sequence + 1 entry_def = policy_defs.CommunicationMapEntryDef( domain_id=domain_id, map_id=map_id, name=name, description=description, sequence_number=sequence_number, source_groups=source_groups, dest_groups=dest_groups, profile_id=profile_id, tenant=tenant) if last_sequence < 0: # if communication map is absent, we need to create it map_def = policy_defs.CommunicationMapDef(domain_id, tenant) map_result = self.policy_api.create_with_parent(map_def, entry_def) # return the created entry return map_result['communication_entries'][0] return self.policy_api.create_or_update(entry_def) def delete(self, domain_id, map_id, tenant=policy_constants.POLICY_INFRA_TENANT): map_def = policy_defs.CommunicationMapEntryDef( domain_id=domain_id, map_id=map_id, tenant=tenant) self.policy_api.delete(map_def) def get(self, domain_id, map_id, tenant=policy_constants.POLICY_INFRA_TENANT): map_def = policy_defs.CommunicationMapEntryDef( domain_id=domain_id, map_id=map_id, tenant=tenant) return self.policy_api.get(map_def) def get_by_name(self, domain_id, name, tenant=policy_constants.POLICY_INFRA_TENANT): """Return first communication map entry matched by name""" return super(NsxPolicyCommunicationMapApi, self).get_by_name( name, domain_id, tenant=tenant) def list(self, domain_id, tenant=policy_constants.POLICY_INFRA_TENANT): """List all the map entries of a specific domain.""" map_def = policy_defs.CommunicationMapEntryDef( domain_id=domain_id, tenant=tenant) return self.policy_api.list(map_def)['results'] def update(self, domain_id, map_id, name=None, description=None, sequence_number=None, profile_id=None, source_groups=None, dest_groups=None, tenant=policy_constants.POLICY_INFRA_TENANT): map_def = policy_defs.CommunicationMapEntryDef( domain_id=domain_id, map_id=map_id, tenant=tenant) # Get the current data, and update it with the new values try: comm_map = self.get(domain_id, map_id, tenant=tenant) except exceptions.ResourceNotFound: return self.create_or_overwrite(name, domain_id, map_id, description, sequence_number, profile_id, source_groups, dest_groups, tenant) map_def.update_attributes_in_body( body=comm_map, name=name, description=description, sequence_number=sequence_number, profile_id=profile_id, source_groups=source_groups, dest_groups=dest_groups) # update the backend return self.policy_api.create_or_update(map_def) def get_realized_state(self, domain_id, ep_id, tenant=policy_constants.POLICY_INFRA_TENANT): map_def = policy_defs.CommunicationMapDef(domain_id, tenant) path = map_def.get_realized_state_path(ep_id) return self._get_realized_state(path) class NsxPolicyEnforcementPointApi(NsxPolicyResourceBase): """NSX Policy Enforcement Point.""" def create_or_overwrite(self, name, ep_id=None, description=None, ip_address=None, username=None, password=None, thumbprint=None, 
tenant=policy_constants.POLICY_INFRA_TENANT): if not ip_address or not username or password is None: err_msg = (_("Cannot create an enforcement point without " "ip_address, username and password")) raise exceptions.ManagerError(details=err_msg) ep_id = self._init_obj_uuid(ep_id) ep_def = policy_defs.EnforcementPointDef( ep_id=ep_id, name=name, description=description, ip_address=ip_address, username=username, password=password, thumbprint=thumbprint, tenant=tenant) return self.policy_api.create_or_update(ep_def) def delete(self, ep_id, tenant=policy_constants.POLICY_INFRA_TENANT): ep_def = policy_defs.EnforcementPointDef( ep_id=ep_id, tenant=tenant) self.policy_api.delete(ep_def) def get(self, ep_id, tenant=policy_constants.POLICY_INFRA_TENANT): ep_def = policy_defs.EnforcementPointDef( ep_id=ep_id, tenant=tenant) return self.policy_api.get(ep_def) def list(self, tenant=policy_constants.POLICY_INFRA_TENANT): ep_def = policy_defs.EnforcementPointDef(tenant=tenant) return self.policy_api.list(ep_def)['results'] def update(self, ep_id, name=None, description=None, ip_address=None, username=None, password=None, thumbprint=None, tenant=policy_constants.POLICY_INFRA_TENANT): """Update the enforcement point. username & password must be defined """ if not username or password is None: # profile-id must be provided err_msg = (_("Cannot update an enforcement point without " "username and password")) raise exceptions.ManagerError(details=err_msg) ep_def = policy_defs.EnforcementPointDef(ep_id=ep_id, tenant=tenant) ep_def.update_attributes_in_body(name=name, description=description, ip_address=ip_address, username=username, password=password, thumbprint=thumbprint) # update the backend return self.policy_api.create_or_update(ep_def) def get_realized_state(self, ep_id, tenant=policy_constants.POLICY_INFRA_TENANT): ep_def = policy_defs.EnforcementPointDef(ep_id=ep_id, tenant=tenant) path = ep_def.get_realized_state_path() return self._get_realized_state(path) class NsxPolicyDeploymentMapApi(NsxPolicyResourceBase): """NSX Policy Deployment Map.""" def create_or_overwrite(self, name, map_id=None, description=None, ep_id=None, domain_id=None, tenant=policy_constants.POLICY_INFRA_TENANT): map_id = self._init_obj_uuid(map_id) map_def = policy_defs.DeploymentMapDef( map_id=map_id, name=name, description=description, ep_id=ep_id, domain_id=domain_id, tenant=tenant) return self.policy_api.create_or_update(map_def) def delete(self, map_id, domain_id=None, tenant=policy_constants.POLICY_INFRA_TENANT): if not domain_id: # domain_id must be provided err_msg = (_("Cannot delete deployment maps without a domain")) raise exceptions.ManagerError(details=err_msg) map_def = policy_defs.DeploymentMapDef( map_id=map_id, domain_id=domain_id, tenant=tenant) self.policy_api.delete(map_def) def get(self, map_id, domain_id=None, tenant=policy_constants.POLICY_INFRA_TENANT): if not domain_id: # domain_id must be provided err_msg = (_("Cannot get deployment maps without a domain")) raise exceptions.ManagerError(details=err_msg) map_def = policy_defs.DeploymentMapDef( map_id=map_id, domain_id=domain_id, tenant=tenant) return self.policy_api.get(map_def) def list(self, domain_id=None, tenant=policy_constants.POLICY_INFRA_TENANT): if not domain_id: # domain_id must be provided err_msg = (_("Cannot list deployment maps without a domain")) raise exceptions.ManagerError(details=err_msg) map_def = policy_defs.DeploymentMapDef(domain_id=domain_id, tenant=tenant) return self.policy_api.list(map_def)['results'] def update(self, map_id, 
name=None, description=None, ep_id=None, domain_id=None, tenant=policy_constants.POLICY_INFRA_TENANT): map_def = policy_defs.DeploymentMapDef( map_id=map_id, tenant=tenant) map_def.update_attributes_in_body(name=name, description=description, ep_id=ep_id, domain_id=domain_id) # update the backend return self.policy_api.create_or_update(map_def) vmware-nsxlib-12.0.1/vmware_nsxlib/v3/vpn_ipsec.py0000666000175100017510000003446513244535763022217 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from vmware_nsxlib.v3 import utils LOG = logging.getLogger(__name__) VPN_IPSEC_PATH = 'vpn/ipsec/' # TODO(asarfaty) Add update for tags class IkeVersionTypes(object): """Supported IKE versions (NSX default is V2)""" IKE_VERSION_V1 = 'IKE_V1' IKE_VERSION_V2 = 'IKE_V2' IKE_VERSION_Flex = 'IKE_FLEX' class EncryptionAlgorithmTypes(object): """Supported encryption algorithms (NSX default is GCM)""" ENCRYPTION_ALGORITHM_128 = 'AES_128' ENCRYPTION_ALGORITHM_256 = 'AES_256' ENCRYPTION_ALGORITHM_GCM128 = 'AES_GCM_128' # only with IKE_V2 ENCRYPTION_ALGORITHM_GCM192 = 'AES_GCM_192' # only with IKE_V2 ENCRYPTION_ALGORITHM_GCM256 = 'AES_GCM_256' # only with IKE_V2 class DigestAlgorithmTypes(object): """Supported digest (auth) algorithms (NSX default is SHA2_256)""" DIGEST_ALGORITHM_SHA1 = 'SHA1' DIGEST_ALGORITHM_SHA256 = 'SHA2_256' DIGEST_ALGORITHM_GMAC_128 = 'GMAC_128' # only for tunnel profile DIGEST_ALGORITHM_GMAC_192 = 'GMAC_192' # only for tunnel profile DIGEST_ALGORITHM_GMAC_256 = 'GMAC_256' # only for tunnel profile class DHGroupTypes(object): """Supported DH groups for Perfect Forward Secrecy""" DH_GROUP_14 = 'GROUP14' DH_GROUP_15 = 'GROUP15' DH_GROUP_16 = 'GROUP16' class EncapsulationModeTypes(object): """Supported encapsulation modes for ipsec tunnel profile""" ENCAPSULATION_MODE_TUNNEL = 'TUNNEL_MODE' class TransformProtocolTypes(object): """Supported transform protocols for ipsec tunnel profile""" TRANSFORM_PROTOCOL_ESP = 'ESP' class AuthenticationModeTypes(object): """Supported authentication modes for ipsec peer endpoint (default PSK)""" AUTH_MODE_PSK = 'PSK' AUTH_MODE_CERT = 'CERTIFICATE' class DpdProfileActionTypes(object): """Supported DPD profile actions""" DPD_PROFILE_ACTION_HOLD = 'HOLD' class DpdProfileTimeoutLimits(object): """Supported DPD timeout range""" DPD_TIMEOUT_MIN = 3 DPD_TIMEOUT_MAX = 360 class IkeSALifetimeLimits(object): """Limits to the allowed SA lifetime in seconds (NSX default is 1 day)""" SA_LIFETIME_MIN = 21600 SA_LIFETIME_MAX = 31536000 class IPsecSALifetimeLimits(object): """Limits to the allowed SA lifetime in seconds (NSX default is 3600)""" SA_LIFETIME_MIN = 900 SA_LIFETIME_MAX = 31536000 class ConnectionInitiationModeTypes(object): """Supported connection initiation mode type""" INITIATION_MODE_INITIATOR = 'INITIATOR' INITIATION_MODE_RESPOND_ONLY = 'RESPOND_ONLY' INITIATION_MODE_ON_DEMAND = 'ON_DEMAND' class IkeLogLevelTypes(object): """Supported service IKE log levels 
(default ERROR)""" LOG_LEVEL_DEBUG = 'DEBUG' LOG_LEVEL_INFO = 'INFO' LOG_LEVEL_WARN = 'WARN' LOG_LEVEL_ERROR = 'ERROR' class IkeProfile(utils.NsxLibApiBase): @property def resource_type(self): return 'IPSecVPNIKEProfile' @property def uri_segment(self): return VPN_IPSEC_PATH + 'ike-profiles' def create(self, name, description=None, encryption_algorithm=None, digest_algorithm=None, ike_version=None, dh_group=None, sa_life_time=None, tags=None): # mandatory parameters body = {'display_name': name} # optional parameters if description: body['description'] = description if encryption_algorithm: body['encryption_algorithms'] = [encryption_algorithm] if digest_algorithm: body['digest_algorithms'] = [digest_algorithm] if ike_version: body['ike_version'] = ike_version if sa_life_time: body['sa_life_time'] = sa_life_time if dh_group: body['dh_groups'] = [dh_group] if tags: body['tags'] = tags return self.client.create(self.get_path(), body=body) class IPSecTunnelProfile(utils.NsxLibApiBase): @property def resource_type(self): return 'IPSecVPNTunnelProfile' @property def uri_segment(self): return VPN_IPSEC_PATH + 'tunnel-profiles' def create(self, name, description=None, encryption_algorithm=None, digest_algorithm=None, pfs=None, dh_group=None, sa_life_time=None, tags=None): # mandatory parameters body = {'display_name': name} # optional parameters if description: body['description'] = description if encryption_algorithm: body['encryption_algorithms'] = [encryption_algorithm] if digest_algorithm: body['digest_algorithms'] = [digest_algorithm] if sa_life_time: body['sa_life_time'] = sa_life_time if dh_group: body['dh_groups'] = [dh_group] if tags: body['tags'] = tags # Boolean parameters if pfs is not None: body['enable_perfect_forward_secrecy'] = pfs return self.client.create(self.get_path(), body=body) class IPSecDpdProfile(utils.NsxLibApiBase): @property def resource_type(self): return 'IPSecVPNDPDProfile' @property def uri_segment(self): return VPN_IPSEC_PATH + 'dpd-profiles' def create(self, name, description=None, enabled=None, timeout=None, tags=None): # mandatory parameters body = {'display_name': name} # optional parameters if description: body['description'] = description if timeout: body['dpd_probe_interval'] = timeout # Boolean parameters if enabled is not None: body['enabled'] = enabled if tags: body['tags'] = tags return self.client.create(self.get_path(), body=body) def update(self, profile_id, enabled=None, timeout=None, tags=None): body = self.get(profile_id) if timeout: body['dpd_probe_interval'] = timeout if enabled is not None: body['enabled'] = enabled if tags is not None: body['tags'] = tags return self.client.update(self.get_path(profile_id), body=body) class IPSecPeerEndpoint(utils.NsxLibApiBase): @property def resource_type(self): return 'IPSecVPNPeerEndpoint' @property def uri_segment(self): return VPN_IPSEC_PATH + 'peer-endpoints' def create(self, name, peer_address, peer_id, description=None, authentication_mode=None, dpd_profile_id=None, ike_profile_id=None, ipsec_tunnel_profile_id=None, connection_initiation_mode=None, psk=None, tags=None): # mandatory parameters body = {'display_name': name, 'peer_address': peer_address, 'peer_id': peer_id} # optional parameters if description: body['description'] = description if authentication_mode: body['authentication_mode'] = authentication_mode if dpd_profile_id: body['dpd_profile_id'] = dpd_profile_id if ike_profile_id: body['ike_profile_id'] = ike_profile_id if ipsec_tunnel_profile_id: body['ipsec_tunnel_profile_id'] = 
ipsec_tunnel_profile_id if psk: body['psk'] = psk if connection_initiation_mode: body['connection_initiation_mode'] = connection_initiation_mode if tags: body['tags'] = tags return self.client.create(self.get_path(), body=body) def update(self, uuid, name=None, description=None, peer_address=None, peer_id=None, connection_initiation_mode=None, psk=None, tags=None): body = self.get(uuid) if description: body['description'] = description if name: body['display_name'] = name if psk: body['psk'] = psk if connection_initiation_mode: body['connection_initiation_mode'] = connection_initiation_mode if peer_address: body['peer_address'] = peer_address if peer_id: body['peer_id'] = peer_id if tags is not None: body['tags'] = tags return self.client.update(self.get_path(uuid), body=body) class LocalEndpoint(utils.NsxLibApiBase): @property def resource_type(self): return 'IPSecVPNLocalEndpoint' @property def uri_segment(self): return VPN_IPSEC_PATH + 'local-endpoints' def create(self, name, local_address, ipsec_vpn_service_id, description=None, local_id=None, certificate_id=None, trust_ca_ids=None, trust_crl_ids=None, tags=None): # mandatory parameters body = {'display_name': name, 'local_address': local_address, 'ipsec_vpn_service_id': {'target_id': ipsec_vpn_service_id}} # optional parameters if description: body['description'] = description if local_id: body['local_id'] = local_id if certificate_id: body['certificate_id'] = certificate_id if trust_ca_ids: body['trust_ca_ids'] = trust_ca_ids if trust_crl_ids: body['trust_crl_ids'] = trust_crl_ids if tags: body['tags'] = tags return self.client.create(self.get_path(), body=body) def update(self, uuid, name=None, description=None, local_address=None, ipsec_vpn_service_id=None, local_id=None, certificate_id=None, trust_ca_ids=None, trust_crl_ids=None, tags=None): body = self.get(uuid) if description: body['description'] = description if name: body['display_name'] = name if local_address: body['local_address'] = local_address if ipsec_vpn_service_id: body['ipsec_vpn_service_id'] = {'target_id': ipsec_vpn_service_id} if local_id: body['local_id'] = local_id if certificate_id: body['certificate_id'] = certificate_id if trust_ca_ids: body['trust_ca_ids'] = trust_ca_ids if trust_crl_ids: body['trust_crl_ids'] = trust_crl_ids if tags is not None: body['tags'] = tags return self.client.update(self.get_path(uuid), body=body) class Session(utils.NsxLibApiBase): @property def resource_type(self): return 'PolicyBasedIPSecVPNSession' @property def uri_segment(self): return VPN_IPSEC_PATH + 'sessions' def create(self, name, local_endpoint_id, peer_endpoint_id, policy_rules, description=None, enabled=True, tags=None): # mandatory parameters body = {'display_name': name, 'description': description, 'local_endpoint_id': local_endpoint_id, 'peer_endpoint_id': peer_endpoint_id, 'enabled': enabled, 'resource_type': self.resource_type, 'policy_rules': policy_rules} if tags: body['tags'] = tags return self.client.create(self.get_path(), body=body) def get_rule_obj(self, sources, destinations): src_subnets = [{'subnet': src} for src in sources] dst_subnets = [{'subnet': dst} for dst in destinations] return { 'sources': src_subnets, 'destinations': dst_subnets } def update(self, uuid, name=None, description=None, policy_rules=None, tags=None): body = self.get(uuid) if description: body['description'] = description if name: body['display_name'] = name if policy_rules is not None: body['policy_rules'] = policy_rules return
self.client.update(self.get_path(uuid), body=body) class Service(utils.NsxLibApiBase): @property def resource_type(self): return 'IPSecVPNService' @property def uri_segment(self): return VPN_IPSEC_PATH + 'services' def create(self, name, logical_router_id, enabled=True, ike_log_level="ERROR", tags=None, bypass_rules=None): # mandatory parameters body = {'display_name': name, 'logical_router_id': logical_router_id} # optional parameters if ike_log_level: body['ike_log_level'] = ike_log_level if enabled is not None: body['enabled'] = enabled if tags: body['tags'] = tags if bypass_rules: body['bypass_rules'] = bypass_rules return self.client.create(self.get_path(), body=body) class VpnIpSec(object): """This is the class that have all vpn ipsec resource clients""" def __init__(self, client, nsxlib_config, nsxlib=None): self.ike_profile = IkeProfile(client, nsxlib_config, nsxlib=nsxlib) self.tunnel_profile = IPSecTunnelProfile(client, nsxlib_config, nsxlib=nsxlib) self.dpd_profile = IPSecDpdProfile(client, nsxlib_config, nsxlib=nsxlib) self.peer_endpoint = IPSecPeerEndpoint(client, nsxlib_config, nsxlib=nsxlib) self.local_endpoint = LocalEndpoint(client, nsxlib_config, nsxlib=nsxlib) self.session = Session(client, nsxlib_config, nsxlib=nsxlib) self.service = Service(client, nsxlib_config, nsxlib=nsxlib) vmware-nsxlib-12.0.1/vmware_nsxlib/v3/utils.py0000666000175100017510000004420513244535763021362 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import collections import inspect import re import time from oslo_log import log import tenacity from tenacity import _utils as tenacity_utils from vmware_nsxlib._i18n import _ from vmware_nsxlib.v3 import exceptions as nsxlib_exceptions LOG = log.getLogger(__name__) TagLimits = collections.namedtuple('TagLimits', ['scope_length', 'tag_length', 'max_tags']) # The tag limits may change in the NSX. We set the default values to be those # in NSX 2.0. If the NSX returns different values we update these globals. 
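# update_tag_limits() below refreshes these module-level defaults with the
# limits reported by the NSX backend (see the TagLimits namedtuple above).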
MAX_RESOURCE_TYPE_LEN = 20 MAX_TAG_LEN = 40 MAX_TAGS = 15 DEFAULT_MAX_ATTEMPTS = 10 DEFAULT_CACHE_AGE_SEC = 600 INJECT_HEADERS_CALLBACK = None IS_ATTR_SET_CALLBACK = None def set_is_attr_callback(callback): global IS_ATTR_SET_CALLBACK IS_ATTR_SET_CALLBACK = callback def is_attr_set(attr): if IS_ATTR_SET_CALLBACK: return IS_ATTR_SET_CALLBACK(attr) return attr is not None def set_inject_headers_callback(callback): global INJECT_HEADERS_CALLBACK INJECT_HEADERS_CALLBACK = callback def _update_resource_length(length): global MAX_RESOURCE_TYPE_LEN MAX_RESOURCE_TYPE_LEN = length def _update_tag_length(length): global MAX_TAG_LEN MAX_TAG_LEN = length def _update_max_tags(max_tags): global MAX_TAGS MAX_TAGS = max_tags def update_tag_limits(limits): _update_resource_length(limits.scope_length) _update_tag_length(limits.tag_length) _update_max_tags(limits.max_tags) def _validate_resource_type_length(resource_type): # Add in a validation to ensure that we catch this at build time if len(resource_type) > MAX_RESOURCE_TYPE_LEN: raise nsxlib_exceptions.NsxLibInvalidInput( error_message=(_('Resource type cannot exceed %(max_len)s ' 'characters: %(resource_type)s') % {'max_len': MAX_RESOURCE_TYPE_LEN, 'resource_type': resource_type})) def add_v3_tag(tags, resource_type, tag): _validate_resource_type_length(resource_type) tags.append({'scope': resource_type, 'tag': tag[:MAX_TAG_LEN]}) return tags def update_v3_tags(current_tags, tags_update): current_scopes = set([tag['scope'] for tag in current_tags]) updated_scopes = set([tag['scope'] for tag in tags_update]) # All tags scopes which are either completely new or already defined on the # resource are left in place, unless the tag value is empty, in that case # it is ignored. tags = [{'scope': tag['scope'], 'tag': tag['tag']} for tag in (current_tags + tags_update) if tag['tag'] and tag['scope'] in (current_scopes ^ updated_scopes)] modified_scopes = current_scopes & updated_scopes for tag in tags_update: if tag['scope'] in modified_scopes: # If the tag value is empty or None, then remove the tag completely if tag['tag']: tag['tag'] = tag['tag'][:MAX_TAG_LEN] tags.append(tag) return tags def _log_before_retry(func, trial_number): """Before call strategy that logs to some logger the attempt.""" if trial_number > 1: LOG.warning("Retrying call to '%(func)s' for the %(num)s time", {'func': tenacity_utils.get_callback_name(func), 'num': tenacity_utils.to_ordinal(trial_number)}) def _get_args_from_frame(frames, frame_num): if len(frames) > frame_num and frames[frame_num] and frames[frame_num][0]: argvalues = inspect.getargvalues(frames[frame_num][0]) formated_args = inspect.formatargvalues(*argvalues) # remove the first 'self' arg from the log as it adds no information formated_args = re.sub(r'\(self=.*?, ', "(", formated_args) return formated_args def _log_after_retry(func, trial_number, trial_time_taken): """After call strategy that logs to some logger the finished attempt.""" # Using inspect to get arguments of the relevant call frames = inspect.trace() # Look at frame #2 first because of the internal functions _do_X formated_args = _get_args_from_frame(frames, 2) if not formated_args: formated_args = _get_args_from_frame(frames, 1) if not formated_args: formated_args = "Unknown" LOG.warning("Finished retry of %(func)s for the %(num)s time after " "%(time)0.3f(s) with args: %(args)s", {'func': tenacity_utils.get_callback_name(func), 'num': tenacity_utils.to_ordinal(trial_number), 'time': trial_time_taken, 'args': formated_args}) def retry_upon_exception(exc, 
delay=0.5, max_delay=2, max_attempts=DEFAULT_MAX_ATTEMPTS): return tenacity.retry(reraise=True, retry=tenacity.retry_if_exception_type(exc), wait=tenacity.wait_exponential( multiplier=delay, max=max_delay), stop=tenacity.stop_after_attempt(max_attempts), before=_log_before_retry, after=_log_after_retry) def retry_random_upon_exception(exc, delay=0.5, max_delay=5, max_attempts=DEFAULT_MAX_ATTEMPTS): return tenacity.retry(reraise=True, retry=tenacity.retry_if_exception_type(exc), wait=tenacity.wait_random_exponential( multiplier=delay, max=max_delay), stop=tenacity.stop_after_attempt(max_attempts), before=_log_before_retry, after=_log_after_retry) def list_match(list1, list2): # Check if list1 and list2 have identical elements, but relaxed on # dict elements where list1's dict element can be a subset of list2's # corresponding element. if (not isinstance(list1, list) or not isinstance(list2, list) or len(list1) != len(list2)): return False list1 = sorted(list1) list2 = sorted(list2) for (v1, v2) in zip(list1, list2): if isinstance(v1, dict): if not dict_match(v1, v2): return False elif isinstance(v1, list): if not list_match(v1, v2): return False elif v1 != v2: return False return True def dict_match(dict1, dict2): # Check if dict1 is a subset of dict2. if not isinstance(dict1, dict) or not isinstance(dict2, dict): return False for k1, v1 in dict1.items(): if k1 not in dict2: return False v2 = dict2[k1] if isinstance(v1, dict): if not dict_match(v1, v2): return False elif isinstance(v1, list): if not list_match(v1, v2): return False elif v1 != v2: return False return True def get_name_and_uuid(name, uuid, tag=None, maxlen=80): short_uuid = '_' + uuid[:5] + '...' + uuid[-5:] maxlen = maxlen - len(short_uuid) if tag: maxlen = maxlen - len(tag) - 1 return name[:maxlen] + '_' + tag + short_uuid else: return name[:maxlen] + short_uuid def build_extra_args(body, extra_args, **kwargs): for arg in extra_args: if arg in kwargs: body[arg] = kwargs[arg] return body def escape_tag_data(data): # ElasticSearch query_string requires slashes and dashes to # be escaped. 
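# For example (illustrative), escape_tag_data('os-project-id/123') returns
# 'os\-project\-id\/123'.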
We assume no other reserved characters will be # used in tag scopes or values return data.replace('/', '\\/').replace('-', '\\-') class NsxLibCache(object): def __init__(self, timeout): self.timeout = timeout self._cache = {} super(NsxLibCache, self).__init__() def expired(self, entry): return (time.time() - entry['time']) > self.timeout def get(self, key): if key in self._cache: # check that the value is still valid if self.expired(self._cache[key]): # this entry has expired self.remove(key) else: return self._cache[key]['value'] def update(self, key, value): self._cache[key] = {'time': time.time(), 'value': value} def remove(self, key): if key in self._cache: del self._cache[key] class NsxLibApiBase(object): """Base class for nsxlib api """ def __init__(self, client, nsxlib_config=None, nsxlib=None): self.client = client self.nsxlib_config = nsxlib_config self.nsxlib = nsxlib super(NsxLibApiBase, self).__init__() self.cache = NsxLibCache(self.cache_timeout) @abc.abstractproperty def uri_segment(self): pass @abc.abstractproperty def resource_type(self): pass @property def use_cache_for_get(self): """By default no caching is used""" return False @property def cache_timeout(self): """the default cache aging time in seconds""" return DEFAULT_CACHE_AGE_SEC def get_path(self, resource=None): if resource: return '%s/%s' % (self.uri_segment, resource) return self.uri_segment def list(self): return self.client.list(self.uri_segment) def get(self, uuid, silent=False): if self.use_cache_for_get: # try to get it from the cache result = self.cache.get(uuid) if result: if not silent: LOG.debug("Getting %s from cache.", self.get_path(uuid)) return result # call the client result = self.client.get(self.get_path(uuid), silent=silent) if result and self.use_cache_for_get: # add the result to the cache self.cache.update(uuid, result) return result def read(self, uuid, silent=False): """The same as get""" return self.get(uuid, silent=silent) def delete(self, uuid): if self.use_cache_for_get: self.cache.remove(uuid) return self.client.delete(self.get_path(uuid)) def find_by_display_name(self, display_name): found = [] for resource in self.list()['results']: if resource['display_name'] == display_name: found.append(resource) return found def _update_with_retry(self, uuid, payload): if self.use_cache_for_get: self.cache.remove(uuid) return self._update_resource(self.get_path(uuid), payload, retry=True) def _internal_update_resource(self, resource, payload, headers=None, create_action=False, get_params=None, action_params=None, update_payload_cbk=None): get_path = action_path = resource if get_params: get_path = get_path + get_params if action_params: action_path = action_path + action_params revised_payload = self.client.get(get_path) # custom resource callback for updating the payload if update_payload_cbk: update_payload_cbk(revised_payload, payload) # special treatment for tags (merge old and new) if 'tags_update' in payload.keys(): revised_payload['tags'] = update_v3_tags( revised_payload.get('tags', []), payload['tags_update']) del payload['tags_update'] # update all the rest of the parameters for key_name in payload.keys(): # handle 2 levels of dictionary: if isinstance(payload[key_name], dict): if key_name not in revised_payload: revised_payload[key_name] = payload[key_name] else: # copy each key revised_payload[key_name].update(payload[key_name]) else: revised_payload[key_name] = payload[key_name] if create_action: return self.client.create(action_path, revised_payload, headers=headers) else: return 
self.client.update(action_path, revised_payload, headers=headers) def _update_resource(self, resource, payload, headers=None, create_action=False, get_params=None, action_params=None, update_payload_cbk=None, retry=False): if retry: # If revision_id of the payload that we send is older than what # NSX has, we will get a 412: Precondition Failed. # In that case we need to re-fetch, patch the response and send # it again with the new revision_id @retry_upon_exception( nsxlib_exceptions.StaleRevision, max_attempts=self.client.max_attempts) def do_update(): return self._internal_update_resource( resource, payload, headers=headers, create_action=create_action, get_params=get_params, action_params=action_params, update_payload_cbk=update_payload_cbk) return do_update() else: return self._internal_update_resource( resource, payload, headers=headers, create_action=create_action, get_params=get_params, action_params=action_params, update_payload_cbk=update_payload_cbk) def _delete_with_retry(self, resource): # Using internal method so we can access max_attempts in the decorator @retry_upon_exception( nsxlib_exceptions.StaleRevision, max_attempts=self.client.max_attempts) def _do_delete(): self.client.delete(self.get_path(resource)) _do_delete() def _create_with_retry(self, resource, body=None, headers=None): # Using internal method so we can access max_attempts in the decorator @retry_upon_exception( nsxlib_exceptions.StaleRevision, max_attempts=self.client.max_attempts) def _do_create(): return self.client.create(resource, body, headers=headers) return _do_create() def _get_resource_by_name_or_id(self, name_or_id, resource): all_results = self.client.list(resource)['results'] matched_results = [] for rs in all_results: if rs.get('id') == name_or_id: # Matched by id - must be unique return name_or_id if rs.get('display_name') == name_or_id: # Matched by name - add to the list to verify it is unique matched_results.append(rs) if len(matched_results) == 0: err_msg = (_("Could not find %(resource)s %(name)s") % {'name': name_or_id, 'resource': resource}) # TODO(aaron): improve exception handling... raise nsxlib_exceptions.ManagerError(details=err_msg) elif len(matched_results) > 1: err_msg = (_("Found multiple %(resource)s named %(name)s") % {'name': name_or_id, 'resource': resource}) # TODO(aaron): improve exception handling... raise nsxlib_exceptions.ManagerError(details=err_msg) return matched_results[0].get('id') def get_id_by_name_or_id(self, name_or_id): """Get a resource by it's display name or uuid Return the resource data, or raise an exception if not found or not unique """ return self._get_resource_by_name_or_id(name_or_id, self.get_path()) def build_v3_api_version_tag(self): """Some resources are created on the manager that do not have a corresponding plugin resource. """ return [{'scope': self.nsxlib_config.plugin_scope, 'tag': self.nsxlib_config.plugin_tag}, {'scope': "os-api-version", 'tag': self.nsxlib_config.plugin_ver}] def is_internal_resource(self, nsx_resource): """Indicates whether the passed nsx-resource is internal owned by the plugin for internal use. 
""" for tag in nsx_resource.get('tags', []): if tag['scope'] == self.nsxlib_config.plugin_scope: return tag['tag'] == self.nsxlib_config.plugin_tag return False def build_v3_tags_payload(self, resource, resource_type, project_name): """Construct the tags payload that will be pushed to NSX-v3 Add :, os-project-id:, os-project-name: os-api-version: """ _validate_resource_type_length(resource_type) # There may be cases when the plugin creates the port, for example DHCP if not project_name: project_name = self.nsxlib_config.plugin_tag project_id = (resource.get('project_id', '') or resource.get('tenant_id', '')) # If project_id is present in resource and set to None, explicitly set # the project_id in tags as ''. if project_id is None: project_id = '' return [{'scope': resource_type, 'tag': resource.get('id', '')[:MAX_TAG_LEN]}, {'scope': 'os-project-id', 'tag': project_id[:MAX_TAG_LEN]}, {'scope': 'os-project-name', 'tag': project_name[:MAX_TAG_LEN]}, {'scope': 'os-api-version', 'tag': self.nsxlib_config.plugin_ver}] vmware-nsxlib-12.0.1/vmware_nsxlib/v3/native_dhcp.py0000666000175100017510000000702113244535763022501 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from vmware_nsxlib.v3 import constants from vmware_nsxlib.v3 import utils class NsxLibNativeDhcp(utils.NsxLibApiBase): def build_static_routes(self, gateway_ip, cidr, host_routes): # The following code is based on _generate_opts_per_subnet() in # neutron/agent/linux/dhcp.py. It prepares DHCP options for a subnet. # Add route for directly connected network. static_routes = [{'network': cidr, 'next_hop': '0.0.0.0'}] # Copy routes from subnet host_routes attribute. for hr in host_routes: if hr['destination'] == constants.IPv4_ANY: if not gateway_ip: gateway_ip = hr['nexthop'] else: static_routes.append({'network': hr['destination'], 'next_hop': hr['nexthop']}) # If gateway_ip is defined, add default route via this gateway. if gateway_ip: static_routes.append({'network': constants.IPv4_ANY, 'next_hop': gateway_ip}) return static_routes, gateway_ip def build_server_config(self, network, subnet, port, tags, default_dns_nameservers=None, default_dns_domain=None): # Prepare the configuration for a new logical DHCP server. 
server_ip = "%s/%u" % (port['fixed_ips'][0]['ip_address'], netaddr.IPNetwork(subnet['cidr']).prefixlen) dns_nameservers = subnet['dns_nameservers'] if not dns_nameservers or not utils.is_attr_set(dns_nameservers): # use the default one , or the globally configured one if default_dns_nameservers is not None: dns_nameservers = default_dns_nameservers else: dns_nameservers = self.nsxlib_config.dns_nameservers gateway_ip = subnet['gateway_ip'] if not utils.is_attr_set(gateway_ip): gateway_ip = None static_routes, gateway_ip = self.build_static_routes( gateway_ip, subnet['cidr'], subnet['host_routes']) options = {'option121': {'static_routes': static_routes}} name = utils.get_name_and_uuid(network['name'] or 'dhcpserver', network['id']) dns_domain = network.get('dns_domain') if dns_domain: domain_name = dns_domain['dns_domain'] else: # use the default one , or the globally configured one if default_dns_domain is not None: domain_name = default_dns_domain else: domain_name = self.nsxlib_config.dns_domain return {'name': name, 'server_ip': server_ip, 'dns_nameservers': dns_nameservers, 'domain_name': domain_name, 'gateway_ip': gateway_ip, 'options': options, 'tags': tags} vmware-nsxlib-12.0.1/vmware_nsxlib/v3/cluster.py0000666000175100017510000005573413244535763021714 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import abc import contextlib import copy import datetime import inspect import itertools import logging import re import eventlet from eventlet import greenpool from eventlet import pools import OpenSSL from oslo_log import log from oslo_service import loopingcall import requests from requests import adapters from requests import exceptions as requests_exceptions import six import six.moves.urllib.parse as urlparse from vmware_nsxlib._i18n import _ from vmware_nsxlib.v3 import client as nsx_client from vmware_nsxlib.v3 import exceptions LOG = log.getLogger(__name__) # disable warning message for each HTTP retry logging.getLogger( "requests.packages.urllib3.connectionpool").setLevel(logging.ERROR) @six.add_metaclass(abc.ABCMeta) class AbstractHTTPProvider(object): """Interface for providers of HTTP connections. which are responsible for creating and validating connections for their underlying HTTP support. """ @property def default_scheme(self): return 'https' @abc.abstractproperty def provider_id(self): """A unique string name for this provider.""" pass @abc.abstractmethod def validate_connection(self, cluster_api, endpoint, conn): """Validate the said connection for the given endpoint and cluster.""" pass @abc.abstractmethod def new_connection(self, cluster_api, provider): """Create a new http connection. Create a new http connection for the said cluster and cluster provider. The actual connection should duck type requests.Session http methods (get(), put(), etc.). """ pass @abc.abstractmethod def is_connection_exception(self, exception): """Determine if the given exception is related to connection failure. 
Return True if it's a connection exception and False otherwise. """ class TimeoutSession(requests.Session): """Extends requests.Session to support timeout at the session level.""" def __init__(self, timeout, read_timeout): self.timeout = timeout self.read_timeout = read_timeout self.cert_provider = None super(TimeoutSession, self).__init__() @property def cert_provider(self): return self._cert_provider @cert_provider.setter def cert_provider(self, value): self._cert_provider = value # wrapper timeouts at the session level # see: https://goo.gl/xNk7aM def request(self, *args, **kwargs): def request_with_retry_on_ssl_error(*args, **kwargs): try: return super(TimeoutSession, self).request(*args, **kwargs) except OpenSSL.SSL.Error: # This can happen when connection tries to access certificate # file it was opened with (renegotiation?) # Proper way to solve this would be to pass in-memory cert # to ssl C code. # Retrying here works around the problem return super(TimeoutSession, self).request(*args, **kwargs) def get_cert_provider(): if inspect.isclass(self._cert_provider): # If client provided certificate provider as a class, # we spawn an instance here return self._cert_provider() return self._cert_provider if 'timeout' not in kwargs: kwargs['timeout'] = (self.timeout, self.read_timeout) if not self.cert_provider: # No client certificate needed return super(TimeoutSession, self).request(*args, **kwargs) if self.cert is not None: # Recursive call - shouldn't happen return request_with_retry_on_ssl_error(*args, **kwargs) # The following with statement allows for preparing certificate and # private key file and dispose it at the end of request # (since PK is sensitive information, immediate disposal is # important). # It would be optimal to populate certificate once per connection, # per request. Unfortunately requests library verifies cert file # existence regardless of whether certificate is going to be used # for this request. # Optimal solution for this would be to expose certificate as variable # and not as a file to the SSL library with get_cert_provider() as provider: self.cert = provider.filename() try: ret = request_with_retry_on_ssl_error(*args, **kwargs) except Exception as e: self.cert = None raise e self.cert = None return ret class NSXRequestsHTTPProvider(AbstractHTTPProvider): """Concrete implementation of AbstractHTTPProvider. using requests.Session() as the underlying connection. 
""" SESSION_CREATE_URL = '/api/session/create' COOKIE_FIELD = 'Cookie' SET_COOKIE_FIELD = 'Set-Cookie' XSRF_TOKEN = 'X-XSRF-TOKEN' JSESSIONID = 'JSESSIONID' @property def provider_id(self): return "%s-%s" % (requests.__title__, requests.__version__) def validate_connection(self, cluster_api, endpoint, conn): client = nsx_client.NSX3Client( conn, url_prefix=endpoint.provider.url, url_path_base=cluster_api.nsxlib_config.url_base, default_headers=conn.default_headers) keepalive_section = cluster_api.nsxlib_config.keepalive_section result = client.get(keepalive_section, silent=True) # If keeplive section returns a list, it is assumed to be non-empty if not result or result.get('result_count', 1) <= 0: msg = _("No %(section)s found " "for '%(url)s'") % {'section': keepalive_section, 'url': endpoint.provider.url} LOG.warning(msg) raise exceptions.ResourceNotFound( manager=endpoint.provider.url, operation=msg) def new_connection(self, cluster_api, provider): config = cluster_api.nsxlib_config session = TimeoutSession(config.http_timeout, config.http_read_timeout) if config.client_cert_provider: session.cert_provider = config.client_cert_provider else: session.auth = (provider.username, provider.password) # NSX v3 doesn't use redirects session.max_redirects = 0 session.verify = not config.insecure if session.verify and provider.ca_file: # verify using the said ca bundle path session.verify = provider.ca_file # we are pooling with eventlet in the cluster class adapter = adapters.HTTPAdapter( pool_connections=1, pool_maxsize=1, max_retries=config.retries, pool_block=False) session.mount('http://', adapter) session.mount('https://', adapter) self.get_default_headers(session, provider, config.allow_overwrite_header) return session def is_connection_exception(self, exception): return isinstance(exception, requests_exceptions.ConnectionError) def get_default_headers(self, session, provider, allow_overwrite_header): """Get the default headers that should be added to future requests""" session.default_headers = {} # Perform the initial session create and get the relevant jsessionid & # X-XSRF-TOKEN for future requests req_data = '' if not session.cert_provider: # With client certificate authentication, username and password # may not be provided. # If provided, backend treats these credentials as authentication # and ignores client cert as principal identity indication. 
req_data = 'j_username=%s&j_password=%s' % (provider.username, provider.password) req_headers = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded'} # Cannot use the certificate at this stage, because it is used for # the certificate generation resp = session.request('post', provider.url + self.SESSION_CREATE_URL, data=req_data, headers=req_headers) if resp.status_code != 200: LOG.error("Session create failed for endpoint %s", provider.url) # this will later cause the endpoint to be Down else: for header_name in resp.headers: if self.SET_COOKIE_FIELD.lower() == header_name.lower(): m = re.match('%s=.*?\;' % self.JSESSIONID, resp.headers[header_name]) if m: session.default_headers[self.COOKIE_FIELD] = m.group() if self.XSRF_TOKEN.lower() == header_name.lower(): session.default_headers[self.XSRF_TOKEN] = resp.headers[ header_name] LOG.info("Session create succeeded for endpoint %(url)s with " "headers %(hdr)s", {'url': provider.url, 'hdr': session.default_headers}) # Add allow-overwrite if configured if allow_overwrite_header: session.default_headers['X-Allow-Overwrite'] = 'true' class ClusterHealth(object): """Indicator of overall cluster health. with respect to the connectivity of the clusters managed endpoints. """ # all endpoints are UP GREEN = 'GREEN' # at least 1 endpoint is UP, but 1 or more are DOWN ORANGE = 'ORANGE' # all endpoints are DOWN RED = 'RED' class EndpointState(object): """Tracks the connectivity state for a said endpoint.""" # no UP or DOWN state recorded yet INITIALIZED = 'INITIALIZED' # endpoint has been validate and is good UP = 'UP' # endpoint can't be reached or validated DOWN = 'DOWN' class Provider(object): """Data holder for a provider Which has a unique id a connection URL, and the credential details. """ def __init__(self, provider_id, provider_url, username, password, ca_file): self.id = provider_id self.url = provider_url self.username = username self.password = password self.ca_file = ca_file def __str__(self): return str(self.url) class Endpoint(object): """A single NSX manager endpoint (host). A single NSX manager endpoint (host) which includes related information such as the endpoint's provider, state, etc.. A pool is used to hold connections to the endpoint which are doled out when proxying HTTP methods to the underlying connections. """ def __init__(self, provider, pool): self.provider = provider self.pool = pool self._state = EndpointState.INITIALIZED self._last_updated = datetime.datetime.now() def regenerate_pool(self): self.pool = pools.Pool(min_size=self.pool.min_size, max_size=self.pool.max_size, order_as_stack=True, create=self.pool.create) @property def last_updated(self): return self._last_updated @property def state(self): return self._state def set_state(self, state): if self.state != state: LOG.info("Endpoint '%(ep)s' changing from state" " '%(old)s' to '%(new)s'", {'ep': self.provider, 'old': self.state, 'new': state}) old_state = self._state self._state = state self._last_updated = datetime.datetime.now() return old_state def __str__(self): return "[%s] %s" % (self.state, self.provider) class EndpointConnection(object): """Simple data holder Which contains an endpoint and a connection for that endpoint. """ def __init__(self, endpoint, connection): self.endpoint = endpoint self.connection = connection class ClusteredAPI(object): """Duck types the major HTTP based methods of a requests.Session Such as get(), put(), post(), etc. and transparently proxies those calls to one of its managed NSX manager endpoints. 
""" _HTTP_VERBS = ['get', 'delete', 'head', 'put', 'post', 'patch', 'create'] def __init__(self, providers, http_provider, min_conns_per_pool=1, max_conns_per_pool=500, keepalive_interval=33): self._http_provider = http_provider self._keepalive_interval = keepalive_interval def _init_cluster(*args, **kwargs): self._init_endpoints(providers, min_conns_per_pool, max_conns_per_pool) _init_cluster() # keep this internal method for reinitialize upon fork # for api workers to ensure each process has its own keepalive # loops + state self._reinit_cluster = _init_cluster def _init_endpoints(self, providers, min_conns_per_pool, max_conns_per_pool): LOG.debug("Initializing API endpoints") def _create_conn(p): def _conn(): # called when a pool needs to create a new connection return self._http_provider.new_connection(self, p) return _conn self._endpoints = {} for provider in providers: pool = pools.Pool( min_size=min_conns_per_pool, max_size=max_conns_per_pool, order_as_stack=True, create=_create_conn(provider)) endpoint = Endpoint(provider, pool) self._endpoints[provider.id] = endpoint # service requests using round robin self._endpoint_schedule = itertools.cycle(self._endpoints.values()) # duck type to proxy http invocations for method in ClusteredAPI._HTTP_VERBS: setattr(self, method, self._proxy_stub(method)) conns = greenpool.GreenPool() for endpoint in self._endpoints.values(): conns.spawn(self._validate, endpoint) eventlet.sleep(0) while conns.running(): if (self.health == ClusterHealth.GREEN or self.health == ClusterHealth.ORANGE): # only wait for 1 or more endpoints to reduce init time break eventlet.sleep(0.5) for endpoint in self._endpoints.values(): # dynamic loop for each endpoint to ensure connectivity loop = loopingcall.DynamicLoopingCall( self._endpoint_keepalive, endpoint) loop.start(initial_delay=self._keepalive_interval, periodic_interval_max=self._keepalive_interval, stop_on_exception=False) LOG.debug("Done initializing API endpoint(s). 
" "API cluster health: %s", self.health) def _endpoint_keepalive(self, endpoint): delta = datetime.datetime.now() - endpoint.last_updated if delta.seconds >= self._keepalive_interval: # TODO(boden): backoff on validation failure self._validate(endpoint) return self._keepalive_interval return self._keepalive_interval - delta.seconds @property def providers(self): return [ep.provider for ep in self._endpoints.values()] @property def endpoints(self): return copy.copy(self._endpoints) @property def http_provider(self): return self._http_provider @property def health(self): down = 0 up = 0 for endpoint in self._endpoints.values(): if endpoint.state != EndpointState.UP: down += 1 else: up += 1 if down == len(self._endpoints): return ClusterHealth.RED return (ClusterHealth.GREEN if up == len(self._endpoints) else ClusterHealth.ORANGE) def _validate(self, endpoint): try: with endpoint.pool.item() as conn: self._http_provider.validate_connection(self, endpoint, conn) endpoint.set_state(EndpointState.UP) except exceptions.ClientCertificateNotTrusted: LOG.warning("Failed to validate API cluster endpoint " "'%(ep)s' due to untrusted client certificate", {'ep': endpoint}) # regenerate connection pool based on new certificate endpoint.regenerate_pool() except exceptions.BadXSRFToken: LOG.warning("Failed to validate API cluster endpoint " "'%(ep)s' due to expired XSRF token", {'ep': endpoint}) # regenerate connection pool based on token endpoint.regenerate_pool() except Exception as e: endpoint.set_state(EndpointState.DOWN) LOG.warning("Failed to validate API cluster endpoint " "'%(ep)s' due to: %(err)s", {'ep': endpoint, 'err': e}) def _select_endpoint(self): # check for UP state until exhausting all endpoints seen, total = 0, len(self._endpoints.values()) while seen < total: endpoint = next(self._endpoint_schedule) if endpoint.state == EndpointState.UP: return endpoint seen += 1 def endpoint_for_connection(self, conn): # check all endpoint pools for endpoint in self._endpoints.values(): if (conn in endpoint.pool.channel.queue or conn in endpoint.pool.free_items): return endpoint @property def cluster_id(self): return ','.join([str(ep.provider.url) for ep in self._endpoints.values()]) @contextlib.contextmanager def connection(self): with self.endpoint_connection() as conn_data: yield conn_data.connection @contextlib.contextmanager def endpoint_connection(self): endpoint = self._select_endpoint() if not endpoint: LOG.debug("All endpoints down for: %s" % [str(ep) for ep in self._endpoints.values()]) # all endpoints are DOWN and will have their next # state updated as per _endpoint_keepalive() raise exceptions.ServiceClusterUnavailable( cluster_id=self.cluster_id) if endpoint.pool.free() == 0: LOG.info("API endpoint %(ep)s at connection " "capacity %(max)s and has %(waiting)s waiting", {'ep': endpoint, 'max': endpoint.pool.max_size, 'waiting': endpoint.pool.waiting()}) # pool.item() will wait if pool has 0 free with endpoint.pool.item() as conn: yield EndpointConnection(endpoint, conn) def _proxy_stub(self, proxy_for): def _call_proxy(url, *args, **kwargs): return self._proxy(proxy_for, url, *args, **kwargs) return _call_proxy def _proxy(self, proxy_for, uri, *args, **kwargs): # proxy http request call to an avail endpoint with self.endpoint_connection() as conn_data: conn = conn_data.connection endpoint = conn_data.endpoint # http conn must support requests style interface do_request = getattr(conn, proxy_for) if not uri.startswith('/'): uri = "/%s" % uri url = "%s%s" % (endpoint.provider.url, uri) try: 
LOG.debug("API cluster proxy %s %s to %s", proxy_for.upper(), uri, url) # Add the connection default headers if conn.default_headers: kwargs['headers'] = kwargs.get('headers', {}) kwargs['headers'].update(conn.default_headers) # call the actual connection method to do the # http request/response over the wire response = do_request(url, *args, **kwargs) endpoint.set_state(EndpointState.UP) return response except Exception as e: LOG.warning("Request failed due to: %s", e) if not self._http_provider.is_connection_exception(e): # only trap and retry connection errors raise e endpoint.set_state(EndpointState.DOWN) LOG.debug("Connection to %s failed, checking additional " "endpoints" % url) # retry until exhausting endpoints return self._proxy(proxy_for, uri, *args, **kwargs) class NSXClusteredAPI(ClusteredAPI): """Extends ClusteredAPI to get conf values and setup the NSXv3 cluster.""" def __init__(self, nsxlib_config): self.nsxlib_config = nsxlib_config self._http_provider = (nsxlib_config.http_provider or NSXRequestsHTTPProvider()) super(NSXClusteredAPI, self).__init__( self._build_conf_providers(), self._http_provider, max_conns_per_pool=self.nsxlib_config.concurrent_connections, keepalive_interval=self.nsxlib_config.conn_idle_timeout) LOG.debug("Created NSX clustered API with '%s' " "provider", self._http_provider.provider_id) def _build_conf_providers(self): def _schemed_url(uri): uri = uri.strip('/') return urlparse.urlparse( uri if uri.startswith('http') else "%s://%s" % (self._http_provider.default_scheme, uri)) conf_urls = self.nsxlib_config.nsx_api_managers[:] urls = [] providers = [] provider_index = -1 for conf_url in conf_urls: provider_index += 1 conf_url = _schemed_url(conf_url) if conf_url in urls: LOG.warning("'%s' already defined in configuration file. " "Skipping.", urlparse.urlunparse(conf_url)) continue urls.append(conf_url) providers.append( Provider( conf_url.netloc, urlparse.urlunparse(conf_url), self.nsxlib_config.username(provider_index), self.nsxlib_config.password(provider_index), self.nsxlib_config.ca_file(provider_index))) return providers vmware-nsxlib-12.0.1/vmware_nsxlib/v3/ns_group_manager.py0000666000175100017510000001363313244535763023551 0ustar zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from oslo_log import log from vmware_nsxlib._i18n import _ from vmware_nsxlib.v3 import exceptions from vmware_nsxlib.v3 import nsx_constants as consts LOG = log.getLogger(__name__) class NSGroupManager(object): """This class assists with NSX integration for Neutron security-groups Each Neutron security-group is associated with NSX NSGroup object. Some specific security policies are the same across all security-groups, i.e - Default drop rule, DHCP. In order to bind these rules to all NSGroups (security-groups), we create a nested NSGroup (which its members are also of type NSGroups) to group the other NSGroups and associate it with these rules. 
In practice, one NSGroup (nested) can't contain all the other NSGroups, as it has strict size limit. To overcome the limited space challenge, we create several nested groups instead of just one, and we evenly distribute NSGroups (security-groups) between them. By using an hashing function on the NSGroup uuid we determine in which group it should be added, and when deleting an NSGroup (security-group) we use the same procedure to find which nested group it was added. """ NESTED_GROUP_NAME = 'OS Nested Group' NESTED_GROUP_DESCRIPTION = ('OpenStack NSGroup. Do not delete.') def __init__(self, nsxlib, size): self.nsxlib_nsgroup = nsxlib.ns_group self._nested_groups = self._init_nested_groups(size) self._size = len(self._nested_groups) @property def size(self): return self._size @property def nested_groups(self): return self._nested_groups def _init_nested_groups(self, requested_size): # Construct the groups dict - # {0: ,.., n-1: } size = requested_size nested_groups = { self._get_nested_group_index_from_name(nsgroup): nsgroup['id'] for nsgroup in self.nsxlib_nsgroup.list() if self.nsxlib_nsgroup.is_internal_resource(nsgroup)} if nested_groups: size = max(requested_size, max(nested_groups) + 1) if size > requested_size: LOG.warning("Lowering the value of " "nsx_v3:number_of_nested_groups isn't " "supported, '%s' nested-groups will be used.", size) absent_groups = set(range(size)) - set(nested_groups.keys()) if absent_groups: LOG.warning( "Found %(num_present)s Nested Groups, " "creating %(num_absent)s more.", {'num_present': len(nested_groups), 'num_absent': len(absent_groups)}) for i in absent_groups: cont = self._create_nested_group(i) nested_groups[i] = cont['id'] return nested_groups def _get_nested_group_index_from_name(self, nested_group): # The name format is "Nested Group " return int(nested_group['display_name'].split()[-1]) - 1 def _create_nested_group(self, index): name_prefix = NSGroupManager.NESTED_GROUP_NAME name = '%s %s' % (name_prefix, index + 1) description = NSGroupManager.NESTED_GROUP_DESCRIPTION tags = self.nsxlib_nsgroup.build_v3_api_version_tag() return self.nsxlib_nsgroup.create(name, description, tags) def _hash_uuid(self, internal_id): return hash(uuid.UUID(internal_id)) def _suggest_nested_group(self, internal_id): # Suggests a nested group to use, can be iterated to find alternative # group in case that previous suggestions did not help. index = self._hash_uuid(internal_id) % self.size yield self.nested_groups[index] for i in range(1, self.size): index = (index + 1) % self.size yield self.nested_groups[index] def add_nsgroup(self, nsgroup_id): for group in self._suggest_nested_group(nsgroup_id): try: LOG.debug("Adding NSGroup %s to nested group %s", nsgroup_id, group) self.nsxlib_nsgroup.add_members( group, consts.NSGROUP, [nsgroup_id]) break except exceptions.NSGroupIsFull: LOG.debug("Nested group %(group_id)s is full, trying the " "next group..", {'group_id': group}) else: raise exceptions.ManagerError( details=_("Reached the maximum supported amount of " "security groups.")) def remove_nsgroup(self, nsgroup_id): for group in self._suggest_nested_group(nsgroup_id): try: self.nsxlib_nsgroup.remove_member( group, consts.NSGROUP, nsgroup_id, verify=True) break except exceptions.NSGroupMemberNotFound: LOG.warning("NSGroup %(nsgroup)s was expected to be found " "in group %(group_id)s, but wasn't. 
" "Looking in the next group..", {'nsgroup': nsgroup_id, 'group_id': group}) continue else: LOG.warning("NSGroup %s was marked for removal, but its " "reference is missing.", nsgroup_id) vmware-nsxlib-12.0.1/vmware_nsxlib/v3/security.py0000666000175100017510000006151213244535763022071 0ustar zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ NSX-V3 Plugin security & Distributed Firewall integration module """ from oslo_log import log from oslo_utils import excutils from vmware_nsxlib.v3 import constants from vmware_nsxlib.v3 import exceptions from vmware_nsxlib.v3 import nsx_constants as consts from vmware_nsxlib.v3 import utils LOG = log.getLogger(__name__) PORT_SG_SCOPE = 'os-security-group' MAX_NSGROUPS_CRITERIA_TAGS = 10 class NsxLibNsGroup(utils.NsxLibApiBase): def __init__(self, client, nsxlib_config, firewall_section_handler, nsxlib=None): self.firewall_section = firewall_section_handler super(NsxLibNsGroup, self).__init__(client, nsxlib_config, nsxlib=nsxlib) @property def uri_segment(self): return 'ns-groups' @property def resource_type(self): return 'NSGroup' def update_on_backend(self, context, security_group, nsgroup_id, section_id, log_sg_allowed_traffic): name = self.get_name(security_group) description = security_group['description'] logging = (log_sg_allowed_traffic or security_group[consts.LOGGING]) rules = self.firewall_section._process_rules_logging_for_update( section_id, logging) self.update(nsgroup_id, name, description) self.firewall_section.update(section_id, name, description, rules=rules) def get_name(self, security_group): # NOTE(roeyc): We add the security-group id to the NSGroup name, # for usability purposes. return '%(name)s - %(id)s' % security_group def get_lport_tags(self, secgroups): if len(secgroups) > MAX_NSGROUPS_CRITERIA_TAGS: raise exceptions.NumberOfNsgroupCriteriaTagsReached( max_num=MAX_NSGROUPS_CRITERIA_TAGS) tags = [] for sg in secgroups: tags = utils.add_v3_tag(tags, PORT_SG_SCOPE, sg) if not tags: # This port shouldn't be associated with any security-group tags = [{'scope': PORT_SG_SCOPE, 'tag': None}] return tags def update_lport(self, context, lport_id, original, updated): added = set(updated) - set(original) removed = set(original) - set(updated) for nsgroup_id in added: try: self.add_members( nsgroup_id, consts.TARGET_TYPE_LOGICAL_PORT, [lport_id]) except exceptions.NSGroupIsFull: for nsgroup_id in added: # NOTE(roeyc): If the port was not added to the nsgroup # yet, then this request will silently fail. 
self.remove_member( nsgroup_id, consts.TARGET_TYPE_LOGICAL_PORT, lport_id) raise exceptions.SecurityGroupMaximumCapacityReached( sg_id=nsgroup_id) except exceptions.ResourceNotFound: with excutils.save_and_reraise_exception(): LOG.error("NSGroup %s doesn't exists", nsgroup_id) for nsgroup_id in removed: self.remove_member( nsgroup_id, consts.TARGET_TYPE_LOGICAL_PORT, lport_id) def get_nsservice(self, resource_type, **properties): service = {'resource_type': resource_type} service.update(properties) return {'service': service} def get_nsgroup_complex_expression(self, expressions): return {'resource_type': consts.NSGROUP_COMPLEX_EXP, 'expressions': expressions} def get_switch_tag_expression(self, scope, tag): return {'resource_type': consts.NSGROUP_TAG_EXP, 'target_type': consts.TARGET_TYPE_LOGICAL_SWITCH, 'scope': scope, 'tag': tag} def get_port_tag_expression(self, scope, tag): return {'resource_type': consts.NSGROUP_TAG_EXP, 'target_type': consts.TARGET_TYPE_LOGICAL_PORT, 'scope': scope, 'tag': tag} def create(self, display_name, description, tags, membership_criteria=None, members=None): body = {'display_name': display_name, 'description': description, 'tags': tags, 'members': [] if members is None else members} if membership_criteria: # Allow caller to pass a list of membership criterias. # The 'else' block is maintained for backwards compatibility # where in a caller might only send a single membership criteria. if isinstance(membership_criteria, list): body.update({'membership_criteria': membership_criteria}) else: body.update({'membership_criteria': [membership_criteria]}) return self.client.create(self.get_path(), body) def list(self): return self.client.list( '%s?populate_references=false' % self.get_path()).get( 'results', []) def update(self, nsgroup_id, display_name=None, description=None, membership_criteria=None, members=None, tags_update=None): nsgroup = {} if display_name is not None: nsgroup['display_name'] = display_name if description is not None: nsgroup['description'] = description if members is not None: nsgroup['members'] = members if membership_criteria is not None: if isinstance(membership_criteria, list): nsgroup['membership_criteria'] = membership_criteria else: nsgroup['membership_criteria'] = [membership_criteria] if tags_update is not None: nsgroup['tags_update'] = tags_update return self._update_resource( self.get_path(nsgroup_id), nsgroup, get_params='?populate_references=true', retry=True) def get_member_expression(self, target_type, target_id): return { 'resource_type': consts.NSGROUP_SIMPLE_EXP, 'target_property': 'id', 'target_type': target_type, 'op': consts.EQUALS, 'value': target_id} def _update_with_members(self, nsgroup_id, members, action): members_update = '%s?action=%s' % (self.get_path(nsgroup_id), action) return self.client.create(members_update, members) def add_members(self, nsgroup_id, target_type, target_ids): members = [] for target_id in target_ids: member_expr = self.get_member_expression( target_type, target_id) members.append(member_expr) members = {'members': members} try: return self._update_with_members( nsgroup_id, members, consts.NSGROUP_ADD_MEMBERS) except (exceptions.StaleRevision, exceptions.ResourceNotFound): raise except exceptions.ManagerError: # REVISIT(roeyc): A ManagerError might have been raised for a # different reason, e.g - NSGroup does not exists. 
LOG.warning("Failed to add %(target_type)s resources " "(%(target_ids)s) to NSGroup %(nsgroup_id)s", {'target_type': target_type, 'target_ids': target_ids, 'nsgroup_id': nsgroup_id}) raise exceptions.NSGroupIsFull(nsgroup_id=nsgroup_id) def remove_member(self, nsgroup_id, target_type, target_id, verify=False): member_expr = self.get_member_expression( target_type, target_id) members = {'members': [member_expr]} try: return self._update_with_members( nsgroup_id, members, consts.NSGROUP_REMOVE_MEMBERS) except exceptions.ManagerError: if verify: raise exceptions.NSGroupMemberNotFound(member_id=target_id, nsgroup_id=nsgroup_id) def read(self, nsgroup_id): return self.client.get( '%s?populate_references=true' % self.get_path(nsgroup_id)) def delete(self, nsgroup_id): try: return self.client.delete( '%s?force=true' % self.get_path(nsgroup_id)) # FIXME(roeyc): Should only except NotFound error. except Exception: LOG.debug("NSGroup %s does not exists for delete request.", nsgroup_id) def find_by_display_name(self, display_name): found = [] for resource in self.list(): if resource['display_name'] == display_name: found.append(resource) return found class NsxLibFirewallSection(utils.NsxLibApiBase): @property def uri_segment(self): return 'firewall/sections' @property def resource_type(self): return 'FirewallSection' def add_member_to_fw_exclude_list(self, target_id, target_type): resource = 'firewall/excludelist?action=add_member' body = {"target_id": target_id, "target_type": target_type} self._create_with_retry(resource, body) def remove_member_from_fw_exclude_list(self, target_id, target_type): resource = ('firewall/excludelist?action=remove_member&object_id=' + target_id) self._create_with_retry(resource) def get_excludelist(self): return self.client.list('firewall/excludelist') def _get_direction(self, sg_rule): return ( consts.IN if sg_rule['direction'] == 'ingress' else consts.OUT ) def _get_l4_protocol_name(self, protocol_number): if protocol_number is None: return protocol_number = constants.IP_PROTOCOL_MAP.get(protocol_number, protocol_number) protocol_number = int(protocol_number) if protocol_number == 6: return consts.TCP elif protocol_number == 17: return consts.UDP elif protocol_number == 1: return consts.ICMPV4 else: return protocol_number def get_nsservice(self, resource_type, **properties): service = {'resource_type': resource_type} service.update(properties) return {'service': service} def _decide_service(self, sg_rule): l4_protocol = self._get_l4_protocol_name(sg_rule['protocol']) direction = self._get_direction(sg_rule) if l4_protocol in [consts.TCP, consts.UDP]: # If port_range_min is not specified then we assume all ports are # matched, relying on neutron to perform validation. 
source_ports = [] if sg_rule['port_range_min'] is None: destination_ports = [] elif sg_rule['port_range_min'] != sg_rule['port_range_max']: # NSX API requires a non-empty range (e.g - '22-23') destination_ports = ['%(port_range_min)s-%(port_range_max)s' % sg_rule] else: destination_ports = ['%(port_range_min)s' % sg_rule] if direction == consts.OUT: source_ports, destination_ports = destination_ports, [] return self.get_nsservice( consts.L4_PORT_SET_NSSERVICE, l4_protocol=l4_protocol, source_ports=source_ports, destination_ports=destination_ports) elif l4_protocol == consts.ICMPV4: return self.get_nsservice( consts.ICMP_TYPE_NSSERVICE, protocol=l4_protocol, icmp_type=sg_rule['port_range_min'], icmp_code=sg_rule['port_range_max']) elif l4_protocol is not None: return self.get_nsservice( consts.IP_PROTOCOL_NSSERVICE, protocol_number=l4_protocol) def _build(self, display_name, description, applied_tos, tags): return {'display_name': display_name, 'description': description, 'stateful': True, 'section_type': consts.FW_SECTION_LAYER3, 'applied_tos': [self.get_nsgroup_reference(t_id) for t_id in applied_tos], 'tags': tags} def create_empty(self, display_name, description, applied_tos, tags, operation=consts.FW_INSERT_BOTTOM, other_section=None): resource = '%s?operation=%s' % (self.uri_segment, operation) body = self._build(display_name, description, applied_tos, tags) if other_section: resource += '&id=%s' % other_section return self._create_with_retry(resource, body) def create_with_rules(self, display_name, description, applied_tos=None, tags=None, operation=consts.FW_INSERT_BOTTOM, other_section=None, rules=None): resource = '%s?operation=%s' % (self.uri_segment, operation) body = { 'display_name': display_name, 'description': description, 'stateful': True, 'section_type': consts.FW_SECTION_LAYER3, 'applied_tos': applied_tos or [], 'tags': tags or [] } if rules is not None: resource += '&action=create_with_rules' body['rules'] = rules if other_section: resource += '&id=%s' % other_section return self._create_with_retry(resource, body) def update(self, section_id, display_name=None, description=None, applied_tos=None, rules=None, tags_update=None, force=False): resource = self.get_path(section_id) params = None section = {} if rules is not None: params = '?action=update_with_rules' section['rules'] = rules if display_name is not None: section['display_name'] = display_name if description is not None: section['description'] = description if applied_tos is not None: section['applied_tos'] = [self.get_nsgroup_reference(nsg_id) for nsg_id in applied_tos] if tags_update is not None: section['tags_update'] = tags_update headers = None if force: # shared sections (like default section) can serve multiple # openstack deployments. If some operate under protected # identities, force-overwrite is needed. 
# REVISIT(annak): find better solution for shared sections headers = {'X-Allow-Overwrite': 'true'} if rules is not None: return self._update_resource(resource, section, headers=headers, create_action=True, action_params=params, retry=True) elif any(p is not None for p in (display_name, description, applied_tos, tags_update)): return self._update_resource(resource, section, headers=headers, action_params=params, retry=True) def list(self): return self.client.list(self.get_path()).get('results', []) def delete(self, section_id): resource = '%s?cascade=true' % self.get_path(section_id) return self.client.delete(resource) def get_nsgroup_reference(self, nsgroup_id): return {'target_id': nsgroup_id, 'target_type': consts.NSGROUP} def get_logicalport_reference(self, port_id): return {'target_id': port_id, 'target_type': consts.TARGET_TYPE_LOGICAL_PORT} def get_ip_cidr_reference(self, ip_cidr_block, ip_protocol): target_type = (consts.TARGET_TYPE_IPV4ADDRESS if ip_protocol == consts.IPV4 else consts.TARGET_TYPE_IPV6ADDRESS) return {'target_id': ip_cidr_block, 'target_type': target_type} def get_rule_address(self, target_id, display_name=None, is_valid=True, target_type=consts.TARGET_TYPE_IPV4ADDRESS): return {'target_display_name': display_name or '', 'target_id': target_id, 'is_valid': is_valid, 'target_type': target_type} def get_l4portset_nsservice(self, sources=None, destinations=None, protocol=consts.TCP): return { 'service': { 'resource_type': 'L4PortSetNSService', 'source_ports': sources or [], 'destination_ports': destinations or [], 'l4_protocol': protocol} } def get_rule_dict(self, display_name, sources=None, destinations=None, direction=consts.IN_OUT, ip_protocol=consts.IPV4_IPV6, services=None, action=consts.FW_ACTION_ALLOW, logged=False, disabled=False, applied_tos=None): rule_dict = {'display_name': display_name, 'direction': direction, 'ip_protocol': ip_protocol, 'action': action, 'logged': logged, 'disabled': disabled, 'sources': sources or [], 'destinations': destinations or [], 'services': services or []} if applied_tos is not None: rule_dict['applied_tos'] = applied_tos return rule_dict def add_rule(self, rule, section_id, operation=consts.FW_INSERT_BOTTOM): resource = '%s/rules' % self.get_path(section_id) params = '?operation=%s' % operation return self._create_with_retry(resource + params, rule) def add_rules(self, rules, section_id, operation=consts.FW_INSERT_BOTTOM): resource = '%s/rules' % self.get_path(section_id) params = '?action=create_multiple&operation=%s' % operation return self._create_with_retry(resource + params, {'rules': rules}) def delete_rule(self, section_id, rule_id): resource = '%s/rules/%s' % (section_id, rule_id) return self._delete_with_retry(resource) def get_rules(self, section_id): resource = '%s/rules' % self.get_path(section_id) return self.client.get(resource) def get_default_rule(self, section_id): rules = self.get_rules(section_id)['results'] last_rule = rules[-1] if last_rule['is_default']: return last_rule def _get_fw_rule_from_sg_rule(self, sg_rule, nsgroup_id, rmt_nsgroup_id, logged, action): # IPV4 or IPV6 ip_protocol = sg_rule['ethertype'].upper() direction = self._get_direction(sg_rule) if sg_rule.get(consts.LOCAL_IP_PREFIX): local_ip_prefix = self.get_ip_cidr_reference( sg_rule[consts.LOCAL_IP_PREFIX], ip_protocol) else: local_ip_prefix = None source = None local_group = self.get_nsgroup_reference(nsgroup_id) if sg_rule['remote_ip_prefix'] is not None: source = self.get_ip_cidr_reference( sg_rule['remote_ip_prefix'], ip_protocol) 
destination = local_ip_prefix or local_group else: if rmt_nsgroup_id: source = self.get_nsgroup_reference(rmt_nsgroup_id) destination = local_ip_prefix or local_group if direction == consts.OUT: source, destination = destination, source service = self._decide_service(sg_rule) name = sg_rule['id'] return self.get_rule_dict(name, [source] if source else None, [destination] if destination else None, direction, ip_protocol, [service] if service else None, action, logged) def create_rules(self, context, section_id, nsgroup_id, logging_enabled, action, security_group_rules, ruleid_2_remote_nsgroup_map): # 1. translate rules # 2. insert in section # 3. return the rules firewall_rules = [] for sg_rule in security_group_rules: remote_nsgroup_id = ruleid_2_remote_nsgroup_map[sg_rule['id']] fw_rule = self._get_fw_rule_from_sg_rule( sg_rule, nsgroup_id, remote_nsgroup_id, logging_enabled, action) firewall_rules.append(fw_rule) return self.add_rules(firewall_rules, section_id) def set_rule_logging(self, section_id, logging): rules = self._process_rules_logging_for_update( section_id, logging) self.update(section_id, rules=rules) def _process_rules_logging_for_update(self, section_id, logging_enabled): rules = self.get_rules(section_id).get('results', []) update_rules = False for rule in rules: if rule['logged'] != logging_enabled: rule['logged'] = logging_enabled update_rules = True return rules if update_rules else None def init_default(self, name, description, nested_groups, log_sg_blocked_traffic): fw_sections = self.list() for section in reversed(fw_sections): if section['display_name'] == name: break else: tags = self.build_v3_api_version_tag() section = self.create_empty( name, description, nested_groups, tags) block_rule = self.get_rule_dict( 'Block All', action=consts.FW_ACTION_DROP, logged=log_sg_blocked_traffic) # TODO(roeyc): Add additional rules to allow IPV6 NDP. 
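# Illustrative example (not taken from a live backend): the 'Block All' rule
# built just above expands to a rule dict of this shape, assuming
# consts.IN_OUT == 'IN_OUT', consts.IPV4_IPV6 == 'IPV4_IPV6' and
# consts.FW_ACTION_DROP == 'DROP':
example_block_rule = {
    'display_name': 'Block All',
    'direction': 'IN_OUT',
    'ip_protocol': 'IPV4_IPV6',
    'action': 'DROP',
    'logged': False,       # True when log_sg_blocked_traffic is enabled
    'disabled': False,
    'sources': [],
    'destinations': [],
    'services': [],
}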
dhcp_client = self.get_nsservice( consts.L4_PORT_SET_NSSERVICE, l4_protocol=consts.UDP, source_ports=[67], destination_ports=[68]) dhcp_client_rule_in = self.get_rule_dict( 'DHCP Reply', direction=consts.IN, services=[dhcp_client]) dhcp_server = ( self.get_nsservice( consts.L4_PORT_SET_NSSERVICE, l4_protocol=consts.UDP, source_ports=[68], destination_ports=[67])) dhcp_client_rule_out = self.get_rule_dict( 'DHCP Request', direction=consts.OUT, services=[dhcp_server]) self.update(section['id'], name, section['description'], applied_tos=nested_groups, rules=[dhcp_client_rule_out, dhcp_client_rule_in, block_rule], force=True) return section['id'] class NsxLibIPSet(utils.NsxLibApiBase): @property def uri_segment(self): return 'ip-sets' @property def resource_type(self): return 'IPSet' def create(self, display_name, description=None, ip_addresses=None, tags=None): body = { 'display_name': display_name, 'description': description or '', 'ip_addresses': ip_addresses or [], 'tags': tags or [] } return self.client.create(self.get_path(), body) def update(self, ip_set_id, display_name=None, description=None, ip_addresses=None, tags_update=None): ip_set = {} if tags_update: ip_set['tags_update'] = tags_update if display_name is not None: ip_set['display_name'] = display_name if description is not None: ip_set['description'] = description if ip_addresses is not None: ip_set['ip_addresses'] = ip_addresses return self._update_resource(self.get_path(ip_set_id), ip_set, retry=True) def read(self, ip_set_id): return self.client.get('ip-sets/%s' % ip_set_id) def delete(self, ip_set_id): self._delete_with_retry(ip_set_id) def get_ipset_reference(self, ip_set_id): return {'target_id': ip_set_id, 'target_type': consts.IP_SET} vmware-nsxlib-12.0.1/vmware_nsxlib/v3/config.py0000666000175100017510000001550313244535763021466 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from oslo_log import versionutils LOG = log.getLogger(__name__) class NsxLibConfig(object): """Class holding all the configuration parameters used by the nsxlib code. :param nsx_api_managers: List of IP addresses of the NSX managers. Each IP address should be of the form: [://][:] If scheme is not provided https is used. If port is not provided port 80 is used for http and port 443 for https. :param username: User name for the NSX manager :param password: Password for the NSX manager :param client_cert_provider: None, or ClientCertProvider object. If specified, nsxlib will use client cert auth instead of basic authentication. :param insecure: If true, the NSX Manager server certificate is not verified. If false the CA bundle specified via "ca_file" will be used or if unset the default system root CAs will be used. :param ca_file: Specify a CA bundle file to use in verifying the NSX Manager server certificate. This option is ignored if "insecure" is set to True. 
If "insecure" is set to False and ca_file is unset, the system root CAs will be used to verify the server certificate. :param concurrent_connections: Maximum concurrent connections to each NSX manager. :param retries: Maximum number of times to retry a HTTP connection. :param http_timeout: The time in seconds before aborting a HTTP connection to a NSX manager. :param http_read_timeout: The time in seconds before aborting a HTTP read response from a NSX manager. :param conn_idle_timeout: The amount of time in seconds to wait before ensuring connectivity to the NSX manager if no manager connection has been used. :param http_provider: HTTPProvider object, or None. :param max_attempts: Maximum number of times to retry API requests upon stale revision errors. :param plugin_scope: The default scope for the v3 api-version tag :param plugin_tag: The value for the v3 api-version tag :param plugin_ver: The version of the plugin used as the 'os-api-version' tag value in the v3 api-version tag :param dns_nameservers: List of nameservers to configure for the DHCP binding entries. These will be used if there are no nameservers defined on the subnet. :param dns_domain: Domain to use for building the hostnames. :param dhcp_profile_uuid: Currently unused and deprecated. Kept for backward compatibility. :param allow_overwrite_header: If True, a default header of X-Allow-Overwrite:true will be added to all the requests, to allow admin user to update/ delete all entries. :param rate_limit_retry: If True, the client will retry requests failed on "Too many requests" error """ def __init__(self, nsx_api_managers=None, username=None, password=None, client_cert_provider=None, insecure=True, ca_file=None, concurrent_connections=10, retries=3, http_timeout=10, http_read_timeout=180, conn_idle_timeout=10, http_provider=None, max_attempts=10, plugin_scope=None, plugin_tag=None, plugin_ver=None, dns_nameservers=None, dns_domain='openstacklocal', dhcp_profile_uuid=None, allow_overwrite_header=False, rate_limit_retry=True): self.nsx_api_managers = nsx_api_managers self._username = username self._password = password self._ca_file = ca_file self.insecure = insecure self.concurrent_connections = concurrent_connections self.retries = retries self.http_timeout = http_timeout self.http_read_timeout = http_read_timeout self.conn_idle_timeout = conn_idle_timeout self.http_provider = http_provider self.client_cert_provider = client_cert_provider self.max_attempts = max_attempts self.plugin_scope = plugin_scope self.plugin_tag = plugin_tag self.plugin_ver = plugin_ver self.dns_nameservers = dns_nameservers or [] self.dns_domain = dns_domain self.allow_overwrite_header = allow_overwrite_header self.rate_limit_retry = rate_limit_retry if dhcp_profile_uuid: # this is deprecated, and never used. 
versionutils.report_deprecated_feature( LOG, 'dhcp_profile_uuid is not used by the nsxlib, and will ' 'be removed from its configuration in the future.') def extend(self, keepalive_section, url_base=None): """Called by library code to initialize application-specific data""" self.keepalive_section = keepalive_section self.url_base = url_base def _attribute_by_index(self, scalar_or_list, index): if isinstance(scalar_or_list, list): if not len(scalar_or_list): return None if len(scalar_or_list) > index: return scalar_or_list[index] # if not long enough - use the first one as default return scalar_or_list[0] # this is a scalar return scalar_or_list def username(self, index): return self._attribute_by_index(self._username, index) def password(self, index): return self._attribute_by_index(self._password, index) def ca_file(self, index): return self._attribute_by_index(self._ca_file, index) vmware-nsxlib-12.0.1/vmware_nsxlib/v3/client.py0000666000175100017510000002773013244536000021465 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import re import time from oslo_log import log from oslo_serialization import jsonutils import requests import six.moves.urllib.parse as urlparse from vmware_nsxlib._i18n import _ from vmware_nsxlib.v3 import exceptions from vmware_nsxlib.v3 import utils LOG = log.getLogger(__name__) NULL_CURSOR_PREFIX = '0000' def http_error_to_exception(status_code, error_code): errors = { requests.codes.NOT_FOUND: {'202': exceptions.BackendResourceNotFound, 'default': exceptions.ResourceNotFound}, requests.codes.CONFLICT: exceptions.StaleRevision, requests.codes.PRECONDITION_FAILED: exceptions.StaleRevision, requests.codes.INTERNAL_SERVER_ERROR: {'99': exceptions.ClientCertificateNotTrusted}, requests.codes.FORBIDDEN: {'98': exceptions.BadXSRFToken}, requests.codes.TOO_MANY_REQUESTS: exceptions.TooManyRequests} if status_code in errors: if isinstance(errors[status_code], dict): # choose based on error code if error_code and str(error_code) in errors[status_code]: return errors[status_code][str(error_code)] elif 'default' in errors[status_code]: return errors[status_code]['default'] else: return errors[status_code] # default exception return exceptions.ManagerError class RESTClient(object): _VERB_RESP_CODES = { 'get': [requests.codes.ok], 'post': [requests.codes.created, requests.codes.ok], 'put': [requests.codes.created, requests.codes.ok], 'delete': [requests.codes.ok] } def __init__(self, connection, url_prefix=None, default_headers=None, client_obj=None): self._conn = connection self._url_prefix = url_prefix or "" self._default_headers = default_headers or {} def new_client_for(self, *uri_segments): uri = self._build_url('/'.join(uri_segments)) return self.__class__( self._conn, url_prefix=uri, default_headers=self._default_headers, client_obj=self) def list(self, resource='', headers=None, silent=False): return self.url_list(resource, headers=headers, silent=silent) def get(self, uuid, headers=None, 
silent=False): return self.url_get(uuid, headers=headers, silent=silent) def delete(self, uuid, headers=None, expected_results=None): return self.url_delete(uuid, headers=headers, expected_results=expected_results) def update(self, uuid, body=None, headers=None, expected_results=None): return self.url_put(uuid, body, headers=headers, expected_results=expected_results) def create(self, resource='', body=None, headers=None, expected_results=None): return self.url_post(resource, body, headers=headers, expected_results=expected_results) def url_list(self, url, headers=None, silent=False): concatenate_response = self.url_get(url, headers=headers) cursor = concatenate_response.get('cursor', NULL_CURSOR_PREFIX) op = '&' if urlparse.urlparse(url).query else '?' url += op + 'cursor=' while cursor and not cursor.startswith(NULL_CURSOR_PREFIX): page = self.url_get(url + cursor, headers=headers, silent=silent) concatenate_response['results'].extend(page.get('results', [])) cursor = page.get('cursor', NULL_CURSOR_PREFIX) return concatenate_response def url_get(self, url, headers=None, silent=False): return self._rest_call(url, method='GET', headers=headers, silent=silent) def url_delete(self, url, headers=None, expected_results=None): return self._rest_call(url, method='DELETE', headers=headers, expected_results=expected_results) def url_put(self, url, body, headers=None, expected_results=None): return self._rest_call(url, method='PUT', body=body, headers=headers, expected_results=expected_results) def url_post(self, url, body, headers=None, expected_results=None): return self._rest_call(url, method='POST', body=body, headers=headers, expected_results=expected_results) def _raise_error(self, status_code, operation, result_msg, error_code=None): error = http_error_to_exception(status_code, error_code) raise error(manager='', operation=operation, details=result_msg, error_code=error_code) def _validate_result(self, result, expected, operation, silent=False): if result.status_code not in expected: result_msg = result.json() if result.content else '' if not silent: LOG.warning("The HTTP request returned error code " "%(result)s, whereas %(expected)s response " "codes were expected. 
Response body %(body)s", {'result': result.status_code, 'expected': '/'.join([str(code) for code in expected]), 'body': result_msg}) error_code = None if isinstance(result_msg, dict) and 'error_message' in result_msg: error_code = result_msg.get('error_code') related_errors = [error['error_message'] for error in result_msg.get('related_errors', [])] result_msg = result_msg['error_message'] if related_errors: result_msg += " relatedErrors: %s" % ' '.join( related_errors) self._raise_error(result.status_code, operation, result_msg, error_code=error_code) @classmethod def merge_headers(cls, *headers): merged = {} for header in headers: if header: merged.update(header) return merged def _build_url(self, uri): prefix = urlparse.urlparse(self._url_prefix) uri = ("/%s/%s" % (prefix.path, uri)).replace('//', '/').strip('/') if prefix.netloc: uri = "%s/%s" % (prefix.netloc, uri) if prefix.scheme: uri = "%s://%s" % (prefix.scheme, uri) return uri def _mask_password(self, json): '''Mask password value in json format''' if not json: return json pattern = r'\"password\": [^,}]*' return re.sub(pattern, '"password": "********"', json) def _rest_call(self, url, method='GET', body=None, headers=None, silent=False, expected_results=None): request_headers = headers.copy() if headers else {} request_headers.update(self._default_headers) if utils.INJECT_HEADERS_CALLBACK: inject_headers = utils.INJECT_HEADERS_CALLBACK() request_headers.update(inject_headers) request_url = self._build_url(url) do_request = getattr(self._conn, method.lower()) if not silent: LOG.debug("REST call: %s %s. Headers: %s. Body: %s", method, request_url, request_headers, self._mask_password(body)) ts = time.time() result = do_request( request_url, data=body, headers=request_headers) te = time.time() if not silent: LOG.debug("REST call: %s %s. Response: %s. 
Took %2.4f", method, request_url, result.json() if result.content else '', te - ts) if not expected_results: expected_results = RESTClient._VERB_RESP_CODES[method.lower()] self._validate_result( result, expected_results, _("%(verb)s %(url)s") % {'verb': method, 'url': request_url}, silent=silent) return result class JSONRESTClient(RESTClient): _DEFAULT_HEADERS = { 'Accept': 'application/json', 'Content-Type': 'application/json' } def __init__(self, connection, url_prefix=None, default_headers=None, client_obj=None): super(JSONRESTClient, self).__init__( connection, url_prefix=url_prefix, default_headers=RESTClient.merge_headers( JSONRESTClient._DEFAULT_HEADERS, default_headers), client_obj=None) def _rest_call(self, *args, **kwargs): if kwargs.get('body') is not None: kwargs['body'] = jsonutils.dumps(kwargs['body'], sort_keys=True) result = super(JSONRESTClient, self)._rest_call(*args, **kwargs) return result.json() if result.content else result class NSX3Client(JSONRESTClient): NSX_V1_API_PREFIX = 'api/v1/' NSX_POLICY_V1_API_PREFIX = 'policy/api/v1/' def __init__(self, connection, url_prefix=None, default_headers=None, nsx_api_managers=None, max_attempts=utils.DEFAULT_MAX_ATTEMPTS, rate_limit_retry=True, client_obj=None, url_path_base=NSX_V1_API_PREFIX): # If the client obj is defined - copy configuration from it if client_obj: self.nsx_api_managers = client_obj.nsx_api_managers or [] self.max_attempts = client_obj.max_attempts self.rate_limit_retry = client_obj.rate_limit_retry else: self.nsx_api_managers = nsx_api_managers or [] self.max_attempts = max_attempts self.rate_limit_retry = rate_limit_retry url_prefix = url_prefix or url_path_base if url_prefix and url_path_base not in url_prefix: if url_prefix.startswith('http'): url_prefix += '/' + url_path_base else: url_prefix = "%s/%s" % (url_path_base, url_prefix or '') self.max_attempts = max_attempts super(NSX3Client, self).__init__( connection, url_prefix=url_prefix, default_headers=default_headers, client_obj=client_obj) def _raise_error(self, status_code, operation, result_msg, error_code=None): """Override the Rest client errors to add the manager IPs""" error = http_error_to_exception(status_code, error_code) raise error(manager=self.nsx_api_managers, operation=operation, details=result_msg, error_code=error_code) def _rest_call(self, url, **kwargs): if self.rate_limit_retry: # If too many requests are handled by the nsx at the same time, # error "429: Too Many Requests" will be returned. # the client is expected to retry after a random 400-600 milli, # and later exponentially until 5 seconds wait @utils.retry_random_upon_exception( exceptions.TooManyRequests, max_attempts=self.max_attempts) def _rest_call_with_retry(self, url, **kwargs): return super(NSX3Client, self)._rest_call(url, **kwargs) return _rest_call_with_retry(self, url, **kwargs) else: return super(NSX3Client, self)._rest_call(url, **kwargs) vmware-nsxlib-12.0.1/vmware_nsxlib/v3/client_cert.py0000666000175100017510000003045613244535763022520 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import datetime from time import time import uuid from OpenSSL import crypto from oslo_log import log from vmware_nsxlib._i18n import _ from vmware_nsxlib.v3 import exceptions as nsxlib_exceptions LOG = log.getLogger(__name__) CERT_SUBJECT_COUNTRY = 'country' CERT_SUBJECT_STATE = 'state' CERT_SUBJECT_ORG = 'organization' CERT_SUBJECT_UNIT = 'unit' CERT_SUBJECT_HOST = 'hostname' def validate_cert_params(key_size, valid_for_days, signature_alg, subject): """Validate parameters for certificate""" expected_key_sizes = (2048, 4096) if key_size not in expected_key_sizes: raise nsxlib_exceptions.NsxLibInvalidInput( error_message=_('Invalid key size %(value)d' '(must be one of %(list)s)') % {'value': key_size, 'list': expected_key_sizes}) expected_signature_algs = ('sha224', 'sha256') if signature_alg not in expected_signature_algs: raise nsxlib_exceptions.NsxLibInvalidInput( error_message=_('Invalid signature algorithm %(value)s' '(must be one of %(list)s)') % {'value': signature_alg, 'list': expected_signature_algs}) if (CERT_SUBJECT_COUNTRY in subject and (len(subject[CERT_SUBJECT_COUNTRY]) != 2)): raise nsxlib_exceptions.NsxLibInvalidInput( error_message=_('Invalid country %s: ' 'must be exactly 2 characters') % subject[CERT_SUBJECT_COUNTRY]) # values defined in rfc5280 max_len_constraints = {CERT_SUBJECT_STATE: 128, CERT_SUBJECT_ORG: 64, CERT_SUBJECT_UNIT: 64, CERT_SUBJECT_HOST: 64} for field, max_len in max_len_constraints.items(): if field in subject and (len(subject[field]) > max_len): raise nsxlib_exceptions.NsxLibInvalidInput( error_message=_('Invalid %(field)s [%(value)s]: ' 'must not exceed %(max)d characters') % {'field': field, 'value': subject[field], 'max': max_len}) def generate_self_signed_cert_pair(key_size, valid_for_days, signature_alg, subject): """Generate self signed certificate and key pair""" validate_cert_params(key_size, valid_for_days, signature_alg, subject) # generate key pair key = crypto.PKey() key.generate_key(crypto.TYPE_RSA, key_size) # generate certificate cert = crypto.X509() cert.get_subject().C = subject.get(CERT_SUBJECT_COUNTRY, 'US') cert.get_subject().ST = subject.get(CERT_SUBJECT_STATE, 'California') cert.get_subject().O = subject.get(CERT_SUBJECT_ORG, 'MyOrg') cert.get_subject().OU = subject.get(CERT_SUBJECT_UNIT, 'MyUnit') cert.get_subject().CN = subject.get(CERT_SUBJECT_HOST, 'myorg.com') cert.gmtime_adj_notBefore(0) cert.gmtime_adj_notAfter(valid_for_days * 24 * 60 * 60) cert.set_issuer(cert.get_subject()) cert.set_pubkey(key) cert.set_serial_number(int(time())) cert.sign(key, signature_alg) return cert, key class ClientCertificateManager(object): """Manage Client Certificate for backend authentication There should be single client certificate associated with certain principal identity. Certificate and PK storage is pluggable. Storage API (similar to neutron-lbaas barbican API): store_cert(purpose, certificate, private_key) get_cert(purpose) delete_cert(purpose) """ def __init__(self, identity, nsx_trust_management, storage_driver): self._cert = None self._key = None self._storage_driver = storage_driver self._identity = identity self._nsx_trust_management = nsx_trust_management def __enter__(self): """Load cert from storage This is an optimization to avoid repeated storage access. 
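# Illustrative sketch: a minimal in-memory storage driver that satisfies the
# pluggable storage API described in the ClientCertificateManager docstring
# above (store_cert/get_cert/delete_cert, with get_cert returning a
# (cert_pem, key_pem) tuple). The class name is made up; a real driver would
# persist the PEM data in a secure store rather than a dict.
class InMemoryCertStorageDriver(object):
    def __init__(self):
        self._store = {}

    def store_cert(self, purpose, certificate, private_key):
        self._store[purpose] = (certificate, private_key)

    def get_cert(self, purpose):
        # (None, None) signals "no certificate stored" to the manager
        return self._store.get(purpose, (None, None))

    def delete_cert(self, purpose):
        self._store.pop(purpose, None)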
Usage example: with cert_manager as c: if c.exists(): date = c.expires_on() days = c.exires_in_days() """ self._cert, self._key = self.get_cert_and_key() return self def __exit__(self, type, value, traceback): self._cert = None self._key = None def generate(self, subject, key_size=2048, valid_for_days=3650, signature_alg='sha256', node_id=None): """Generate new certificate and register it in the system Generate certificate with RSA key based on arguments provided, register and associate it to principal identity on backend, and store it in storage. If certificate already exists, fail. """ self._validate_empty() cert, key = generate_self_signed_cert_pair(key_size, valid_for_days, signature_alg, subject) # register on backend self._register_cert(cert, node_id or uuid.uuid4()) # save in storage cert_pem = crypto.dump_certificate(crypto.FILETYPE_PEM, cert) key_pem = crypto.dump_privatekey(crypto.FILETYPE_PEM, key) self._storage_driver.store_cert(self._identity, cert_pem, key_pem) LOG.debug("Client certificate generated successfully") def delete(self): """Delete existing certificate from storage and backend""" cert_pem, key_pem = self.get_pem() if not cert_pem: return ok = True try: self._nsx_trust_management.delete_cert_and_identity( self._identity, cert_pem) except nsxlib_exceptions.ManagerError as e: LOG.error("Failed to clear certificate on backend: %s", e) ok = False try: self._storage_driver.delete_cert(self._identity) except Exception: LOG.error("Failed to clear certificate in storage: %s", e) ok = False self._cert = None self._key = None if ok: LOG.debug("Client certificate removed successfully") def exists(self): """Check if certificate was created for given identity""" if self._cert: return True cert_pem, key_pem = self._storage_driver.get_cert(self._identity) return cert_pem is not None def _get_cert_from_file(self, filename): with open(filename, 'r') as f: cert_pem = f.read() if not cert_pem: raise nsxlib_exceptions.CertificateError( msg=_("Failed to read certificate from %s") % filename) # validate correct crypto try: cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_pem) except crypto.Error: raise nsxlib_exceptions.CertificateError( msg=_("Failed to import client certificate")) return cert def import_pem(self, filename, node_id=None): """Import and register existing certificate in PEM format""" # TODO(annak): support PK import as well self._validate_empty() cert = self._get_cert_from_file(filename) # register on backend self._register_cert(cert, node_id or uuid.uuid4()) cert_pem = crypto.dump_certificate(crypto.FILETYPE_PEM, cert) self._storage_driver.store_cert(self._identity, cert_pem, None) LOG.debug("Client certificate imported successfully") def delete_pem(self, filename): """Delete specified client certificate without storage verification""" # This file may contain private key # passing the pem through crypto will perform validation and # strip off the key cert = self._get_cert_from_file(filename) cert_pem = crypto.dump_certificate(crypto.FILETYPE_PEM, cert) self._nsx_trust_management.delete_cert_and_identity(self._identity, cert_pem) self._storage_driver.delete_cert(self._identity) def _load_from_storage(self): """Returns certificate and key pair in PEM format""" cert_pem, key_pem = self._storage_driver.get_cert(self._identity) if cert_pem is None: return None, None return (cert_pem, key_pem) def get_pem(self): return self._load_from_storage() def export_pem(self, filename): """Exports certificate and key pair to file""" self._validate_exists() cert_pem, key_pem = 
self._load_from_storage() with open(filename, 'w') as f: f.write(cert_pem) f.write(key_pem) def expires_on(self): """Returns certificate expiration timestamp""" self._validate_exists() cert, key = self.get_cert_and_key() converted = datetime.datetime.strptime( cert.get_notAfter().decode(), "%Y%m%d%H%M%SZ") return converted def expires_in_days(self): """Returns in how many days the certificate expires""" delta = self.expires_on() - datetime.datetime.utcnow() return delta.days def get_subject(self): self._validate_exists() cert, key = self.get_cert_and_key() return {CERT_SUBJECT_COUNTRY: cert.get_subject().C, CERT_SUBJECT_STATE: cert.get_subject().ST, CERT_SUBJECT_ORG: cert.get_subject().O, CERT_SUBJECT_UNIT: cert.get_subject().OU, CERT_SUBJECT_HOST: cert.get_subject().CN} def get_signature_alg(self): self._validate_exists() cert, key = self.get_cert_and_key() return cert.get_signature_algorithm() def get_key_size(self): self._validate_exists() cert, key = self.get_cert_and_key() return key.bits() def _validate_empty(self): if self.exists(): raise nsxlib_exceptions.ObjectAlreadyExists( object_type='Client Certificate') def _validate_exists(self): if not self.exists(): raise nsxlib_exceptions.ObjectNotGenerated( object_type='Client Certificate') def get_cert_and_key(self): """Load cert and key from storage""" if self._cert and self._key: return self._cert, self._key cert_pem, key_pem = self._load_from_storage() if cert_pem is None: return None, None try: cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_pem) key = crypto.load_privatekey(crypto.FILETYPE_PEM, key_pem) except crypto.Error: raise nsxlib_exceptions.CertificateError( msg="Failed to load client certificate") return cert, key def _register_cert(self, cert, node_id): cert_pem = crypto.dump_certificate(crypto.FILETYPE_PEM, cert) self._nsx_trust_management.create_cert_and_identity(self._identity, cert_pem, node_id) class ClientCertProvider(object): """Basic implementation for client certificate provider Responsible for preparing, providing and disposing client certificate file. Basic implementation assumes the file exists in the file system and does not take responsibility of deleting this sensitive information after use. Inheriting objects should make use of __enter__ and __exit__ APIs to prepare and dispose the certificate file data. """ def __init__(self, filename): self._filename = filename def __enter__(self): return self def __exit__(self, type, value, traceback): pass def filename(self): return self._filename vmware-nsxlib-12.0.1/vmware_nsxlib/v3/nsx_constants.py0000666000175100017510000000746513244535763023135 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
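# Illustrative sketch for the client_cert module above: generating a
# self-signed certificate/key pair with its helpers. Subject values are
# placeholders; validate_cert_params() restricts the key size to 2048/4096
# and the signature algorithm to sha224/sha256.
from OpenSSL import crypto

from vmware_nsxlib.v3 import client_cert

subject = {client_cert.CERT_SUBJECT_COUNTRY: 'US',
           client_cert.CERT_SUBJECT_ORG: 'MyOrg',
           client_cert.CERT_SUBJECT_HOST: 'nsx.example.org'}
cert, key = client_cert.generate_self_signed_cert_pair(
    key_size=2048, valid_for_days=365,
    signature_alg='sha256', subject=subject)
cert_pem = crypto.dump_certificate(crypto.FILETYPE_PEM, cert)
key_pem = crypto.dump_privatekey(crypto.FILETYPE_PEM, key)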
# Admin statuses ADMIN_STATE_UP = "UP" ADMIN_STATE_DOWN = "DOWN" # Replication modes MTEP = "MTEP" # Port attachment types ATTACHMENT_VIF = "VIF" ATTACHMENT_LR = "LOGICALROUTER" ATTACHMENT_DHCP = "DHCP_SERVICE" ATTACHMENT_MDPROXY = "METADATA_PROXY" VIF_RESOURCE_TYPE = "VifAttachmentContext" VIF_TYPE_PARENT = "PARENT" VIF_TYPE_CHILD = "CHILD" ALLOCATE_ADDRESS_NONE = "None" # NSXv3 L2 Gateway constants BRIDGE_ENDPOINT = "BRIDGEENDPOINT" # Router type ROUTER_TYPE_TIER0 = "TIER0" ROUTER_TYPE_TIER1 = "TIER1" LROUTERPORT_UPLINK = "LogicalRouterUplinkPort" LROUTERPORT_DOWNLINK = "LogicalRouterDownLinkPort" LROUTERPORT_CENTRALIZED = "LogicalRouterCentralizedServicePort" LROUTERPORT_LINKONTIER0 = "LogicalRouterLinkPortOnTIER0" LROUTERPORT_LINKONTIER1 = "LogicalRouterLinkPortOnTIER1" # NSX service type SERVICE_DHCP = "dhcp" # NSX-V3 Distributed Firewall constants IP_SET = 'IPSet' NSGROUP = 'NSGroup' NSGROUP_COMPLEX_EXP = 'NSGroupComplexExpression' NSGROUP_SIMPLE_EXP = 'NSGroupSimpleExpression' NSGROUP_TAG_EXP = 'NSGroupTagExpression' EXCLUDE_PORT = 'Exclude-Port' # Firewall rule position FW_INSERT_BEFORE = 'insert_before' FW_INSERT_AFTER = 'insert_after' FW_INSERT_BOTTOM = 'insert_bottom' FW_INSERT_TOP = 'insert_top' # firewall rule actions FW_ACTION_ALLOW = 'ALLOW' FW_ACTION_DROP = 'DROP' FW_ACTION_REJECT = 'REJECT' # nsgroup members update actions NSGROUP_ADD_MEMBERS = 'ADD_MEMBERS' NSGROUP_REMOVE_MEMBERS = 'REMOVE_MEMBERS' # NSServices resource types L4_PORT_SET_NSSERVICE = 'L4PortSetNSService' ICMP_TYPE_NSSERVICE = 'ICMPTypeNSService' IP_PROTOCOL_NSSERVICE = 'IPProtocolNSService' # firewall section types FW_SECTION_LAYER3 = 'LAYER3' TARGET_TYPE_LOGICAL_SWITCH = 'LogicalSwitch' TARGET_TYPE_LOGICAL_PORT = 'LogicalPort' TARGET_TYPE_IPV4ADDRESS = 'IPv4Address' TARGET_TYPE_IPV6ADDRESS = 'IPv6Address' # filtering operators and expressions EQUALS = 'EQUALS' IN = 'IN' OUT = 'OUT' IN_OUT = 'IN_OUT' TCP = 'TCP' UDP = 'UDP' ICMPV4 = 'ICMPv4' ICMPV6 = 'ICMPv6' IPV4 = 'IPV4' IPV6 = 'IPV6' IPV4_IPV6 = 'IPV4_IPV6' LOCAL_IP_PREFIX = 'local_ip_prefix' LOGGING = 'logging' # QoS directions egress/ingress EGRESS = 'egress' INGRESS = 'ingress' EGRESS_SHAPING = 'EgressRateShaper' INGRESS_SHAPING = 'IngressRateShaper' # Error codes returned by the backend ERR_CODE_OBJECT_NOT_FOUND = 202 ERR_CODE_IPAM_POOL_EXHAUSTED = 5109 ERR_CODE_IPAM_SPECIFIC_IP = 5123 ERR_CODE_IPAM_IP_ALLOCATED = 5141 ERR_CODE_IPAM_IP_NOT_IN_POOL = 5110 ERR_CODE_IPAM_RANGE_MODIFY = 5602 ERR_CODE_IPAM_RANGE_DELETE = 5015 ERR_CODE_IPAM_RANGE_SHRUNK = 5016 # backend versions NSX_VERSION_1_1_0 = '1.1.0' NSX_VERSION_2_0_0 = '2.0.0' NSX_VERSION_2_1_0 = '2.1.0' NSX_VERSION_2_2_0 = '2.2.0' NSX_VERSION_3_0_0 = '3.0.0' # Features available depending on the backend version FEATURE_MAC_LEARNING = 'MAC Learning' FEATURE_DYNAMIC_CRITERIA = 'Dynamic criteria' FEATURE_EXCLUDE_PORT_BY_TAG = 'Exclude Port by Tag' FEATURE_ROUTER_FIREWALL = 'Router Firewall' FEATURE_LOAD_BALANCER = 'Load Balancer' FEATURE_DHCP_RELAY = 'DHCP Relay' FEATURE_NSX_POLICY = 'NSX Policy' FEATURE_VLAN_ROUTER_INTERFACE = 'VLAN Router Interface' FEATURE_RATE_LIMIT = 'Requests Rate Limit' FEATURE_IPSEC_VPN = 'IPSec VPN' FEATURE_ON_BEHALF_OF = 'On Behalf Of' FEATURE_TRUNK_VLAN = 'Trunk Vlan' vmware-nsxlib-12.0.1/vmware_nsxlib/v3/core_resources.py0000666000175100017510000007457613244535763023262 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. 
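# Illustrative sketch for the constants above: callers typically gate
# optional request fields on the FEATURE_* names. The helper below is
# hypothetical and simply mirrors the nat_pass handling used by
# core_resources further down; 'nsxlib' is assumed to be an initialized
# library object exposing feature_supported().
from vmware_nsxlib.v3 import nsx_constants


def build_nat_rule_body(nsxlib, action, translated_network, bypass_firewall):
    body = {'action': action, 'translated_network': translated_network}
    if nsxlib.feature_supported(nsx_constants.FEATURE_ROUTER_FIREWALL):
        # nat_pass is only meaningful on backends with router firewall
        body['nat_pass'] = bypass_firewall
    return body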
# All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections from oslo_log import log from oslo_log import versionutils from vmware_nsxlib._i18n import _ from vmware_nsxlib.v3 import exceptions from vmware_nsxlib.v3 import nsx_constants from vmware_nsxlib.v3 import utils LOG = log.getLogger(__name__) SwitchingProfileTypeId = collections.namedtuple( 'SwitchingProfileTypeId', 'profile_type, profile_id') PacketAddressClassifier = collections.namedtuple( 'PacketAddressClassifier', 'ip_address, mac_address, vlan') class NsxLibPortMirror(utils.NsxLibApiBase): @property def uri_segment(self): return 'mirror-sessions' @property def resource_type(self): return 'PortMirroringSession' def create_session(self, source_ports, dest_ports, direction, description, name, tags): """Create a PortMirror Session on the backend. :param source_ports: List of UUIDs of the ports whose traffic is to be mirrored. :param dest_ports: List of UUIDs of the ports where the mirrored traffic is to be sent. :param direction: String representing the direction of traffic to be mirrored. [INGRESS, EGRESS, BIDIRECTIONAL] :param description: String representing the description of the session. :param name: String representing the name of the session. :param tags: nsx backend specific tags. """ body = {'direction': direction, 'tags': tags, 'display_name': name, 'description': description, 'mirror_sources': source_ports, 'mirror_destination': dest_ports} return self.client.create(self.get_path(), body) def delete_session(self, mirror_session_id): """Delete a PortMirror session on the backend. :param mirror_session_id: string representing the UUID of the port mirror session to be deleted. """ self.client.delete(self.get_path(mirror_session_id)) class NsxLibBridgeEndpoint(utils.NsxLibApiBase): @property def uri_segment(self): return 'bridge-endpoints' @property def resource_type(self): return 'BridgeEndpoint' def create(self, device_name, seg_id, tags): """Create a bridge endpoint on the backend. Create a bridge endpoint resource on a bridge cluster for the L2 gateway network connection. :param device_name: device_name actually refers to the bridge cluster's UUID. :param seg_id: integer representing the VLAN segmentation ID. :param tags: nsx backend specific tags. """ body = {'bridge_cluster_id': device_name, 'tags': tags, 'vlan': seg_id} return self.client.create(self.get_path(), body) def delete(self, bridge_endpoint_id): """Delete a bridge endpoint on the backend. :param bridge_endpoint_id: string representing the UUID of the bridge endpoint to be deleted. 
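# Illustrative sketch: creating and removing a port-mirror session with the
# NsxLibPortMirror resource above. The UUIDs are placeholders and
# 'nsxlib.port_mirror' is assumed to be the accessor the library exposes for
# this resource; the backend reply is expected to carry the new session id.
session = nsxlib.port_mirror.create_session(
    source_ports=['<source-port-uuid>'],
    dest_ports=['<dest-port-uuid>'],
    direction='BIDIRECTIONAL',
    description='mirror east-west traffic',
    name='mirror-session-1',
    tags=[])
nsxlib.port_mirror.delete_session(session['id'])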
""" self.client.delete(self.get_path(bridge_endpoint_id)) class NsxLibLogicalSwitch(utils.NsxLibApiBase): @property def uri_segment(self): return 'logical-switches' @property def resource_type(self): return 'LogicalSwitch' def create(self, display_name, transport_zone_id, tags, replication_mode=nsx_constants.MTEP, admin_state=True, vlan_id=None, ip_pool_id=None, mac_pool_id=None, description=None, trunk_vlan_range=None): operation = "Create logical switch" # TODO(salv-orlando): Validate Replication mode and admin_state # NOTE: These checks might be moved to the API client library if one # that performs such checks in the client is available body = {'transport_zone_id': transport_zone_id, 'replication_mode': replication_mode, 'display_name': display_name, 'tags': tags} if admin_state: body['admin_state'] = nsx_constants.ADMIN_STATE_UP else: body['admin_state'] = nsx_constants.ADMIN_STATE_DOWN if trunk_vlan_range: failed = False if (self.nsxlib and self.nsxlib.feature_supported( nsx_constants.FEATURE_TRUNK_VLAN)): if vlan_id is not None: failed = True LOG.error("Failed to create logical switch %(name)s with " "trunk vlan: vlan id %(vlan)s is used.", {'name': display_name, 'vlan': vlan_id}) elif (len(trunk_vlan_range) != 2 or trunk_vlan_range[0] > trunk_vlan_range[1]): failed = True LOG.error("Failed to create logical switch %(name)s with " "trunk vlan: illegal range (%(trunk)s) is used.", {'name': display_name, 'trunk': trunk_vlan_range}) else: body['vlan_trunk_spec'] = {'vlan_ranges': [ {'start': trunk_vlan_range[0], 'end': trunk_vlan_range[1]}]} else: LOG.error("Failed to create logical switch %s with trunk " "vlan: this feature is not supported.", display_name) failed = True if failed: raise exceptions.InvalidInput( operation=operation, arg_val=trunk_vlan_range, arg_name='trunk_vlan_range') elif vlan_id: body['vlan'] = vlan_id if ip_pool_id: body['ip_pool_id'] = ip_pool_id if mac_pool_id: body['mac_pool_id'] = mac_pool_id if description is not None: body['description'] = description return self.client.create(self.get_path(), body) def delete(self, lswitch_id): resource = '%s?detach=true&cascade=true' % lswitch_id self._delete_with_retry(resource) def update(self, lswitch_id, name=None, admin_state=None, tags=None, description=None): body = {} if name: body['display_name'] = name if admin_state is not None: if admin_state: body['admin_state'] = nsx_constants.ADMIN_STATE_UP else: body['admin_state'] = nsx_constants.ADMIN_STATE_DOWN if tags is not None: body['tags'] = tags if description is not None: body['description'] = description return self._update_with_retry(lswitch_id, body) class SwitchingProfileTypes(object): IP_DISCOVERY = 'IpDiscoverySwitchingProfile' MAC_LEARNING = 'MacManagementSwitchingProfile' PORT_MIRRORING = 'PortMirroringSwitchingProfile' QOS = 'QosSwitchingProfile' SPOOF_GUARD = 'SpoofGuardSwitchingProfile' SWITCH_SECURITY = 'SwitchSecuritySwitchingProfile' class WhiteListAddressTypes(object): PORT = 'LPORT_BINDINGS' SWITCH = 'LSWITCH_BINDINGS' class NsxLibSwitchingProfile(utils.NsxLibApiBase): @property def uri_segment(self): return 'switching-profiles' def list(self): return self.client.list( self.get_path('?include_system_owned=True')) def create(self, profile_type, display_name=None, description=None, **api_args): body = { 'resource_type': profile_type, 'display_name': display_name or '', 'description': description or '' } body.update(api_args) return self.client.create(self.get_path(), body=body) def update(self, uuid, profile_type, **api_args): body = { 
'resource_type': profile_type } body.update(api_args) return self.client.update(self.get_path(uuid), body=body) def create_spoofguard_profile(self, display_name, description, whitelist_ports=False, whitelist_switches=False, tags=None): whitelist_providers = [] if whitelist_ports: whitelist_providers.append(WhiteListAddressTypes.PORT) if whitelist_switches: whitelist_providers.append(WhiteListAddressTypes.SWITCH) return self.create(SwitchingProfileTypes.SPOOF_GUARD, display_name=display_name, description=description, white_list_providers=whitelist_providers, tags=tags or []) def create_dhcp_profile(self, display_name, description, tags=None): dhcp_filter = { 'client_block_enabled': True, 'server_block_enabled': False } rate_limits = { 'enabled': False, 'rx_broadcast': 0, 'tx_broadcast': 0, 'rx_multicast': 0, 'tx_multicast': 0 } bpdu_filter = { 'enabled': True, 'white_list': [] } return self.create(SwitchingProfileTypes.SWITCH_SECURITY, display_name=display_name, description=description, tags=tags or [], dhcp_filter=dhcp_filter, rate_limits=rate_limits, bpdu_filter=bpdu_filter, block_non_ip_traffic=True) def create_mac_learning_profile(self, display_name, description, tags=None): mac_learning = { 'enabled': True, } return self.create(SwitchingProfileTypes.MAC_LEARNING, display_name=display_name, description=description, tags=tags or [], mac_learning=mac_learning, mac_change_allowed=True) def create_port_mirror_profile(self, display_name, description, direction, destinations, tags=None): return self.create(SwitchingProfileTypes.PORT_MIRRORING, display_name=display_name, description=description, tags=tags or [], direction=direction, destinations=destinations) @classmethod def build_switch_profile_ids(cls, client, *profiles): ids = [] for profile in profiles: if isinstance(profile, str): profile = client.get(profile) if not isinstance(profile, SwitchingProfileTypeId): profile = SwitchingProfileTypeId( profile.get('key', profile.get('resource_type')), profile.get('value', profile.get('id'))) ids.append(profile) return ids class NsxLibQosSwitchingProfile(NsxLibSwitchingProfile): @property def resource_type(self): return 'QosSwitchingProfile' def _build_args(self, tags, name=None, description=None): body = {"resource_type": "QosSwitchingProfile", "tags": tags} return self._update_args( body, name=name, description=description) def _update_args(self, body, name=None, description=None): if name: body["display_name"] = name if description: body["description"] = description return body def _get_resource_type(self, direction): if direction == nsx_constants.EGRESS: return nsx_constants.EGRESS_SHAPING return nsx_constants.INGRESS_SHAPING def _enable_shaping_in_args(self, body, burst_size=None, peak_bandwidth=None, average_bandwidth=None, direction=None): resource_type = self._get_resource_type(direction) for shaper in body["shaper_configuration"]: if shaper["resource_type"] == resource_type: shaper["enabled"] = True if burst_size is not None: shaper["burst_size_bytes"] = burst_size if peak_bandwidth is not None: shaper["peak_bandwidth_mbps"] = peak_bandwidth if average_bandwidth is not None: shaper["average_bandwidth_mbps"] = average_bandwidth break return body def _disable_shaping_in_args(self, body, direction=None): resource_type = self._get_resource_type(direction) for shaper in body["shaper_configuration"]: if shaper["resource_type"] == resource_type: shaper["enabled"] = False shaper["burst_size_bytes"] = 0 shaper["peak_bandwidth_mbps"] = 0 shaper["average_bandwidth_mbps"] = 0 break return body def 
_update_dscp_in_args(self, body, qos_marking, dscp): body["dscp"] = {} body["dscp"]["mode"] = qos_marking.upper() if dscp: body["dscp"]["priority"] = dscp return body def create(self, tags, name=None, description=None): body = self._build_args(tags, name, description) return self.client.create(self.get_path(), body) def update(self, profile_id, tags, name=None, description=None): # update the relevant fields body = {} body = self._update_args(body, name, description) if tags is not None: body['tags'] = tags return self._update_with_retry(profile_id, body) def update_shaping(self, profile_id, shaping_enabled=False, burst_size=None, peak_bandwidth=None, average_bandwidth=None, qos_marking=None, dscp=None, direction=nsx_constants.INGRESS): versionutils.report_deprecated_feature( LOG, 'NsxLibQosSwitchingProfile.update_shaping is deprecated. ' 'Please use set_profile_shaping instead.') # get the current configuration body = self.get(profile_id) # update the relevant fields if shaping_enabled: body = self._enable_shaping_in_args( body, burst_size=burst_size, peak_bandwidth=peak_bandwidth, average_bandwidth=average_bandwidth, direction=direction) else: body = self._disable_shaping_in_args(body, direction=direction) body = self._update_dscp_in_args(body, qos_marking, dscp) return self._update_with_retry(profile_id, body) def set_profile_shaping(self, profile_id, ingress_bw_enabled=False, ingress_burst_size=None, ingress_peak_bandwidth=None, ingress_average_bandwidth=None, egress_bw_enabled=False, egress_burst_size=None, egress_peak_bandwidth=None, egress_average_bandwidth=None, qos_marking='trusted', dscp=None): """Set all shaping parameters in the QoS switch profile""" # get the current configuration body = self.get(profile_id) # update the ingress shaping if ingress_bw_enabled: body = self._enable_shaping_in_args( body, burst_size=ingress_burst_size, peak_bandwidth=ingress_peak_bandwidth, average_bandwidth=ingress_average_bandwidth, direction=nsx_constants.INGRESS) else: body = self._disable_shaping_in_args( body, direction=nsx_constants.INGRESS) # update the egress shaping if egress_bw_enabled: body = self._enable_shaping_in_args( body, burst_size=egress_burst_size, peak_bandwidth=egress_peak_bandwidth, average_bandwidth=egress_average_bandwidth, direction=nsx_constants.EGRESS) else: body = self._disable_shaping_in_args( body, direction=nsx_constants.EGRESS) # update dscp marking body = self._update_dscp_in_args(body, qos_marking, dscp) # update the profile in the backend return self._update_with_retry(profile_id, body) class NsxLibLogicalRouter(utils.NsxLibApiBase): @property def uri_segment(self): return 'logical-routers' @property def resource_type(self): return 'LogicalRouter' def _delete_resource_by_values(self, resource, skip_not_found=True, strict_mode=True, **kwargs): """Delete resource objects matching the values in kwargs If skip_not_found is True - do not raise an exception if no object was found. If strict_mode is True - warnings will be issued if 0 or >1 objects where deleted. 
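# Illustrative sketch: setting all shaping parameters of a QoS switching
# profile through set_profile_shaping() above. The profile id, bandwidth
# numbers and DSCP value are placeholders, and 'nsxlib.qos_switching_profile'
# is assumed to be the accessor for NsxLibQosSwitchingProfile.
nsxlib.qos_switching_profile.set_profile_shaping(
    '<qos-profile-uuid>',
    ingress_bw_enabled=True,
    ingress_burst_size=1024000,       # bytes
    ingress_peak_bandwidth=1000,      # Mbps
    ingress_average_bandwidth=500,    # Mbps
    egress_bw_enabled=False,
    qos_marking='untrusted',
    dscp=10)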
""" resources_list = self.client.list(resource) matched_num = 0 for res in resources_list['results']: if utils.dict_match(kwargs, res): LOG.debug("Deleting %s from resource %s", res, resource) delete_resource = resource + "/" + str(res['id']) self.client.delete(delete_resource) matched_num = matched_num + 1 if matched_num == 0: if skip_not_found: if strict_mode: LOG.warning("No resource in %(res)s matched for values: " "%(values)s", {'res': resource, 'values': kwargs}) else: err_msg = (_("No resource in %(res)s matched for values: " "%(values)s") % {'res': resource, 'values': kwargs}) raise exceptions.ResourceNotFound( manager=self.client.nsx_api_managers, operation=err_msg) elif matched_num > 1 and strict_mode: LOG.warning("%(num)s resources in %(res)s matched for values: " "%(values)s", {'num': matched_num, 'res': resource, 'values': kwargs}) def add_nat_rule(self, logical_router_id, action, translated_network, source_net=None, dest_net=None, enabled=True, rule_priority=None, match_ports=None, match_protocol=None, match_resource_type=None, bypass_firewall=True): resource = 'logical-routers/%s/nat/rules' % logical_router_id body = {'action': action, 'enabled': enabled, 'translated_network': translated_network} if source_net: body['match_source_network'] = source_net if dest_net: body['match_destination_network'] = dest_net if rule_priority: body['rule_priority'] = rule_priority if match_ports: body['match_service'] = { 'resource_type': (match_resource_type or nsx_constants.L4_PORT_SET_NSSERVICE), 'destination_ports': match_ports, 'l4_protocol': match_protocol or nsx_constants.TCP} # nat_pass parameter is supported with the router firewall feature if (self.nsxlib and self.nsxlib.feature_supported( nsx_constants.FEATURE_ROUTER_FIREWALL)): body['nat_pass'] = bypass_firewall elif not bypass_firewall: LOG.error("Ignoring bypass_firewall for router %s nat rule: " "this feature is not supported.", logical_router_id) return self.client.create(resource, body) def add_static_route(self, logical_router_id, dest_cidr, nexthop): resource = ('logical-routers/%s/routing/static-routes' % logical_router_id) body = {} if dest_cidr: body['network'] = dest_cidr if nexthop: body['next_hops'] = [{"ip_address": nexthop}] return self.client.create(resource, body) def delete_static_route(self, logical_router_id, static_route_id): resource = 'logical-routers/%s/routing/static-routes/%s' % ( logical_router_id, static_route_id) self.client.delete(resource) def delete_static_route_by_values(self, logical_router_id, dest_cidr=None, nexthop=None): resource = ('logical-routers/%s/routing/static-routes' % logical_router_id) kwargs = {} if dest_cidr: kwargs['network'] = dest_cidr if nexthop: kwargs['next_hops'] = [{"ip_address": nexthop}] return self._delete_resource_by_values(resource, **kwargs) def delete_nat_rule(self, logical_router_id, nat_rule_id): resource = 'logical-routers/%s/nat/rules/%s' % (logical_router_id, nat_rule_id) self.client.delete(resource) def delete_nat_rule_by_values(self, logical_router_id, strict_mode=True, skip_not_found=True, **kwargs): resource = 'logical-routers/%s/nat/rules' % logical_router_id return self._delete_resource_by_values( resource, skip_not_found=skip_not_found, strict_mode=strict_mode, **kwargs) def list_nat_rules(self, logical_router_id): resource = 'logical-routers/%s/nat/rules' % logical_router_id return self.client.list(resource) def update_nat_rule(self, logical_router_id, nat_rule_id, **kwargs): resource = 'logical-routers/%s/nat/rules/%s' % ( logical_router_id, 
nat_rule_id) return self._update_resource(resource, kwargs, retry=True) def update_advertisement(self, logical_router_id, **kwargs): resource = ('logical-routers/%s/routing/advertisement' % logical_router_id) # ignore load balancing flags if lb is the not supported if (self.nsxlib and not self.nsxlib.feature_supported( nsx_constants.FEATURE_LOAD_BALANCER)): for arg in ('advertise_lb_vip', 'advertise_lb_snat_ip'): if kwargs[arg]: LOG.error("Ignoring %(arg)s for router %(rtr)s " "update_advertisement: This feature is not " "supported.", {'arg': arg, 'rtr': logical_router_id}) del kwargs[arg] return self._update_resource(resource, kwargs, retry=True) def update_advertisement_rules(self, logical_router_id, rules): resource = ('logical-routers/%s/routing/advertisement/rules' % logical_router_id) return self._update_resource(resource, {'rules': rules}, retry=True) def get_advertisement_rules(self, logical_router_id): resource = ('logical-routers/%s/routing/advertisement/rules' % logical_router_id) return self.client.get(resource) def create(self, display_name, tags, edge_cluster_uuid=None, tier_0=False, description=None): # TODO(salv-orlando): If possible do not manage edge clusters # in the main plugin logic. router_type = (nsx_constants.ROUTER_TYPE_TIER0 if tier_0 else nsx_constants.ROUTER_TYPE_TIER1) body = {'display_name': display_name, 'router_type': router_type, 'tags': tags} if edge_cluster_uuid: body['edge_cluster_id'] = edge_cluster_uuid if description: body['description'] = description return self.client.create(self.get_path(), body=body) def delete(self, lrouter_id, force=False): url = lrouter_id if force: url += '?force=%s' % force return self.client.delete(self.get_path(url)) def update(self, lrouter_id, *args, **kwargs): return self._update_with_retry(lrouter_id, kwargs) def get_firewall_section_id(self, lrouter_id, router_body=None): """Return the id of the auto created firewall section of the router If the router was already retrieved from the backend it is possible to give it as an input to avoid another backend call. 
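# Illustrative sketch: adding an SNAT rule and a default static route on a
# logical router with the NsxLibLogicalRouter methods above. The UUID,
# addresses and priority are placeholders, and 'nsxlib.logical_router' is
# assumed to be the accessor for this resource.
router_id = '<tier1-router-uuid>'
nsxlib.logical_router.add_nat_rule(
    router_id, action='SNAT',
    translated_network='203.0.113.10',
    source_net='10.0.0.0/24',
    rule_priority=1000)
nsxlib.logical_router.add_static_route(
    router_id, dest_cidr='0.0.0.0/0', nexthop='10.0.0.1')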
""" if not router_body: router_body = self.get(lrouter_id) if 'firewall_sections' in router_body: firewall_sections = router_body['firewall_sections'] for sec in firewall_sections: if (sec.get('is_valid') and sec.get('target_type') == "FirewallSection"): return firewall_sections[0].get('target_id') def list(self, router_type=None): """List all/by type logical routers.""" if router_type: resource = '%s?router_type=%s' % (self.get_path(), router_type) else: resource = self.get_path() return self.client.list(resource) class NsxLibEdgeCluster(utils.NsxLibApiBase): @property def uri_segment(self): return 'edge-clusters' @property def resource_type(self): return 'EdgeCluster' class NsxLibTransportZone(utils.NsxLibApiBase): TRANSPORT_TYPE_VLAN = 'VLAN' TRANSPORT_TYPE_OVERLAY = 'OVERLAY' HOST_SWITCH_MODE_ENS = 'ENS' HOST_SWITCH_MODE_STANDARD = 'STANDARD' @property def uri_segment(self): return 'transport-zones' @property def resource_type(self): return 'TransportZone' @property def use_cache_for_get(self): return True def get_transport_type(self, uuid): tz = self.get(uuid) return tz['transport_type'] def get_host_switch_mode(self, uuid): tz = self.get(uuid) return tz.get('host_switch_mode', self.HOST_SWITCH_MODE_STANDARD) class NsxLibDhcpProfile(utils.NsxLibApiBase): @property def uri_segment(self): return 'dhcp/server-profiles' @property def resource_type(self): return 'DhcpProfile' class NsxLibDhcpRelayService(utils.NsxLibApiBase): @property def uri_segment(self): return 'dhcp/relays' @property def resource_type(self): return 'DhcpRelayService' @property def use_cache_for_get(self): return True def get_server_ips(self, uuid): # Return the server ips of the relay profile attached to this service service = self.get(uuid) profile_id = service.get('dhcp_relay_profile_id') if profile_id and self.nsxlib: return self.nsxlib.relay_profile.get_server_ips(profile_id) class NsxLibDhcpRelayProfile(utils.NsxLibApiBase): @property def uri_segment(self): return 'dhcp/relay-profiles' @property def resource_type(self): return 'DhcpRelayProfile' @property def use_cache_for_get(self): return True def get_server_ips(self, uuid): profile = self.get(uuid) return profile.get('server_addresses') class NsxLibMetadataProxy(utils.NsxLibApiBase): @property def uri_segment(self): return 'md-proxies' @property def resource_type(self): return 'MetadataProxy' def update(self, uuid, server_url=None, secret=None, edge_cluster_id=None): body = {} # update the relevant fields if server_url is not None: body['metadata_server_url'] = server_url if secret is not None: body['secret'] = secret if edge_cluster_id is not None: body['edge_cluster_id'] = edge_cluster_id return self._update_with_retry(uuid, body) class NsxLibBridgeCluster(utils.NsxLibApiBase): @property def uri_segment(self): return 'bridge-clusters' @property def resource_type(self): return 'BridgeCluster' class NsxLibIpBlockSubnet(utils.NsxLibApiBase): @property def uri_segment(self): return 'pools/ip-subnets' @property def resource_type(self): return 'IpBlockSubnet' def create(self, ip_block_id, subnet_size): """Create a IP block subnet on the backend.""" body = {'size': subnet_size, 'block_id': ip_block_id} return self.client.create(self.get_path(), body) def delete(self, subnet_id): """Delete a IP block subnet on the backend.""" self.client.delete(self.get_path(subnet_id)) def list(self, ip_block_id): resource = '%s?block_id=%s' % (self.get_path(), ip_block_id) return self.client.get(resource) class NsxLibIpBlock(utils.NsxLibApiBase): @property def uri_segment(self): 
return 'pools/ip-blocks' @property def resource_type(self): return 'IpBlock' class NsxLibFabricVirtualMachine(utils.NsxLibApiBase): @property def uri_segment(self): return 'fabric/virtual-machines' @property def resource_type(self): return 'VirtualMachine' def get_by_display_name(self, display_name): url = '%s?display_name=%s' % (self.get_path(), display_name) return self.client.get(url) class NsxLibFabricVirtualInterface(utils.NsxLibApiBase): @property def uri_segment(self): return 'fabric/vifs' @property def resource_type(self): return 'VirtualNetworkInterface' def get_by_owner_vm_id(self, owner_vm_id): url = '%s?owner_vm_id=%s' % (self.get_path(), owner_vm_id) return self.client.get(url) vmware-nsxlib-12.0.1/vmware_nsxlib/v3/trust_management.py0000666000175100017510000001071613244535763023577 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3 import utils BASE_SECTION = 'trust-management' CERT_SECTION = BASE_SECTION + '/certificates' ID_SECTION = BASE_SECTION + '/principal-identities' USER_GROUP_TYPES = [ 'read_only_api_users', 'read_write_api_users', 'superusers'] class NsxLibTrustManagement(utils.NsxLibApiBase): def create_cert(self, cert_pem, private_key=None, passphrase=None, tags=None): resource = CERT_SECTION + '?action=import' body = {'pem_encoded': cert_pem} if private_key: body.update( {'private_key': private_key}) if passphrase: body.update({'passphrase': passphrase}) if tags: body.update({'tags': tags}) results = self.client.create(resource, body)['results'] if len(results) > 0: # should be only one result return results[0]['id'] def get_cert(self, cert_id): resource = CERT_SECTION + '/' + cert_id return self.client.get(resource) def get_certs(self): return self.client.get(CERT_SECTION)['results'] def delete_cert(self, cert_id): resource = CERT_SECTION + '/' + cert_id self.client.delete(resource) def find_cert_with_pem(self, cert_pem): # Find certificate with cert_pem certs = self.get_certs() cert_ids = [cert['id'] for cert in certs if cert['pem_encoded'] == cert_pem] return cert_ids def create_identity(self, name, cert_id, node_id, permission_group): # Validate permission group before sending to server if permission_group not in USER_GROUP_TYPES: raise nsxlib_exc.InvalidInput( operation='create_identity', arg_val=permission_group, arg_name='permission_group') body = {'name': name, 'certificate_id': cert_id, 'node_id': node_id, 'permission_group': permission_group, 'is_protected': True} self.client.create(ID_SECTION, body) def get_identities(self, name): ids = self.client.get(ID_SECTION)['results'] return [identity for identity in ids if identity['name'] == name] def delete_identity(self, identity_id): resource = ID_SECTION + '/' + identity_id self.client.delete(resource) def find_cert_and_identity(self, name, cert_pem): nsx_style_pem = cert_pem certs = self.get_certs() cert_ids = [cert['id'] for cert in certs if cert['pem_encoded'] == 
nsx_style_pem.decode('ascii')] if not cert_ids: raise nsxlib_exc.ResourceNotFound( manager=self.client.nsx_api_managers, operation="find_certificate") identities = self.get_identities(name) # should be zero or one matching identities results = [identity for identity in identities if identity['certificate_id'] in cert_ids] if not results: raise nsxlib_exc.ResourceNotFound( manager=self.client.nsx_api_managers, operation="delete_identity") return results[0]['certificate_id'], results[0]['id'] def delete_cert_and_identity(self, name, cert_pem): cert_id, identity_id = self.find_cert_and_identity(name, cert_pem) self.delete_identity(identity_id) self.delete_cert(cert_id) def create_cert_and_identity(self, name, cert_pem, node_id, permission_group='read_write_api_users'): nsx_cert_id = self.create_cert(cert_pem) try: self.create_identity(name, nsx_cert_id, node_id, permission_group) except nsxlib_exc.ManagerError as e: self.delete_cert(nsx_cert_id) raise e vmware-nsxlib-12.0.1/vmware_nsxlib/v3/policy_defs.py0000666000175100017510000005333213244535763022523 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import abc import six from vmware_nsxlib.v3 import policy_constants TENANTS_PATH_PATTERN = "%s/" DOMAINS_PATH_PATTERN = TENANTS_PATH_PATTERN + "domains/" COMM_PROF_PATH_PATTERN = TENANTS_PATH_PATTERN + "communication-profiles/" SERVICES_PATH_PATTERN = TENANTS_PATH_PATTERN + "services/" REALIZED_STATE_EF = (TENANTS_PATH_PATTERN + "realized-state/enforcement-points/%s/") REALIZED_STATE_GROUP = REALIZED_STATE_EF + "groups/nsgroups/%s-%s" REALIZED_STATE_COMM_MAP = REALIZED_STATE_EF + "firewalls/firewall-sections/%s" REALIZED_STATE_SERVICE = REALIZED_STATE_EF + "services/nsservices/services:%s" @six.add_metaclass(abc.ABCMeta) class ResourceDef(object): def __init__(self): self.tenant = None self.id = None self.name = None self.description = None self.parent_ids = None self.body = {} def get_obj_dict(self): body = {'display_name': self.name, 'description': self.description} if self.id: body['id'] = self.id return body @abc.abstractproperty def path_pattern(self): pass def get_section_path(self): return self.path_pattern % self.parent_ids def get_resource_path(self): if self.id: return self.get_section_path() + self.id return self.get_section_path() def get_resource_full_path(self): return '/' + self.get_resource_path() @property def get_last_section_dict_key(self): last_section = self.path_pattern.split("/")[-2] return last_section.replace('-', '_') @staticmethod def sub_entries_path(): pass def _get_body_from_kwargs(self, **kwargs): if 'body' in kwargs: body = kwargs['body'] else: body = {} return body def update_attributes_in_body(self, **kwargs): self.body = self._get_body_from_kwargs(**kwargs) if 'body' in kwargs: del kwargs['body'] for key, value in six.iteritems(kwargs): if key == 'body': continue if value is not None: if key == 'name': self.body['display_name'] = value else: self.body[key] = value entries_path = 
self.sub_entries_path() # make sure service entries are there if entries_path and entries_path not in self.body: self.body[entries_path] = [] @classmethod def get_single_entry(cls, obj_body): """Return the single sub-entry from the object body. If there are no entries, or more than 1 - return None. """ entries_path = cls.sub_entries_path() if not entries_path: # This sub class doesn't support this return if (entries_path not in obj_body or len(obj_body[entries_path]) != 1): return return obj_body[entries_path][0] class DomainDef(ResourceDef): def __init__(self, domain_id=None, name=None, description=None, tenant=policy_constants.POLICY_INFRA_TENANT): super(DomainDef, self).__init__() self.tenant = tenant self.id = domain_id self.name = name self.description = description self.parent_ids = (tenant) @property def path_pattern(self): return DOMAINS_PATH_PATTERN class Condition(object): def __init__(self, value, key=policy_constants.CONDITION_KEY_TAG, member_type=policy_constants.CONDITION_MEMBER_PORT, operator=policy_constants.CONDITION_OP_EQUALS): self.value = value self.key = key self.member_type = member_type self.operator = operator def get_obj_dict(self): return {'resource_type': 'Condition', 'member_type': self.member_type, 'key': self.key, 'value': self.value, 'operator': self.operator} class GroupDef(ResourceDef): def __init__(self, domain_id=None, group_id=None, name=None, description=None, conditions=None, tenant=policy_constants.POLICY_INFRA_TENANT): super(GroupDef, self).__init__() self.tenant = tenant self.id = group_id self.name = name self.description = description self.domain_id = domain_id self.parent_ids = (tenant, domain_id) if conditions and isinstance(conditions, Condition): self.conditions = [conditions] else: self.conditions = conditions @property def path_pattern(self): return DOMAINS_PATH_PATTERN + "%s/groups/" def get_obj_dict(self): body = super(GroupDef, self).get_obj_dict() if self.conditions: body['expression'] = [condition.get_obj_dict() for condition in self.conditions] return body def update_attributes_in_body(self, **kwargs): body = self._get_body_from_kwargs(**kwargs) if 'body' in kwargs: del kwargs['body'] # Fix params that need special conversions if kwargs.get('conditions') is not None: body['expression'] = [cond.get_obj_dict() for cond in kwargs['conditions']] del kwargs['conditions'] super(GroupDef, self).update_attributes_in_body(body=body, **kwargs) def get_realized_state_path(self, ep_id): return REALIZED_STATE_GROUP % (self.tenant, ep_id, self.domain_id, self.id) class ServiceDef(ResourceDef): def __init__(self, service_id=None, name=None, description=None, tenant=policy_constants.POLICY_INFRA_TENANT): super(ServiceDef, self).__init__() self.tenant = tenant self.id = service_id self.name = name self.description = description self.parent_ids = (tenant) self.service_entries = [] @property def path_pattern(self): return SERVICES_PATH_PATTERN def get_obj_dict(self): body = super(ServiceDef, self).get_obj_dict() body['service_entries'] = [entry.get_obj_dict() for entry in self.service_entries] return body @staticmethod def sub_entries_path(): return ServiceEntryDef().get_last_section_dict_key def get_realized_state_path(self, ep_id): return REALIZED_STATE_SERVICE % (self.tenant, ep_id, self.id) class ServiceEntryDef(ResourceDef): def __init__(self): super(ServiceEntryDef, self).__init__() @property def path_pattern(self): return SERVICES_PATH_PATTERN + "%s/service-entries/" class L4ServiceEntryDef(ServiceEntryDef): def __init__(self, service_id=None, 
service_entry_id=None, name=None, description=None, protocol=policy_constants.TCP, dest_ports=None, tenant=policy_constants.POLICY_INFRA_TENANT): super(L4ServiceEntryDef, self).__init__() self.tenant = tenant self.id = service_entry_id self.name = name self.description = description self.protocol = protocol.upper() self.dest_ports = dest_ports self.parent_ids = (tenant, service_id) def get_obj_dict(self): body = super(L4ServiceEntryDef, self).get_obj_dict() body['resource_type'] = 'L4PortSetServiceEntry' body['l4_protocol'] = self.protocol body['destination_ports'] = self.dest_ports return body def update_attributes_in_body(self, **kwargs): # Fix params that need special conversions body = self._get_body_from_kwargs(**kwargs) if 'body' in kwargs: del kwargs['body'] if kwargs.get('protocol') is not None: body['l4_protocol'] = kwargs['protocol'].upper() del kwargs['protocol'] if kwargs.get('dest_ports') is not None: body['destination_ports'] = kwargs['dest_ports'] del kwargs['dest_ports'] super(L4ServiceEntryDef, self).update_attributes_in_body( body=body, **kwargs) class IcmpServiceEntryDef(ServiceEntryDef): def __init__(self, service_id=None, service_entry_id=None, name=None, description=None, version=4, icmp_type=None, icmp_code=None, tenant=policy_constants.POLICY_INFRA_TENANT): super(IcmpServiceEntryDef, self).__init__() self.tenant = tenant self.id = service_entry_id self.name = name self.description = description self.version = version self.icmp_type = icmp_type self.icmp_code = icmp_code self.parent_ids = (tenant, service_id) def get_obj_dict(self): body = super(IcmpServiceEntryDef, self).get_obj_dict() body['resource_type'] = 'ICMPTypeServiceEntry' body['protocol'] = 'ICMPv' + str(self.version) if self.icmp_type: body['icmp_type'] = self.icmp_type if self.icmp_code: body['icmp_code'] = self.icmp_code return body def update_attributes_in_body(self, **kwargs): # Fix params that need special conversions body = self._get_body_from_kwargs(**kwargs) if 'body' in kwargs: del kwargs['body'] if kwargs.get('version') is not None: body['protocol'] = 'ICMPv' + str(kwargs.get('version')) del kwargs['version'] super(IcmpServiceEntryDef, self).update_attributes_in_body( body=body, **kwargs) class CommunicationProfileDef(ResourceDef): def __init__(self, profile_id=None, name=None, description=None, tenant=policy_constants.POLICY_INFRA_TENANT): super(CommunicationProfileDef, self).__init__() self.tenant = tenant self.id = profile_id self.name = name self.description = description self.parent_ids = (tenant) @property def path_pattern(self): return COMM_PROF_PATH_PATTERN def get_obj_dict(self): body = super(CommunicationProfileDef, self).get_obj_dict() body['communication_profile_entries'] = [] return body @staticmethod def sub_entries_path(): entryDef = CommunicationProfileEntryDef() return entryDef.get_last_section_dict_key def update_attributes_in_body(self, **kwargs): super(CommunicationProfileDef, self).update_attributes_in_body( **kwargs) # make sure entries are there entries_path = self.sub_entries_path() if entries_path not in self.body: self.body[entries_path] = [] class CommunicationProfileEntryDef(ResourceDef): def __init__(self, profile_id=None, profile_entry_id=None, name=None, description=None, services=None, action=policy_constants.ACTION_ALLOW, tenant=policy_constants.POLICY_INFRA_TENANT): super(CommunicationProfileEntryDef, self).__init__() self.tenant = tenant self.id = profile_entry_id self.name = name self.description = description self.services = services self.action = 
action.upper() self.parent_ids = (tenant, profile_id) @property def path_pattern(self): return COMM_PROF_PATH_PATTERN + "%s/communication-profile-entries/" def get_obj_dict(self): body = super(CommunicationProfileEntryDef, self).get_obj_dict() body['services'] = self.services body['action'] = self.action return body def update_attributes_in_body(self, **kwargs): body = self._get_body_from_kwargs(**kwargs) if 'body' in kwargs: del kwargs['body'] if kwargs.get('action') is not None: body['action'] = kwargs['action'].upper() del kwargs['action'] super(CommunicationProfileEntryDef, self).update_attributes_in_body( body=body, **kwargs) class CommunicationMapDef(ResourceDef): def __init__(self, domain_id=None, tenant=policy_constants.POLICY_INFRA_TENANT): super(CommunicationMapDef, self).__init__() self.tenant = tenant self.domain_id = domain_id self.parent_ids = (tenant, domain_id) @property def path_pattern(self): return (DOMAINS_PATH_PATTERN + "%s/communication-map/") def get_realized_state_path(self, ep_id): return REALIZED_STATE_COMM_MAP % (self.tenant, ep_id, self.domain_id) class CommunicationMapEntryDef(ResourceDef): def __init__(self, domain_id=None, map_id=None, sequence_number=None, source_groups=None, dest_groups=None, profile_id=None, name=None, description=None, tenant=policy_constants.POLICY_INFRA_TENANT): super(CommunicationMapEntryDef, self).__init__() self.tenant = tenant self.domain_id = domain_id self.id = map_id self.name = name self.description = description self.sequence_number = sequence_number self.source_groups = self.get_groups_path(domain_id, source_groups) self.dest_groups = self.get_groups_path(domain_id, dest_groups) self.profile_path = self.get_profile_path( profile_id) if profile_id else None self.parent_ids = (tenant, domain_id) # convert groups and communication profile to full path def get_groups_path(self, domain_id, group_ids): if not group_ids: return [policy_constants.ANY_GROUP] return [GroupDef(domain_id, group_id, tenant=self.tenant).get_resource_full_path() for group_id in group_ids] def get_profile_path(self, profile_id): return CommunicationProfileDef( profile_id, tenant=self.tenant).get_resource_full_path() @property def path_pattern(self): return (DOMAINS_PATH_PATTERN + "%s/communication-map/communication-entries/") def get_obj_dict(self): body = super(CommunicationMapEntryDef, self).get_obj_dict() body['source_groups'] = self.source_groups body['destination_groups'] = self.dest_groups body['sequence_number'] = self.sequence_number body['communication_profile_path'] = self.profile_path return body def update_attributes_in_body(self, **kwargs): body = self._get_body_from_kwargs(**kwargs) if 'body' in kwargs: del kwargs['body'] # Fix params that need special conversions if kwargs.get('profile_id') is not None: profile_path = self.get_profile_path(kwargs['profile_id']) body['communication_profile_path'] = profile_path del kwargs['profile_id'] if kwargs.get('dest_groups') is not None: groups = self.get_groups_path( self.domain_id, kwargs['dest_groups']) body['destination_groups'] = groups del kwargs['dest_groups'] if kwargs.get('source_groups') is not None: groups = self.get_groups_path( self.domain_id, kwargs['source_groups']) body['source_groups'] = groups del kwargs['source_groups'] super(CommunicationMapEntryDef, self).update_attributes_in_body( body=body, **kwargs) # Currently supports only NSXT class EnforcementPointDef(ResourceDef): def __init__(self, ep_id=None, name=None, description=None, ip_address=None, username=None, password=None, 
thumbprint=None, tenant=policy_constants.POLICY_INFRA_TENANT): super(EnforcementPointDef, self).__init__() self.id = ep_id self.name = name self.description = description self.tenant = tenant self.username = username self.password = password self.ip_address = ip_address self.thumbprint = thumbprint self.parent_ids = (tenant) @property def path_pattern(self): return (TENANTS_PATH_PATTERN + 'deployment-zones/default/enforcement-points/') def get_obj_dict(self): body = super(EnforcementPointDef, self).get_obj_dict() body['id'] = self.id body['connection_info'] = { 'thumbprint': self.thumbprint, 'username': self.username, 'password': self.password, 'enforcement_point_address': self.ip_address, 'resource_type': 'NSXTConnectionInfo'} body['resource_type'] = 'EnforcementPoint' return body def update_attributes_in_body(self, **kwargs): body = self._get_body_from_kwargs(**kwargs) if 'body' in kwargs: del kwargs['body'] # Fix params that need special conversions if body.get('connection_info'): body['connection_info'][0]['resource_type'] = 'NSXTConnectionInfo' for attr in ('username', 'password', 'ip_address', 'thumbprint'): if kwargs.get(attr) is not None: body['connection_info'][0][attr] = kwargs[attr] del kwargs[attr] super(EnforcementPointDef, self).update_attributes_in_body( body=body, **kwargs) def get_realized_state_path(self): return REALIZED_STATE_EF % (self.tenant, self.id) # Currently assumes one deployment point per id class DeploymentMapDef(ResourceDef): def __init__(self, map_id=None, name=None, description=None, domain_id=None, ep_id=None, tenant=policy_constants.POLICY_INFRA_TENANT): super(DeploymentMapDef, self).__init__() self.id = map_id self.name = name self.description = description # convert enforcement point id to path self.ep_path = EnforcementPointDef( ep_id, tenant=tenant).get_resource_full_path() if ep_id else None self.tenant = tenant self.parent_ids = (tenant, domain_id) @property def path_pattern(self): return (DOMAINS_PATH_PATTERN + '%s/domain-deployment-maps/') def get_obj_dict(self): body = super(DeploymentMapDef, self).get_obj_dict() body['id'] = self.id body['enforcement_point_path'] = self.ep_path return body def update_attributes_in_body(self, **kwargs): body = self._get_body_from_kwargs(**kwargs) if 'body' in kwargs: del kwargs['body'] # Fix params that need special conversions if kwargs.get('domain_id') is not None: domain_id = kwargs.get('domain_id') domain_path = DomainDef( domain_id, tenant=self.tenant).get_resource_full_path() body['domain_path'] = domain_path del kwargs['domain_id'] if kwargs.get('ep_id') is not None: ep_id = kwargs.get('ep_id') ep_path = EnforcementPointDef( ep_id, tenant=self.tenant).get_resource_full_path() body['enforcement_point_paths'] = [ep_path] del kwargs['ep_id'] super(DeploymentMapDef, self).update_attributes_in_body( body=body, **kwargs) class NsxPolicyApi(object): def __init__(self, client): self.client = client def create_or_update(self, resource_def): """Create or update a policy object. This api will update an existing object, or create a new one if it doesn't exist. 
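
        Illustrative call (editorial sketch; 'openstack' and 'web-tier' are
        hypothetical identifiers, not values used by this library):

            group = GroupDef(domain_id='openstack', group_id='web-tier',
                             name='web tier', conditions=Condition('web'))
            policy_api.create_or_update(group)
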
The policy API supports POST for update too """ path = resource_def.get_resource_path() body = resource_def.body if not body: body = resource_def.get_obj_dict() return self.client.create(path, body) def create_with_parent(self, parent_def, resource_def): path = parent_def.get_resource_path() body = parent_def.get_obj_dict() if isinstance(resource_def, list): child_dict_key = resource_def[0].get_last_section_dict_key body[child_dict_key] = [r.get_obj_dict() for r in resource_def] else: child_dict_key = resource_def.get_last_section_dict_key body[child_dict_key] = [resource_def.get_obj_dict()] return self.client.create(path, body) def delete(self, resource_def): path = resource_def.get_resource_path() self.client.delete(path) def get(self, resource_def): path = resource_def.get_resource_path() return self.client.get(path) def list(self, resource_def): path = resource_def.get_section_path() return self.client.list(path) def get_by_path(self, path): return self.client.get(path) vmware-nsxlib-12.0.1/vmware_nsxlib/v3/__init__.py0000666000175100017510000003617713244535763021772 0ustar zuulzuul00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from distutils import version from oslo_log import log import six from vmware_nsxlib._i18n import _ from vmware_nsxlib.v3 import client from vmware_nsxlib.v3 import cluster from vmware_nsxlib.v3 import core_resources from vmware_nsxlib.v3 import exceptions from vmware_nsxlib.v3 import load_balancer from vmware_nsxlib.v3 import native_dhcp from vmware_nsxlib.v3 import nsx_constants from vmware_nsxlib.v3 import policy_defs from vmware_nsxlib.v3 import policy_resources from vmware_nsxlib.v3 import resources from vmware_nsxlib.v3 import router from vmware_nsxlib.v3 import security from vmware_nsxlib.v3 import trust_management from vmware_nsxlib.v3 import utils from vmware_nsxlib.v3 import vpn_ipsec LOG = log.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class NsxLibBase(object): def __init__(self, nsxlib_config): self.set_config(nsxlib_config) # create the Cluster self.cluster = cluster.NSXClusteredAPI(self.nsxlib_config) # create the Client self.client = client.NSX3Client( self.cluster, nsx_api_managers=self.nsxlib_config.nsx_api_managers, max_attempts=self.nsxlib_config.max_attempts, url_path_base=self.client_url_prefix, rate_limit_retry=self.nsxlib_config.rate_limit_retry) self.general_apis = utils.NsxLibApiBase( self.client, self.nsxlib_config) self.init_api() super(NsxLibBase, self).__init__() self.nsx_version = None def set_config(self, nsxlib_config): """Set config user provided and extend it according to application""" self.nsxlib_config = nsxlib_config self.nsxlib_config.extend(keepalive_section=self.keepalive_section, url_base=self.client_url_prefix) @abc.abstractproperty def client_url_prefix(self): pass @abc.abstractproperty def keepalive_section(self): pass @abc.abstractmethod def init_api(self): pass @abc.abstractmethod def feature_supported(self, feature): pass def 
build_v3_api_version_tag(self): return self.general_apis.build_v3_api_version_tag() def is_internal_resource(self, nsx_resource): return self.general_apis.is_internal_resource(nsx_resource) def build_v3_tags_payload(self, resource, resource_type, project_name): return self.general_apis.build_v3_tags_payload( resource, resource_type, project_name) def reinitialize_cluster(self, resource, event, trigger, payload=None): self.cluster._reinit_cluster() def subscribe(self, callback, event): self.cluster.subscribe(callback, event) # TODO(abhiraut): Revisit this method to generate complex boolean # queries to search resources. def search_by_tags(self, tags, resource_type=None, cursor=None, page_size=None): """Return the list of resources searched based on tags. Currently the query only supports AND boolean operator. :param tags: List of dictionaries containing tags. Each NSX tag dictionary is of the form: {'scope': , 'tag': } :param resource_type: Optional string parameter to limit the scope of the search to the given ResourceType. :param cursor: Opaque cursor to be used for getting next page of records (supplied by current result page). :param page_size: Maximum number of results to return in this page. """ if not tags: reason = _("Missing required argument 'tags'") raise exceptions.NsxSearchInvalidQuery(reason=reason) # Query will return nothing if the same scope is repeated. query_tags = self._build_query(tags) query = 'resource_type:%s' % resource_type if resource_type else None if query: query += " AND %s" % query_tags else: query = query_tags url = "search?query=%s" % query if cursor: url += "&cursor=%d" % cursor if page_size: url += "&page_size=%d" % page_size return self.client.url_get(url) def search_all_by_tags(self, tags, resource_type=None): """Return all the results searched based on tags.""" results = [] cursor = 0 while True: response = self.search_by_tags(resource_type=resource_type, tags=tags, cursor=cursor) if not response['results']: return results results.extend(response['results']) cursor = int(response['cursor']) result_count = int(response['result_count']) if cursor >= result_count: return results def get_id_by_resource_and_tag(self, resource_type, scope, tag, alert_not_found=False, alert_multiple=False): """Search a resource type by 1 scope&tag. Return the id of the result only if it is single. 
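
        Illustrative call (editorial sketch; the scope and tag values shown
        are hypothetical):

            port_id = nsxlib.get_id_by_resource_and_tag(
                'LogicalPort', 'os-neutron-port-id', neutron_port_id,
                alert_not_found=True)
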
""" query_tags = [{'scope': utils.escape_tag_data(scope), 'tag': utils.escape_tag_data(tag)}] query_result = self.search_by_tags( tags=query_tags, resource_type=resource_type) if not query_result['result_count']: if alert_not_found: msg = _("No %(type)s found for tag '%(scope)s:%(tag)s'") % { 'type': resource_type, 'scope': scope, 'tag': tag} LOG.warning(msg) raise exceptions.ResourceNotFound( manager=self.nsxlib_config.nsx_api_managers, operation=msg) elif query_result['result_count'] == 1: return query_result['results'][0]['id'] else: # multiple results if alert_multiple: msg = _("Multiple %(type)s found for tag '%(scope)s:" "%(tag)s'") % { 'type': resource_type, 'scope': scope, 'tag': tag} LOG.warning(msg) raise exceptions.ManagerError( manager=self.nsxlib_config.nsx_api_managers, operation=msg, details='') def _build_tag_query(self, tag): # Validate that the correct keys are used if set(tag.keys()) - set(('scope', 'tag')): reason = _("Only 'scope' and 'tag' keys are supported") raise exceptions.NsxSearchInvalidQuery(reason=reason) _scope = tag.get('scope') _tag = tag.get('tag') if _scope and _tag: return 'tags.scope:%s AND tags.tag:%s' % (_scope, _tag) elif _scope: return 'tags.scope:%s' % _scope else: return 'tags.tag:%s' % _tag def _build_query(self, tags): return " AND ".join([self._build_tag_query(item) for item in tags]) def get_tag_limits(self): try: result = self.client.url_get('spec/vmware/types/Tag') scope_length = result['properties']['scope']['maxLength'] tag_length = result['properties']['tag']['maxLength'] except Exception as e: LOG.error("Unable to read tag limits. Reason: %s", e) scope_length = utils.MAX_RESOURCE_TYPE_LEN tag_length = utils.MAX_TAG_LEN try: result = self.client.url_get('spec/vmware/types/ManagedResource') max_tags = result['properties']['tags']['maxItems'] except Exception as e: LOG.error("Unable to read maximum tags. 
Reason: %s", e) max_tags = utils.MAX_TAGS return utils.TagLimits(scope_length, tag_length, max_tags) class NsxLib(NsxLibBase): def init_api(self): self.port_mirror = core_resources.NsxLibPortMirror( self.client, self.nsxlib_config, nsxlib=self) self.bridge_endpoint = core_resources.NsxLibBridgeEndpoint( self.client, self.nsxlib_config, nsxlib=self) self.logical_switch = core_resources.NsxLibLogicalSwitch( self.client, self.nsxlib_config, nsxlib=self) self.logical_router = core_resources.NsxLibLogicalRouter( self.client, self.nsxlib_config, nsxlib=self) self.switching_profile = core_resources.NsxLibSwitchingProfile( self.client, self.nsxlib_config, nsxlib=self) self.qos_switching_profile = core_resources.NsxLibQosSwitchingProfile( self.client, self.nsxlib_config, nsxlib=self) self.edge_cluster = core_resources.NsxLibEdgeCluster( self.client, self.nsxlib_config, nsxlib=self) self.bridge_cluster = core_resources.NsxLibBridgeCluster( self.client, self.nsxlib_config, nsxlib=self) self.transport_zone = core_resources.NsxLibTransportZone( self.client, self.nsxlib_config, nsxlib=self) self.relay_service = core_resources.NsxLibDhcpRelayService( self.client, self.nsxlib_config, nsxlib=self) self.relay_profile = core_resources.NsxLibDhcpRelayProfile( self.client, self.nsxlib_config, nsxlib=self) self.native_dhcp_profile = core_resources.NsxLibDhcpProfile( self.client, self.nsxlib_config, nsxlib=self) self.native_md_proxy = core_resources.NsxLibMetadataProxy( self.client, self.nsxlib_config, nsxlib=self) self.firewall_section = security.NsxLibFirewallSection( self.client, self.nsxlib_config, nsxlib=self) self.ns_group = security.NsxLibNsGroup( self.client, self.nsxlib_config, self.firewall_section, nsxlib=self) self.native_dhcp = native_dhcp.NsxLibNativeDhcp( self.client, self.nsxlib_config, nsxlib=self) self.ip_block_subnet = core_resources.NsxLibIpBlockSubnet( self.client, self.nsxlib_config, nsxlib=self) self.ip_block = core_resources.NsxLibIpBlock( self.client, self.nsxlib_config, nsxlib=self) self.ip_set = security.NsxLibIPSet( self.client, self.nsxlib_config, nsxlib=self) self.logical_port = resources.LogicalPort( self.client, self.nsxlib_config, nsxlib=self) self.logical_router_port = resources.LogicalRouterPort( self.client, self.nsxlib_config, nsxlib=self) self.dhcp_server = resources.LogicalDhcpServer( self.client, self.nsxlib_config, nsxlib=self) self.ip_pool = resources.IpPool( self.client, self.nsxlib_config, nsxlib=self) self.load_balancer = load_balancer.LoadBalancer( self.client, self.nsxlib_config) self.trust_management = trust_management.NsxLibTrustManagement( self.client, self.nsxlib_config) self.router = router.RouterLib( self.logical_router, self.logical_router_port, self) self.virtual_machine = core_resources.NsxLibFabricVirtualMachine( self.client, self.nsxlib_config, nsxlib=self) self.vif = core_resources.NsxLibFabricVirtualInterface( self.client, self.nsxlib_config, nsxlib=self) self.vpn_ipsec = vpn_ipsec.VpnIpSec( self.client, self.nsxlib_config, nsxlib=self) self.http_services = resources.NodeHttpServiceProperties( self.client, self.nsxlib_config, nsxlib=self) # Update tag limits self.tag_limits = self.get_tag_limits() utils.update_tag_limits(self.tag_limits) @property def keepalive_section(self): return 'transport-zones' def get_version(self): if self.nsx_version: return self.nsx_version node = self.client.get("node") self.nsx_version = node.get('node_version') return self.nsx_version def feature_supported(self, feature): if (version.LooseVersion(self.get_version()) >= 
version.LooseVersion(nsx_constants.NSX_VERSION_2_2_0)): # Features available since 2.2 if (feature == nsx_constants.FEATURE_VLAN_ROUTER_INTERFACE or feature == nsx_constants.FEATURE_IPSEC_VPN or feature == nsx_constants.FEATURE_ON_BEHALF_OF or feature == nsx_constants.FEATURE_RATE_LIMIT or feature == nsx_constants.FEATURE_TRUNK_VLAN): return True if (version.LooseVersion(self.get_version()) >= version.LooseVersion(nsx_constants.NSX_VERSION_2_1_0)): # Features available since 2.1 if (feature == nsx_constants.FEATURE_LOAD_BALANCER): return True if (version.LooseVersion(self.get_version()) >= version.LooseVersion(nsx_constants.NSX_VERSION_2_0_0)): # Features available since 2.0 if (feature == nsx_constants.FEATURE_EXCLUDE_PORT_BY_TAG or feature == nsx_constants.FEATURE_ROUTER_FIREWALL or feature == nsx_constants.FEATURE_DHCP_RELAY): return True if (version.LooseVersion(self.get_version()) >= version.LooseVersion(nsx_constants.NSX_VERSION_1_1_0)): # Features available since 1.1 if (feature == nsx_constants.FEATURE_MAC_LEARNING or feature == nsx_constants.FEATURE_DYNAMIC_CRITERIA): return True return False @property def client_url_prefix(self): return client.NSX3Client.NSX_V1_API_PREFIX class NsxPolicyLib(NsxLibBase): def init_api(self): self.policy_api = policy_defs.NsxPolicyApi(self.client) self.domain = policy_resources.NsxPolicyDomainApi(self.policy_api) self.group = policy_resources.NsxPolicyGroupApi(self.policy_api) self.service = policy_resources.NsxPolicyL4ServiceApi(self.policy_api) self.icmp_service = policy_resources.NsxPolicyIcmpServiceApi( self.policy_api) self.comm_profile = policy_resources.NsxPolicyCommunicationProfileApi( self.policy_api) self.comm_map = policy_resources.NsxPolicyCommunicationMapApi( self.policy_api) self.enforcement_point = policy_resources.NsxPolicyEnforcementPointApi( self.policy_api) self.deployment_map = policy_resources.NsxPolicyDeploymentMapApi( self.policy_api) @property def keepalive_section(self): return 'infra' def feature_supported(self, feature): return (feature == nsx_constants.FEATURE_NSX_POLICY) @property def client_url_prefix(self): return client.NSX3Client.NSX_POLICY_V1_API_PREFIX vmware-nsxlib-12.0.1/vmware_nsxlib/v3/router.py0000666000175100017510000002452713244535763021547 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
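# Editorial note (not part of the original source): RouterLib below is not
# meant to be constructed directly by API users; NsxLib.init_api() in
# vmware_nsxlib/v3/__init__.py (above) wires it up as
#
#     self.router = router.RouterLib(self.logical_router,
#                                    self.logical_router_port, self)
#
# so callers reach it as nsxlib.router.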
""" NSX-V3 Plugin router module """ import copy from oslo_log import log from vmware_nsxlib._i18n import _ from vmware_nsxlib.v3 import exceptions from vmware_nsxlib.v3 import nsx_constants from vmware_nsxlib.v3 import utils LOG = log.getLogger(__name__) MIN_EDGE_NODE_NUM = 1 TIER0_ROUTER_LINK_PORT_NAME = "TIER0-RouterLinkPort" TIER1_ROUTER_LINK_PORT_NAME = "TIER1-RouterLinkPort" ROUTER_INTF_PORT_NAME = "Tier1-RouterDownLinkPort" FIP_NAT_PRI = 900 GW_NAT_PRI = 1000 class RouterLib(object): def __init__(self, router_client, router_port_client, nsxlib): self._router_client = router_client self._router_port_client = router_port_client self.nsxlib = nsxlib def validate_tier0(self, tier0_groups_dict, tier0_uuid): err_msg = None try: lrouter = self._router_client.get(tier0_uuid) except exceptions.ResourceNotFound: err_msg = (_("Tier0 router %s not found at the backend. Either a " "valid UUID must be specified or a default tier0 " "router UUID must be configured in nsx.ini") % tier0_uuid) else: edge_cluster_uuid = lrouter.get('edge_cluster_id') if not edge_cluster_uuid: err_msg = _("Failed to get edge cluster uuid from tier0 " "router %s at the backend") % lrouter else: edge_cluster = self.nsxlib.edge_cluster.get(edge_cluster_uuid) member_index_list = [member['member_index'] for member in edge_cluster['members']] if len(member_index_list) < MIN_EDGE_NODE_NUM: err_msg = _("%(act_num)s edge members found in " "edge_cluster %(cluster_id)s, however we " "require at least %(exp_num)s edge nodes " "in edge cluster for use.") % { 'act_num': len(member_index_list), 'exp_num': MIN_EDGE_NODE_NUM, 'cluster_id': edge_cluster_uuid} if err_msg: raise exceptions.NsxLibInvalidInput(error_message=err_msg) else: tier0_groups_dict[tier0_uuid] = { 'edge_cluster_uuid': edge_cluster_uuid, 'member_index_list': member_index_list} def add_router_link_port(self, tier1_uuid, tier0_uuid, tags): # Create Tier0 logical router link port t0_tags = copy.copy(tags) t0_tags = utils.add_v3_tag(t0_tags, 'os-tier0-uuid', tier0_uuid) tier0_link_port = self._router_port_client.create( tier0_uuid, display_name=TIER0_ROUTER_LINK_PORT_NAME, tags=t0_tags, resource_type=nsx_constants.LROUTERPORT_LINKONTIER0, logical_port_id=None, address_groups=None) linked_logical_port_id = tier0_link_port['id'] # Create Tier1 logical router link port t1_tags = copy.copy(tags) t1_tags = utils.add_v3_tag(t1_tags, 'os-tier1-uuid', tier1_uuid) self._router_port_client.create( tier1_uuid, display_name=TIER1_ROUTER_LINK_PORT_NAME, tags=t1_tags, resource_type=nsx_constants.LROUTERPORT_LINKONTIER1, logical_port_id=linked_logical_port_id, address_groups=None) def remove_router_link_port(self, tier1_uuid, tier0_uuid): try: tier1_link_port = ( self._router_port_client.get_tier1_link_port(tier1_uuid)) except exceptions.ResourceNotFound: LOG.warning("Logical router link port for tier1 router: %s " "not found at the backend", tier1_uuid) return tier1_link_port_id = tier1_link_port['id'] tier0_link_port_id = ( tier1_link_port['linked_logical_router_port_id'].get('target_id')) self._router_port_client.delete(tier1_link_port_id) self._router_port_client.delete(tier0_link_port_id) def update_advertisement(self, logical_router_id, advertise_route_nat, advertise_route_connected, advertise_route_static=False, enabled=True, advertise_lb_vip=False, advertise_lb_snat_ip=False): return self.nsxlib.logical_router.update_advertisement( logical_router_id, advertise_nat_routes=advertise_route_nat, advertise_nsx_connected_routes=advertise_route_connected, 
advertise_static_routes=advertise_route_static, enabled=enabled, advertise_lb_vip=advertise_lb_vip, advertise_lb_snat_ip=advertise_lb_snat_ip) def delete_gw_snat_rule(self, logical_router_id, gw_ip): """Delete router snat rule matching the gw ip assuming there is only one """ return self.nsxlib.logical_router.delete_nat_rule_by_values( logical_router_id, translated_network=gw_ip) def delete_gw_snat_rule_by_source(self, logical_router_id, gw_ip, source_net, skip_not_found=False): """Delete router snat rule matching the gw ip & source""" return self.nsxlib.logical_router.delete_nat_rule_by_values( logical_router_id, translated_network=gw_ip, match_source_network=source_net, # Do not fail or warn if not found, unless asked for skip_not_found=skip_not_found, strict_mode=(not skip_not_found)) def delete_gw_snat_rules(self, logical_router_id, gw_ip): """Delete all the snat rules on the router with a specific gw ip""" return self.nsxlib.logical_router.delete_nat_rule_by_values( logical_router_id, translated_network=gw_ip, # Do not fail or warn if not found skip_not_found=True, strict_mode=False) def add_gw_snat_rule(self, logical_router_id, gw_ip, source_net=None, bypass_firewall=True): return self.nsxlib.logical_router.add_nat_rule( logical_router_id, action="SNAT", translated_network=gw_ip, source_net=source_net, rule_priority=GW_NAT_PRI, bypass_firewall=bypass_firewall) def update_router_edge_cluster(self, nsx_router_id, edge_cluster_uuid): return self._router_client.update(nsx_router_id, edge_cluster_id=edge_cluster_uuid) def create_logical_router_intf_port_by_ls_id(self, logical_router_id, display_name, tags, ls_id, logical_switch_port_id, address_groups, urpf_mode=None, relay_service_uuid=None, resource_type=None): try: port = self._router_port_client.get_by_lswitch_id(ls_id) except exceptions.ResourceNotFound: if resource_type is None: resource_type = nsx_constants.LROUTERPORT_DOWNLINK return self._router_port_client.create( logical_router_id, display_name, tags, resource_type, logical_switch_port_id, address_groups, urpf_mode=urpf_mode, relay_service_uuid=relay_service_uuid) else: return self._router_port_client.update( port['id'], subnets=address_groups, relay_service_uuid=relay_service_uuid) def add_fip_nat_rules(self, logical_router_id, ext_ip, int_ip, match_ports=None, bypass_firewall=True): self.nsxlib.logical_router.add_nat_rule( logical_router_id, action="SNAT", translated_network=ext_ip, source_net=int_ip, rule_priority=FIP_NAT_PRI, bypass_firewall=bypass_firewall) self.nsxlib.logical_router.add_nat_rule( logical_router_id, action="DNAT", translated_network=int_ip, dest_net=ext_ip, rule_priority=FIP_NAT_PRI, match_ports=match_ports, bypass_firewall=bypass_firewall) def delete_fip_nat_rules_by_internal_ip(self, logical_router_id, int_ip): self.nsxlib.logical_router.delete_nat_rule_by_values( logical_router_id, action="SNAT", match_source_network=int_ip) self.nsxlib.logical_router.delete_nat_rule_by_values( logical_router_id, action="DNAT", translated_network=int_ip) def delete_fip_nat_rules(self, logical_router_id, ext_ip, int_ip): self.nsxlib.logical_router.delete_nat_rule_by_values( logical_router_id, action="SNAT", translated_network=ext_ip, match_source_network=int_ip) self.nsxlib.logical_router.delete_nat_rule_by_values( logical_router_id, action="DNAT", translated_network=int_ip, match_destination_network=ext_ip) def add_static_routes(self, nsx_router_id, route): return self.nsxlib.logical_router.add_static_route( nsx_router_id, route['destination'], route['nexthop']) def 
delete_static_routes(self, nsx_router_id, route): return self.nsxlib.logical_router.delete_static_route_by_values( nsx_router_id, dest_cidr=route['destination'], nexthop=route['nexthop']) vmware-nsxlib-12.0.1/vmware_nsxlib/v3/policy_constants.py0000666000175100017510000000202213244535763023604 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. TCP = 'TCP' UDP = 'UDP' POLICY_INFRA_TENANT = 'infra' ACTION_ALLOW = 'ALLOW' ACTION_DENY = 'DROP' ANY_GROUP = 'ANY' CONDITION_KEY_TAG = 'Tag' CONDITION_MEMBER_VM = 'VirtualMachine' CONDITION_MEMBER_PORT = 'LogicalPort' CONDITION_OP_EQUALS = 'EQUALS' CONDITION_OP_CONTAINS = 'CONTAINS' CONDITION_OP_STARTS_WITH = 'STARTSWITH' DEFAULT_THUMBPRINT = 'abc' STATE_REALIZED = 'REALIZED' STATE_UNREALIZED = 'UNREALIZED' vmware-nsxlib-12.0.1/vmware_nsxlib/v3/resources.py0000666000175100017510000006453113244535763022240 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import netaddr from oslo_log import log from oslo_log import versionutils import requests from vmware_nsxlib._i18n import _ from vmware_nsxlib.v3 import core_resources from vmware_nsxlib.v3 import exceptions from vmware_nsxlib.v3 import nsx_constants from vmware_nsxlib.v3 import utils LOG = log.getLogger(__name__) # TODO(asarfaty): keeping this for backwards compatibility. # core_resources.SwitchingProfileTypeId and # core_resources.PacketAddressClassifier should be used. # This code will be removed in the future. SwitchingProfileTypeId = core_resources.SwitchingProfileTypeId PacketAddressClassifier = core_resources.PacketAddressClassifier class SwitchingProfileTypes(core_resources.SwitchingProfileTypes): # TODO(asarfaty): keeping this for backwards compatibility. # This code will be removed in the future. def __init__(self): versionutils.report_deprecated_feature( LOG, 'resources.SwitchingProfileTypes is deprecated. ' 'Please use core_resources.SwitchingProfileTypes instead.') class WhiteListAddressTypes(core_resources.WhiteListAddressTypes): # TODO(asarfaty): keeping this for backwards compatibility. # This code will be removed in the future. def __init__(self): versionutils.report_deprecated_feature( LOG, 'resources.WhiteListAddressTypes is deprecated. ' 'Please use core_resources.WhiteListAddressTypes instead.') class SwitchingProfile(core_resources.NsxLibSwitchingProfile): # TODO(asarfaty): keeping this for backwards compatibility. 
# This code will be removed in the future. def __init__(self, rest_client, *args, **kwargs): versionutils.report_deprecated_feature( LOG, 'resources.SwitchingProfile is deprecated. ' 'Please use core_resources.NsxLibSwitchingProfile instead.') super(SwitchingProfile, self).__init__(rest_client) class LogicalPort(utils.NsxLibApiBase): @property def uri_segment(self): return 'logical-ports' @property def resource_type(self): return 'LogicalPort' def _build_body_attrs( self, display_name=None, admin_state=True, tags=None, address_bindings=None, switch_profile_ids=None, attachment=None, description=None): tags = tags or [] switch_profile_ids = switch_profile_ids or [] body = {} if tags: body['tags'] = tags if display_name is not None: body['display_name'] = display_name if admin_state is not None: if admin_state: body['admin_state'] = nsx_constants.ADMIN_STATE_UP else: body['admin_state'] = nsx_constants.ADMIN_STATE_DOWN if address_bindings: bindings = [] for binding in address_bindings: address_classifier = { 'ip_address': binding.ip_address, 'mac_address': binding.mac_address } if binding.vlan is not None: address_classifier['vlan'] = int(binding.vlan) bindings.append(address_classifier) body['address_bindings'] = bindings elif address_bindings is not None: body['address_bindings'] = [] if switch_profile_ids: profiles = [] for profile in switch_profile_ids: profiles.append({ 'value': profile.profile_id, 'key': profile.profile_type }) body['switching_profile_ids'] = profiles # Note that attachment could be None, meaning reset it. if attachment is not False: body['attachment'] = attachment if description is not None: body['description'] = description return body def _prepare_attachment(self, attachment_type, vif_uuid, allocate_addresses, vif_type, parent_vif_id, traffic_tag, app_id): if attachment_type and vif_uuid: attachment = {'attachment_type': attachment_type, 'id': vif_uuid} if vif_type: context = {'resource_type': nsx_constants.VIF_RESOURCE_TYPE, 'allocate_addresses': allocate_addresses, 'vif_type': vif_type} if parent_vif_id: context['parent_vif_id'] = parent_vif_id context['traffic_tag'] = traffic_tag context['app_id'] = app_id attachment['context'] = context return attachment elif attachment_type is None and vif_uuid is None: return None # reset attachment else: return False # no attachment change def create(self, lswitch_id, vif_uuid, tags=None, attachment_type=nsx_constants.ATTACHMENT_VIF, admin_state=True, name=None, address_bindings=None, parent_vif_id=None, traffic_tag=None, switch_profile_ids=None, vif_type=None, app_id=None, allocate_addresses=nsx_constants.ALLOCATE_ADDRESS_NONE, description=None): tags = tags or [] body = {'logical_switch_id': lswitch_id} # NOTE(arosen): If parent_vif_id is specified we need to use # CIF attachment type. 
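        # Editorial comment (added for clarity): _prepare_attachment() returns
        # a dict with 'attachment_type' and 'id' (plus a 'context' sub-dict
        # when vif_type is set) if both attachment_type and vif_uuid are
        # given, None if both are None (reset the attachment), and False when
        # the attachment should be left untouched; _build_body_attrs() only
        # omits the 'attachment' key in the False case.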
attachment = self._prepare_attachment(attachment_type, vif_uuid, allocate_addresses, vif_type, parent_vif_id, traffic_tag, app_id) body.update(self._build_body_attrs( display_name=name, admin_state=admin_state, tags=tags, address_bindings=address_bindings, switch_profile_ids=switch_profile_ids, attachment=attachment, description=description)) return self.client.create(self.get_path(), body=body) def delete(self, lport_id): self._delete_with_retry('%s?detach=true' % lport_id) def update(self, lport_id, vif_uuid, name=None, admin_state=None, address_bindings=None, switch_profile_ids=None, tags_update=None, attachment_type=nsx_constants.ATTACHMENT_VIF, parent_vif_id=None, traffic_tag=None, vif_type=None, app_id=None, allocate_addresses=nsx_constants.ALLOCATE_ADDRESS_NONE, description=None): attachment = self._prepare_attachment(attachment_type, vif_uuid, allocate_addresses, vif_type, parent_vif_id, traffic_tag, app_id) lport = {} if tags_update is not None: lport['tags_update'] = tags_update lport.update(self._build_body_attrs( display_name=name, admin_state=admin_state, address_bindings=address_bindings, switch_profile_ids=switch_profile_ids, attachment=attachment, description=description)) return self._update_resource( self.get_path(lport_id), lport, retry=True) def get_by_attachment(self, attachment_type, attachment_id): """Return all logical port matching the attachment type and Id""" url_suffix = ('?attachment_type=%s&attachment_id=%s' % (attachment_type, attachment_id)) return self.client.get(self.get_path(url_suffix)) class LogicalRouter(core_resources.NsxLibLogicalRouter): # TODO(asarfaty): keeping this for backwards compatibility. # This code will be removed in the future. def __init__(self, rest_client, *args, **kwargs): versionutils.report_deprecated_feature( LOG, 'resources.LogicalRouter is deprecated. 
' 'Please use core_resources.NsxLibLogicalRouter instead.') super(LogicalRouter, self).__init__(rest_client) class LogicalRouterPort(utils.NsxLibApiBase): @property def uri_segment(self): return 'logical-router-ports' @staticmethod def _get_relay_binding(relay_service_uuid): return {'service_id': {'target_type': 'LogicalService', 'target_id': relay_service_uuid}} def create(self, logical_router_id, display_name, tags, resource_type, logical_port_id, address_groups, edge_cluster_member_index=None, urpf_mode=None, relay_service_uuid=None): body = {'display_name': display_name, 'resource_type': resource_type, 'logical_router_id': logical_router_id, 'tags': tags or []} if address_groups: body['subnets'] = address_groups if resource_type in [nsx_constants.LROUTERPORT_UPLINK, nsx_constants.LROUTERPORT_DOWNLINK, nsx_constants.LROUTERPORT_CENTRALIZED]: body['linked_logical_switch_port_id'] = { 'target_id': logical_port_id} elif resource_type == nsx_constants.LROUTERPORT_LINKONTIER1: body['linked_logical_router_port_id'] = { 'target_id': logical_port_id} elif logical_port_id: body['linked_logical_router_port_id'] = logical_port_id if edge_cluster_member_index: body['edge_cluster_member_index'] = edge_cluster_member_index if urpf_mode: body['urpf_mode'] = urpf_mode if relay_service_uuid: if (self.nsxlib and self.nsxlib.feature_supported( nsx_constants.FEATURE_DHCP_RELAY)): body['service_bindings'] = [self._get_relay_binding( relay_service_uuid)] else: LOG.error("Ignoring relay_service_uuid for router %s port: " "This feature is not supported.", logical_router_id) return self.client.create(self.get_path(), body=body) def update(self, logical_port_id, **kwargs): logical_router_port = {} # special treatment for updating/removing the relay service if 'relay_service_uuid' in kwargs: if kwargs['relay_service_uuid']: if (self.nsxlib and self.nsxlib.feature_supported( nsx_constants.FEATURE_DHCP_RELAY)): logical_router_port['service_bindings'] = [ self._get_relay_binding( kwargs['relay_service_uuid'])] else: LOG.error("Ignoring relay_service_uuid for router " "port %s: This feature is not supported.", logical_port_id) else: # delete the current one if 'service_bindings' in logical_router_port: logical_router_port['service_bindings'] = [] del kwargs['relay_service_uuid'] for k in kwargs: logical_router_port[k] = kwargs[k] return self._update_resource( self.get_path(logical_port_id), logical_router_port, retry=True) def delete(self, logical_port_id): self._delete_with_retry(logical_port_id) def get_by_lswitch_id(self, logical_switch_id): resource = '?logical_switch_id=%s' % logical_switch_id router_ports = self.client.url_get(self.get_path(resource)) result_count = int(router_ports.get('result_count', "0")) if result_count >= 2: raise exceptions.ManagerError( details=_("Can't support more than one logical router ports " "on same logical switch %s ") % logical_switch_id) elif result_count == 1: return router_ports['results'][0] else: err_msg = (_("Logical router link port not found on logical " "switch %s") % logical_switch_id) raise exceptions.ResourceNotFound( manager=self.client.nsx_api_managers, operation=err_msg) def update_by_lswitch_id(self, logical_router_id, ls_id, **payload): port = self.get_by_lswitch_id(ls_id) return self.update(port['id'], **payload) def delete_by_lswitch_id(self, ls_id): port = self.get_by_lswitch_id(ls_id) self.delete(port['id']) def get_by_router_id(self, logical_router_id): resource = '?logical_router_id=%s' % logical_router_id logical_router_ports = 
self.client.url_get(self.get_path(resource)) return logical_router_ports['results'] def get_tier1_link_port(self, logical_router_id): logical_router_ports = self.get_by_router_id(logical_router_id) for port in logical_router_ports: if port['resource_type'] == nsx_constants.LROUTERPORT_LINKONTIER1: return port raise exceptions.ResourceNotFound( manager=self.client.nsx_api_managers, operation="get router link port") class MetaDataProxy(core_resources.NsxLibMetadataProxy): # TODO(asarfaty): keeping this for backwards compatibility. # This code will be removed in the future. def __init__(self, rest_client, *args, **kwargs): versionutils.report_deprecated_feature( LOG, 'resources.MetaDataProxy is deprecated. ' 'Please use core_resources.NsxLibMetadataProxy instead.') super(MetaDataProxy, self).__init__(rest_client) class DhcpProfile(core_resources.NsxLibDhcpProfile): # TODO(asarfaty): keeping this for backwards compatibility. # This code will be removed in the future. def __init__(self, rest_client, *args, **kwargs): versionutils.report_deprecated_feature( LOG, 'resources.DhcpProfile is deprecated. ' 'Please use core_resources.NsxLibDhcpProfile instead.') super(DhcpProfile, self).__init__(rest_client) class LogicalDhcpServer(utils.NsxLibApiBase): def get_dhcp_opt_code(self, name): _supported_options = { 'subnet-mask': 1, 'time-offset': 2, 'router': 3, 'dns-name': 6, 'host-name': 12, 'boot-file-size': 13, 'domain-name': 15, 'ip-forwarding': 19, 'interface-mtu': 26, 'broadcast-address': 28, 'arp-cache-timeout': 35, 'nis-domain': 40, 'nis-servers': 41, 'ntp-servers': 42, 'netbios-name-servers': 44, 'netbios-dd-server': 45, 'netbios-node-type': 46, 'netbios-scope': 47, 'dhcp-renewal-time': 58, 'dhcp-rebinding-time': 59, 'class-id': 60, 'dhcp-client-identifier': 61, 'nisplus-domain': 64, 'nisplus-servers': 65, 'tftp-server': 66, 'tftp-server-name': 66, 'bootfile-name': 67, 'system-architecture': 93, 'interface-id': 94, 'machine-id': 97, 'name-search': 117, 'subnet-selection': 118, 'domain-search': 119, 'classless-static-route': 121, 'tftp-server-address': 150, 'etherboot': 175, 'config-file': 209, 'path-prefix': 210, 'reboot-time': 211, } return _supported_options.get(name) @property def uri_segment(self): return 'dhcp/servers' @property def resource_type(self): return 'LogicalDhcpServer' def _construct_server(self, body, dhcp_profile_id=None, server_ip=None, name=None, dns_nameservers=None, domain_name=None, gateway_ip=False, options=None, tags=None): if name: body['display_name'] = name if dhcp_profile_id: body['dhcp_profile_id'] = dhcp_profile_id if server_ip: body['ipv4_dhcp_server']['dhcp_server_ip'] = server_ip if dns_nameservers is not None: # Note that [] is valid for dns_nameservers, means deleting it. body['ipv4_dhcp_server']['dns_nameservers'] = dns_nameservers if domain_name: body['ipv4_dhcp_server']['domain_name'] = domain_name if gateway_ip is not False: # Note that None is valid for gateway_ip, means deleting it. 
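            # Editorial comment (added for clarity): gateway_ip defaults to
            # False as a "not specified" sentinel, so only an explicitly
            # passed value (including None) is written to the request body;
            # create_binding() below uses the same convention.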
body['ipv4_dhcp_server']['gateway_ip'] = gateway_ip if options: body['ipv4_dhcp_server']['options'] = options if tags: body['tags'] = tags def create(self, dhcp_profile_id, server_ip, name=None, dns_nameservers=None, domain_name=None, gateway_ip=False, options=None, tags=None): body = {'ipv4_dhcp_server': {}} self._construct_server(body, dhcp_profile_id, server_ip, name, dns_nameservers, domain_name, gateway_ip, options, tags) return self.client.create(self.get_path(), body=body) def update(self, uuid, dhcp_profile_id=None, server_ip=None, name=None, dns_nameservers=None, domain_name=None, gateway_ip=False, options=None, tags=None): body = {'ipv4_dhcp_server': {}} self._construct_server(body, dhcp_profile_id, server_ip, name, dns_nameservers, domain_name, gateway_ip, options, tags) return self._update_with_retry(uuid, body) def create_binding(self, server_uuid, mac, ip, hostname=None, lease_time=None, options=None, gateway_ip=False): body = {'mac_address': mac, 'ip_address': ip} if hostname: body['host_name'] = hostname if lease_time: body['lease_time'] = lease_time if options: body['options'] = options if gateway_ip is not False: # Note that None is valid for gateway_ip, means deleting it. body['gateway_ip'] = gateway_ip url = "%s/static-bindings" % server_uuid return self.client.url_post(self.get_path(url), body) def get_binding(self, server_uuid, binding_uuid): url = "%s/static-bindings/%s" % (server_uuid, binding_uuid) return self.get(url) def update_binding(self, server_uuid, binding_uuid, **kwargs): body = {} body.update(kwargs) url = "%s/static-bindings/%s" % (server_uuid, binding_uuid) self._update_resource(self.get_path(url), body, retry=True) def delete_binding(self, server_uuid, binding_uuid): url = "%s/static-bindings/%s" % (server_uuid, binding_uuid) return self.delete(url) class IpPool(utils.NsxLibApiBase): @property def uri_segment(self): return 'pools/ip-pools' @property def resource_type(self): return 'IpPool' def _generate_ranges(self, cidr, gateway_ip): """Create list of ranges from the given cidr. Ignore the gateway_ip, if defined """ ip_set = netaddr.IPSet(netaddr.IPNetwork(cidr)) if gateway_ip: ip_set.remove(gateway_ip) return [{"start": str(r[0]), "end": str(r[-1])} for r in ip_set.iter_ipranges()] def create(self, cidr, allocation_ranges=None, display_name=None, description=None, gateway_ip=None, dns_nameservers=None, tags=None): """Create an IpPool. Arguments: cidr: (required) allocation_ranges: (optional) a list of dictionaries, each with 'start' and 'end' keys, and IP values. If None: the cidr will be used to create the ranges, excluding the gateway. 
display_name: (optional) description: (optional) gateway_ip: (optional) dns_nameservers: (optional) list of addresses """ if not cidr: raise exceptions.InvalidInput(operation="IP Pool create", arg_name="cidr", arg_val=cidr) if not allocation_ranges: # generate ranges from (cidr - gateway) allocation_ranges = self._generate_ranges(cidr, gateway_ip) subnet = {"allocation_ranges": allocation_ranges, "cidr": cidr} if gateway_ip: subnet["gateway_ip"] = gateway_ip if dns_nameservers: subnet["dns_nameservers"] = dns_nameservers body = {"subnets": [subnet]} if description: body["description"] = description if display_name: body["display_name"] = display_name if tags: body['tags'] = tags return self.client.create(self.get_path(), body=body) def _update_param_in_pool(self, args_dict, key, pool_data): # update the arg only if it exists in the args dictionary if key in args_dict: if args_dict[key]: pool_data[key] = args_dict[key] else: # remove the current value del pool_data[key] def update(self, pool_id, **kwargs): """Update the given attributes in the current pool configuration.""" # Get the current pool, and remove irrelevant fields pool = self.get(pool_id) for key in ["resource_type", "_create_time", "_create_user" "_last_modified_user", "_last_modified_time"]: pool.pop(key, None) # update only the attributes in kwargs self._update_param_in_pool(kwargs, 'display_name', pool) self._update_param_in_pool(kwargs, 'description', pool) self._update_param_in_pool(kwargs, 'tags', pool) self._update_param_in_pool(kwargs, 'gateway_ip', pool["subnets"][0]) self._update_param_in_pool(kwargs, 'dns_nameservers', pool["subnets"][0]) self._update_param_in_pool(kwargs, 'allocation_ranges', pool["subnets"][0]) self._update_param_in_pool(kwargs, 'cidr', pool["subnets"][0]) return self.client.update(self.get_path(pool_id), pool) def allocate(self, pool_id, ip_addr=None, display_name=None, tags=None): """Allocate an IP from a pool.""" # Note: Currently the backend does not support allocation of a # specific IP, so an exception will be raised by the backend. # Depending on the backend version, this may be allowed in the future url = "%s?action=ALLOCATE" % pool_id body = {"allocation_id": ip_addr} if tags is not None: body['tags'] = tags if display_name is not None: body['display_name'] = display_name return self.client.url_post(self.get_path(url), body=body) def release(self, pool_id, ip_addr): """Release an IP back to a pool.""" url = "%s?action=RELEASE" % pool_id body = {"allocation_id": ip_addr} return self.client.url_post(self.get_path(url), body=body) def get_allocations(self, pool_id): """Return information about the allocated IPs in the pool.""" url = "%s/allocations" % pool_id return self.client.url_get(self.get_path(url)) class NodeHttpServiceProperties(utils.NsxLibApiBase): @property def uri_segment(self): return 'node/services/http' @property def resource_type(self): return 'NodeHttpServiceProperties' def get_properties(self): return self.client.get(self.get_path()) def get_rate_limit(self): if (self.nsxlib and not self.nsxlib.feature_supported( nsx_constants.FEATURE_RATE_LIMIT)): msg = (_("Rate limit is not supported by NSX version %s") % self.nsxlib.get_version()) raise exceptions.ManagerError(details=msg) properties = self.get_properties() return properties.get('service_properties', {}).get( 'client_api_rate_limit') def update_rate_limit(self, value): """update the NSX rate limit. default value is 40. 
0 means no limit""" if (self.nsxlib and not self.nsxlib.feature_supported( nsx_constants.FEATURE_RATE_LIMIT)): msg = (_("Rate limit is not supported by NSX version %s") % self.nsxlib.get_version()) raise exceptions.ManagerError(details=msg) properties = self.get_properties() if 'service_properties' in properties: properties['service_properties'][ 'client_api_rate_limit'] = int(value) # update the value using a PUT command, which is expected to return 202 expected_results = [requests.codes.accepted] self.client.update(self.uri_segment, properties, expected_results=expected_results) # restart the http service using POST, which is expected to return 202 restart_url = self.uri_segment + '?action=restart' self.client.create(restart_url, expected_results=expected_results) def delete(self, uuid): """Not supported""" msg = _("Delete is not supported for %s") % self.uri_segment raise exceptions.ManagerError(details=msg) def get(self, uuid): """Not supported""" msg = _("Get is not supported for %s") % self.uri_segment raise exceptions.ManagerError(details=msg) def list(self): """Not supported""" msg = _("List is not supported for %s") % self.uri_segment raise exceptions.ManagerError(details=msg) def find_by_display_name(self, display_name): """Not supported""" msg = _("Find is not supported for %s") % self.uri_segment raise exceptions.ManagerError(details=msg) vmware-nsxlib-12.0.1/vmware_nsxlib/v3/constants.py0000666000175100017510000000620213244535763022231 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. IPv4_ANY = '0.0.0.0/0' # Protocol names and numbers for Security Groups/Firewalls PROTO_NAME_AH = 'ah' PROTO_NAME_DCCP = 'dccp' PROTO_NAME_EGP = 'egp' PROTO_NAME_ESP = 'esp' PROTO_NAME_GRE = 'gre' PROTO_NAME_ICMP = 'icmp' PROTO_NAME_IGMP = 'igmp' PROTO_NAME_IPV6_ENCAP = 'ipv6-encap' PROTO_NAME_IPV6_FRAG = 'ipv6-frag' PROTO_NAME_IPV6_ICMP = 'ipv6-icmp' # For backward-compatibility of security group rule API, we keep the old value # for IPv6 ICMP. It should be clean up in the future. 
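# Editorial comment (added for clarity): both the current 'ipv6-icmp' name and
# the legacy 'icmpv6' alias below map to the same protocol number in
# IP_PROTOCOL_MAP, i.e. IP_PROTOCOL_MAP['icmpv6'] == PROTO_NUM_IPV6_ICMP == 58.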
PROTO_NAME_IPV6_ICMP_LEGACY = 'icmpv6' PROTO_NAME_IPV6_NONXT = 'ipv6-nonxt' PROTO_NAME_IPV6_OPTS = 'ipv6-opts' PROTO_NAME_IPV6_ROUTE = 'ipv6-route' PROTO_NAME_OSPF = 'ospf' PROTO_NAME_PGM = 'pgm' PROTO_NAME_RSVP = 'rsvp' PROTO_NAME_SCTP = 'sctp' PROTO_NAME_TCP = 'tcp' PROTO_NAME_UDP = 'udp' PROTO_NAME_UDPLITE = 'udplite' PROTO_NAME_VRRP = 'vrrp' PROTO_NUM_AH = 51 PROTO_NUM_DCCP = 33 PROTO_NUM_EGP = 8 PROTO_NUM_ESP = 50 PROTO_NUM_GRE = 47 PROTO_NUM_ICMP = 1 PROTO_NUM_IGMP = 2 PROTO_NUM_IPV6_ENCAP = 41 PROTO_NUM_IPV6_FRAG = 44 PROTO_NUM_IPV6_ICMP = 58 PROTO_NUM_IPV6_NONXT = 59 PROTO_NUM_IPV6_OPTS = 60 PROTO_NUM_IPV6_ROUTE = 43 PROTO_NUM_OSPF = 89 PROTO_NUM_PGM = 113 PROTO_NUM_RSVP = 46 PROTO_NUM_SCTP = 132 PROTO_NUM_TCP = 6 PROTO_NUM_UDP = 17 PROTO_NUM_UDPLITE = 136 PROTO_NUM_VRRP = 112 IP_PROTOCOL_MAP = {PROTO_NAME_AH: PROTO_NUM_AH, PROTO_NAME_DCCP: PROTO_NUM_DCCP, PROTO_NAME_EGP: PROTO_NUM_EGP, PROTO_NAME_ESP: PROTO_NUM_ESP, PROTO_NAME_GRE: PROTO_NUM_GRE, PROTO_NAME_ICMP: PROTO_NUM_ICMP, PROTO_NAME_IGMP: PROTO_NUM_IGMP, PROTO_NAME_IPV6_ENCAP: PROTO_NUM_IPV6_ENCAP, PROTO_NAME_IPV6_FRAG: PROTO_NUM_IPV6_FRAG, PROTO_NAME_IPV6_ICMP: PROTO_NUM_IPV6_ICMP, # For backward-compatibility of security group rule API PROTO_NAME_IPV6_ICMP_LEGACY: PROTO_NUM_IPV6_ICMP, PROTO_NAME_IPV6_NONXT: PROTO_NUM_IPV6_NONXT, PROTO_NAME_IPV6_OPTS: PROTO_NUM_IPV6_OPTS, PROTO_NAME_IPV6_ROUTE: PROTO_NUM_IPV6_ROUTE, PROTO_NAME_OSPF: PROTO_NUM_OSPF, PROTO_NAME_PGM: PROTO_NUM_PGM, PROTO_NAME_RSVP: PROTO_NUM_RSVP, PROTO_NAME_SCTP: PROTO_NUM_SCTP, PROTO_NAME_TCP: PROTO_NUM_TCP, PROTO_NAME_UDP: PROTO_NUM_UDP, PROTO_NAME_UDPLITE: PROTO_NUM_UDPLITE, PROTO_NAME_VRRP: PROTO_NUM_VRRP} vmware-nsxlib-12.0.1/vmware_nsxlib/v3/exceptions.py0000666000175100017510000001033513244535763022400 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_utils import excutils import six from vmware_nsxlib._i18n import _ class NsxLibException(Exception): """Base NsxLib Exception. To correctly use this class, inherit from it and define a 'message' property. That message will get printf'd with the keyword arguments provided to the constructor. 
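
    Illustrative subclass (editorial sketch; not one of the exceptions defined
    in this module):

        class FeatureUnsupported(NsxLibException):
            message = _("Feature %(feature)s is not supported")

        raise FeatureUnsupported(feature='example-feature')
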
""" message = _("An unknown exception occurred.") def __init__(self, **kwargs): try: super(NsxLibException, self).__init__(self.message % kwargs) self.msg = self.message % kwargs except Exception: with excutils.save_and_reraise_exception() as ctxt: if not self.use_fatal_exceptions(): ctxt.reraise = False # at least get the core message out if something happened super(NsxLibException, self).__init__(self.message) if six.PY2: def __unicode__(self): return unicode(self.msg) def __str__(self): return self.msg def use_fatal_exceptions(self): return False class ObjectAlreadyExists(NsxLibException): message = _("%(object_type)s already exists") class ObjectNotGenerated(NsxLibException): message = _("%(object_type)s was not generated") class CertificateError(NsxLibException): message = _("Certificate error: %(msg)s") class NsxLibInvalidInput(NsxLibException): message = _("Invalid input for operation: %(error_message)s.") class ManagerError(NsxLibException): message = _("Unexpected error from backend manager (%(manager)s) " "for %(operation)s %(details)s") def __init__(self, **kwargs): details = kwargs.get('details', '') kwargs['details'] = ': %s' % details super(ManagerError, self).__init__(**kwargs) try: self.msg = self.message % kwargs except KeyError: self.msg = details self.error_code = kwargs.get('error_code') class ResourceNotFound(ManagerError): message = _("Resource could not be found on backend (%(manager)s) for " "%(operation)s") class BackendResourceNotFound(ResourceNotFound): message = _("%(details)s On backend (%(manager)s) with Operation: " "%(operation)s") class InvalidInput(ManagerError): message = _("%(operation)s failed: Invalid input %(arg_val)s " "for %(arg_name)s") class StaleRevision(ManagerError): pass class TooManyRequests(ManagerError): pass class ClientCertificateNotTrusted(ManagerError): message = _("Certificate not trusted") class BadXSRFToken(ManagerError): message = _("Bad or expired XSRF token") class ServiceClusterUnavailable(ManagerError): message = _("Service cluster: '%(cluster_id)s' is unavailable. Please, " "check NSX setup and/or configuration") class NSGroupMemberNotFound(ManagerError): message = _("Could not find NSGroup %(nsgroup_id)s member %(member_id)s " "for removal.") class NSGroupIsFull(ManagerError): message = _("NSGroup %(nsgroup_id)s contains has reached its maximum " "capacity, unable to add additional members.") class NumberOfNsgroupCriteriaTagsReached(ManagerError): message = _("Port can be associated with at most %(max_num)s " "security-groups.") class SecurityGroupMaximumCapacityReached(ManagerError): message = _("Security Group %(sg_id)s has reached its maximum capacity, " "no more ports can be associated with this security-group.") class NsxSearchInvalidQuery(NsxLibException): message = _("Invalid input for NSX search query. Reason: %(reason)s") vmware-nsxlib-12.0.1/vmware_nsxlib/v3/load_balancer.py0000666000175100017510000004725213244535763022775 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3 import utils LOG = logging.getLogger(__name__) class ApplicationProfileTypes(object): """LoadBalancer Application Profile types""" HTTP = "LbHttpProfile" FAST_TCP = "LbFastTcpProfile" FAST_UDP = "LbFastUdpProfile" class PersistenceProfileTypes(object): """LoadBalancer Persistence Profile types""" COOKIE = "LbCookiePersistenceProfile" SOURCE_IP = "LbSourceIpPersistenceProfile" class MonitorTypes(object): """LoadBalancer Monitor types""" HTTP = "LbHttpMonitor" HTTPS = "LbHttpsMonitor" ICMP = "LbIcmpMonitor" PASSIVE = "LbPassiveMonitor" TCP = "LbTcpMonitor" UDP = "LbUdpMonitor" class LoadBalancerBase(utils.NsxLibApiBase): resource = '' @staticmethod def _build_args(body, display_name=None, description=None, tags=None, resource_type=None, **kwargs): if display_name: body['display_name'] = display_name if description: body['description'] = description if tags: body['tags'] = tags if resource_type: body['resource_type'] = resource_type body.update(kwargs) return body def add_to_list(self, resource_id, item_id, item_key): """Add item_id to resource item_key list :param resource_id: resource id, e.g. pool_id, virtual_server_id :param item_id: item to be added to the list :param item_key: item list in the resource, e.g. rule_ids in virtual server :return: client update response """ # Using internal method so we can access max_attempts in the decorator @utils.retry_upon_exception( nsxlib_exc.StaleRevision, max_attempts=self.client.max_attempts) def do_update(): object_url = self.resource + '/' + resource_id body = self.client.get(object_url) if item_key in body: item_list = body[item_key] if item_id not in item_list: item_list.append(item_id) else: LOG.error('Item %s is already in resource %s', item_id, item_key) return body else: item_list = [item_id] body[item_key] = item_list return self.client.update(object_url, body) return do_update() def remove_from_list(self, resource_id, item_id, item_key): """Remove item_id from resource item_key list :param resource_id: resource id, e.g. pool_id, virtual_server_id :param item_id: item to be removed from the list :param item_key: item list in the resource, e.g. 
rule_ids in virtual server :return: client update response """ # Using internal method so we can access max_attempts in the decorator @utils.retry_upon_exception( nsxlib_exc.StaleRevision, max_attempts=self.client.max_attempts) def do_update(): object_url = self.resource + '/' + resource_id body = self.client.get(object_url) item_list = body.get(item_key) if item_list and item_id in item_list: item_list.remove(item_id) body[item_key] = item_list return self.client.update(object_url, body) else: ops = ('removing item %s from resource %s %s as it is not in ' 'the list', item_id, item_key, item_list) raise nsxlib_exc.ResourceNotFound( manager=self.client.nsx_api_managers, operation=ops) return do_update() def create(self, display_name=None, description=None, tags=None, resource_type=None, **kwargs): orig_body = {} body = self._build_args(orig_body, display_name, description, tags, resource_type, **kwargs) return self.client.create(self.resource, body) def list(self): return self.client.list(resource=self.resource) def get(self, object_id): object_url = self.resource + '/' + object_id return self.client.get(object_url) def update(self, object_id, display_name=None, description=None, tags=None, resource_type=None, **kwargs): # Using internal method so we can access max_attempts in the decorator @utils.retry_upon_exception( nsxlib_exc.StaleRevision, max_attempts=self.client.max_attempts) def do_update(): object_url = self.resource + '/' + object_id orig_body = self.client.get(object_url) body = self._build_args(orig_body, display_name, description, tags, resource_type, **kwargs) return self.client.update(object_url, body) return do_update() def delete(self, object_id): object_url = self.resource + '/' + object_id return self.client.delete(object_url) class ApplicationProfile(LoadBalancerBase): resource = 'loadbalancer/application-profiles' @staticmethod def _build_args(body, display_name=None, description=None, tags=None, resource_type=None, **kwargs): if display_name: body['display_name'] = display_name if description: body['description'] = description if tags: body['tags'] = tags if resource_type is None: return body if resource_type == ApplicationProfileTypes.HTTP: body['resource_type'] = resource_type extra_args = ['http_redirect_to', 'http_redirect_to_https', 'ntlm', 'request_header_size', 'x_forwarded_for', 'idle_timeout'] return utils.build_extra_args(body, extra_args, **kwargs) elif (resource_type == ApplicationProfileTypes.FAST_TCP or resource_type == ApplicationProfileTypes.FAST_UDP): body['resource_type'] = resource_type extra_args = ['ha_flow_mirroring_enabled', 'idle_timeout'] return utils.build_extra_args(body, extra_args, **kwargs) else: raise nsxlib_exc.InvalidInput( operation='create_application_profile', arg_val=resource_type, arg_name='resource_type') class PersistenceProfile(LoadBalancerBase): resource = 'loadbalancer/persistence-profiles' @staticmethod def _build_args(body, display_name=None, description=None, tags=None, resource_type=None, **kwargs): if display_name: body['display_name'] = display_name if description: body['description'] = description if tags: body['tags'] = tags if resource_type == PersistenceProfileTypes.COOKIE: body['resource_type'] = resource_type extra_args = ['cookie_domain', 'cookie_fallback', 'cookie_garble', 'cookie_mode', 'cookie_name', 'cookie_path', 'cookie_time'] return utils.build_extra_args(body, extra_args, **kwargs) elif resource_type == PersistenceProfileTypes.SOURCE_IP: body['resource_type'] = resource_type extra_args = 
['ha_persistence_mirroring_enabled', 'purge', 'timeout'] return utils.build_extra_args(body, extra_args, **kwargs) else: raise nsxlib_exc.InvalidInput( operation='create_persistence_profile', arg_val=resource_type, arg_name='resource_type') class Rule(LoadBalancerBase): resource = 'loadbalancer/rules' class ClientSslProfile(LoadBalancerBase): resource = 'loadbalancer/client-ssl-profiles' class ServerSslProfile(LoadBalancerBase): resource = 'loadbalancer/server-ssl-profiles' class Monitor(LoadBalancerBase): resource = 'loadbalancer/monitors' @staticmethod def _build_args(body, display_name=None, description=None, tags=None, resource_type=None, **kwargs): if display_name: body['display_name'] = display_name if description: body['description'] = description if tags: body['tags'] = tags if resource_type == MonitorTypes.HTTP: body['resource_type'] = resource_type extra_args = ['fall_count', 'interval', 'monitor_port', 'request_body', 'request_method', 'request_url', 'request_version', 'response_body', 'response_status', 'rise_count', 'timeout'] return utils.build_extra_args(body, extra_args, **kwargs) elif resource_type == MonitorTypes.HTTPS: body['resource_type'] = resource_type extra_args = ['certificate_chain_depth', 'ciphers', 'client_certificate_id', 'fall_count', 'interval', 'monitor_port', 'protocols', 'request_body', 'request_method', 'request_url', 'request_version', 'response_body', 'response_status', 'rise_count', 'server_auth', 'server_auth_ca_ids', 'server_auth_crl_ids', 'timeout'] return utils.build_extra_args(body, extra_args, **kwargs) elif resource_type == MonitorTypes.ICMP: body['resource_type'] = resource_type extra_args = ['data_length', 'fall_count', 'interval', 'monitor_port', 'rise_count', 'timeout'] return utils.build_extra_args(body, extra_args, **kwargs) elif resource_type == MonitorTypes.PASSIVE: body['resource_type'] = resource_type extra_args = ['max_fails', 'timeout'] return utils.build_extra_args(body, extra_args, **kwargs) elif (resource_type == MonitorTypes.TCP or resource_type == MonitorTypes.UDP): body['resource_type'] = resource_type extra_args = ['fall_count', 'interval', 'monitor_port', 'receive', 'rise_count', 'send', 'timeout'] return utils.build_extra_args(body, extra_args, **kwargs) else: raise nsxlib_exc.InvalidInput( operation='create_monitor', arg_val=resource_type, arg_name='resource_type') class Pool(LoadBalancerBase): resource = 'loadbalancer/pools' def update_pool_with_members(self, pool_id, members): # Using internal method so we can access max_attempts in the decorator @utils.retry_upon_exception( nsxlib_exc.StaleRevision, max_attempts=self.client.max_attempts) def do_update(): object_url = self.resource + '/' + pool_id body = self.client.get(object_url) body['members'] = members return self.client.update(object_url, body) return do_update() def add_monitor_to_pool(self, pool_id, monitor_id): self.add_to_list(pool_id, monitor_id, 'active_monitor_ids') def remove_monitor_from_pool(self, pool_id, monitor_id): self.remove_from_list(pool_id, monitor_id, 'active_monitor_ids') class VirtualServer(LoadBalancerBase): resource = 'loadbalancer/virtual-servers' def update_virtual_server_with_pool(self, virtual_server_id, pool_id): # Using internal method so we can access max_attempts in the decorator @utils.retry_upon_exception( nsxlib_exc.StaleRevision, max_attempts=self.client.max_attempts) def do_update(): object_url = self.resource + '/' + virtual_server_id body = self.client.get(object_url) body['pool_id'] = pool_id return 
self.client.update(object_url, body) return do_update() def update_virtual_server_with_profiles(self, virtual_server_id, application_profile_id=None, persistence_profile_id=None, ip_protocol=None): # Using internal method so we can access max_attempts in the decorator @utils.retry_upon_exception( nsxlib_exc.StaleRevision, max_attempts=self.client.max_attempts) def do_update(): object_url = self.resource + '/' + virtual_server_id body = self.client.get(object_url) if application_profile_id: body['application_profile_id'] = application_profile_id if persistence_profile_id: body['persistence_profile_id'] = persistence_profile_id # In case the application profile is updated and its protocol # is updated as well, backend requires us to pass the new # protocol in the virtual server body. if ip_protocol: body['ip_protocol'] = ip_protocol return self.client.update(object_url, body) return do_update() def update_virtual_server_with_vip(self, virtual_server_id, vip): # Using internal method so we can access max_attempts in the decorator @utils.retry_upon_exception( nsxlib_exc.StaleRevision, max_attempts=self.client.max_attempts) def do_update(): object_url = self.resource + '/' + virtual_server_id body = self.client.get(object_url) body['ip_address'] = vip return self.client.update(object_url, body) return do_update() def add_rule(self, vs_id, rule_id): self.add_to_list(vs_id, rule_id, 'rule_ids') def remove_rule(self, vs_id, rule_id): self.remove_from_list(vs_id, rule_id, 'rule_ids') def add_client_ssl_profile_binding(self, virtual_server_id, ssl_profile_id, default_certificate_id, sni_certificate_ids=None, **kwargs): # Using internal method so we can access max_attempts in the decorator @utils.retry_upon_exception( nsxlib_exc.StaleRevision, max_attempts=self.client.max_attempts) def do_update(): binding = {'ssl_profile_id': ssl_profile_id, 'default_certificate_id': default_certificate_id} if sni_certificate_ids: binding.update({'sni_certificate_ids': sni_certificate_ids}) valid_args = ['client_auth_ca_ids', 'client_auth_crl_ids', 'certificate_chain_depth', 'client_auth'] # Remove the args that is not in the valid_args list or the # keyword argument doesn't have value. for arg in kwargs: if arg in valid_args and kwargs.get(arg): binding[arg] = kwargs.get(arg) object_url = self.resource + '/' + virtual_server_id body = self.client.get(object_url) body['client_ssl_profile_binding'] = binding return self.client.update(object_url, body) return do_update() def add_server_ssl_profile_binding(self, virtual_server_id, ssl_profile_id, **kwargs): # Using internal method so we can access max_attempts in the decorator @utils.retry_upon_exception( nsxlib_exc.StaleRevision, max_attempts=self.client.max_attempts) def do_update(): binding = {'ssl_profile_id': ssl_profile_id} valid_args = ['server_auth_ca_ids', 'server_auth_crl_ids', 'certificate_chain_depth', 'server_auth', 'client_certificate_id'] # Remove the args that is not in the valid_args list or the # keyword argument doesn't have value. 
for arg in kwargs: if arg in valid_args and kwargs.get(arg): binding[arg] = kwargs[arg] object_url = self.resource + '/' + virtual_server_id body = self.client.get(object_url) body['server_ssl_profile_binding'] = binding return self.client.update(object_url, body) return do_update() class Service(LoadBalancerBase): resource = 'loadbalancer/services' def update_service_with_virtual_servers(self, service_id, virtual_server_ids): # Using internal method so we can access max_attempts in the decorator @utils.retry_upon_exception( nsxlib_exc.StaleRevision, max_attempts=self.client.max_attempts) def do_update(): object_url = self.resource + '/' + service_id body = self.client.get(object_url) body['virtual_server_ids'] = virtual_server_ids return self.client.update(object_url, body) return do_update() def update_service_with_attachment(self, service_id, logical_router_id): # Using internal method so we can access max_attempts in the decorator @utils.retry_upon_exception( nsxlib_exc.StaleRevision, max_attempts=self.client.max_attempts) def do_update(): object_url = self.resource + '/' + service_id body = self.client.get(object_url) body['attachment'] = {'target_id': logical_router_id, 'target_type': 'LogicalRouter'} return self.client.update(object_url, body) return do_update() def add_virtual_server(self, service_id, vs_id): self.add_to_list(service_id, vs_id, 'virtual_server_ids') def remove_virtual_server(self, service_id, vs_id): self.remove_from_list(service_id, vs_id, 'virtual_server_ids') def get_router_lb_service(self, nsx_router_id): lb_services = self.list()['results'] for service in lb_services: if service.get('attachment'): if service['attachment']['target_id'] == nsx_router_id: return service def get_status(self, service_id): object_url = '%s/%s/%s' % (self.resource, service_id, 'status') return self.client.get(object_url) def get_stats(self, service_id, source='realtime'): object_url = '%s/%s/%s?source=%s' % (self.resource, service_id, 'statistics', source) return self.client.get(object_url) class LoadBalancer(object): """This is the class that have all load balancer resource clients""" def __init__(self, client, nsxlib_config=None): self.service = Service(client, nsxlib_config) self.virtual_server = VirtualServer(client, nsxlib_config) self.pool = Pool(client, nsxlib_config) self.monitor = Monitor(client, nsxlib_config) self.application_profile = ApplicationProfile(client, nsxlib_config) self.persistence_profile = PersistenceProfile(client, nsxlib_config) self.client_ssl_profile = ClientSslProfile(client, nsxlib_config) self.server_ssl_profile = ServerSslProfile(client, nsxlib_config) self.rule = Rule(client, nsxlib_config) vmware-nsxlib-12.0.1/ChangeLog0000664000175100017510000002417213244536264016206 0ustar zuulzuul00000000000000CHANGES ======= 12.0.1 ------ * VPN DPD timeout changes * VPN policy rules update * Update UPPER\_CONSTRAINTS\_FILE for stable/queens * Update .gitreview for stable/queens * Fix rate-limit attribute name * Enable router interface to have 'centralized' type * VLAN ID and trunk spec are exclusive - can only set one * Enable search\_by\_tags to use only scope or tag * Ensure that max\_attempts is set * Remove obsolete tempest-lib * Updated from global requirements * Fix VPN local endpoint structure * Fix VPN api as the NSX api changed * Updated from global requirements * Support get & update for rate limit * NSXv3: Enhance NSGroup create and update functions * Refactor security modules and retry * Raise StaleResource when a 409 is returned by NSX * NSX rate 
limit support * Add router advertisement rules support * initial vpn ipsec resources * Updated from global requirements * Updated from global requirements * Allow creating firewall section with empty rule * Ensure delete retry for ip set resources * Ensure update retry for load balancing resources * Add in a retry decoractor to loadbalancer updates * Mock the update tags limits code in unittests * Logical switch trunk vlan support * Support NSX tag limitations * NSXv3: Return body if resource\_type is None * Add find cert by pem data method * Updated from global requirements * Remove neutron-lib from the dependencies 11.1.4 ------ * Updated from global requirements * Updated from global requirements * Allow add\_rule(s) method to accept 'operation' as an arg * Re-apply skipped cluster test * Add in feature for 'on behalf of' * Add IPSEC VPN feature flag and update version number * use new payload objects for \*\_INIT callbacks * Fix typos in comments * Updated from global requirements 11.1.3 ------ * Provide a callback to inject headers to the NSX * Add support to retrieve VIFs and VirtualMachines * NSXv3: Update stats api * Fix failing unit test * Add ssl profile binding methods for LB virtual server * Add router to nsxlib class * Add private\_key and passphrase to cert creation 11.1.2 ------ * Cache Get results for some nsxlib resources 11.1.1 ------ * Updated from global requirements * Add supported feature for VLAN router interfaces * Update ip\_protocol during loadbalancer app profile updates * Support ENS transport zone * Updated from global requirements * Update update\_advertisement depending on NSX version * Add LB related flags for update\_route\_advertisement * Move LBaaS to 2.1 supported feature * Support DHCP Relay profile * Add feature to update NSGroup and FirewallSection tags * Refactor resources tests * Updated from global requirements * Fix client cert authentication * Fix transport zones mock for testcase * Fix request with retry code * New api: update metadata proxy server * Add log messages to retry attempts * Write and delete client cert for each request * Add FW\_INSERT\_AFTER constants to nsx\_constants * Add the nsxlib to all NsxLibApiBase resources * Handle bad or expired XSRF token * Add allow-overwrite default header by config * Create session for XSRF token * New api: list logical routers by type * Ensure retry on stale rule update or deletion * Updated from global requirements * Get transport zone type api * Updated from global requirements * Remove unused code * Allow setting the description of switches and ports * Policy: Delete service entries * Policy: Delete communication profile entries * Ensure retry on stale resources for exclude list updates * Use domain in policy deployment maps api * LBaaS: Some API changes on LB class * Revert "Write and delete client cert for each request" * Remove test requirement WebTest * Write and delete client cert for each request * LBaaS: Fix a typo in add\_to\_list method * LBaaS: Add common add\_to\_list and remove\_from\_list * Nsx Policy: support ICMP service * Nsx policy: adjust to latest backend changes * DHCP: add method to get static routes * Updated from global requirements * NSX Policy: Adjust to changes in backend API * Updated from global requirements * VMware-NSXLib:Remove Invalid Link * Add Load Balancer Application Rule * Updated from global requirements * Api for getting a resource id by the type & tag * Add DHPC relay service to router port * Add resource type to resource definitions * Add policy apis to 
check realized state 10.1.2 ------ * Support getting all results from search API * Use flake8-import-order plugin * Support different options for deleting NAT rules * Updated from global requirements * Adding optional source network to GW SNAT rule creation * Add constants for PAREMT and CHILD vif types * Extend QosSwitchingProfile actions * Add TODO for load balancer feature support * list & update methods for router NAT rules * LB: Add methods to add/remove monitor from pool * Add NSXv3 2.1.0 version constant * Updated from global requirements * Catch another error type for missing certificate * Support bypass-firewall param for router NAT rules * Add LB methods for neutron LBaaS * New api: Get the default rule in a firewall section * Extend client silent mode * Updated from global requirements * Adjust to cosmetic changes in policy API * Remove support for py34 * Avoid version API call for policy lib * Updated from global requirements * Updated from global requirements * Update policy resources apis * Add ellapsed time to REST response logs * Add location header to response mocks * Change log level of cert-realated SSL errors * Mask passwords while logging REST requests * Change resource type for LB resources * Adding API for checking features availabilty * Updated from global requirements * LBaaS: Add status and stats APIs * Remove \n from log messages * New API to get the firewall section of a router * Support tftp-server dhcp option * Updated from global requirements * nsxv3: Add API wrapper for Load Balancer * Updated from global requirements * Updated from global requirements * Add informative error message for 404 responses * New api to get logical port by attachment * Refactor nsxlib resources * Updated from global requirements * Correct param nsx\_api\_managers description error 10.1.1 ------ * Add IPAM error code * Add method get\_excludelist to the class NsxLibFirewallSection * NSX Policy cosmetic changes * NSX Policy resources * Updated from global requirements * NSXv3: Fix init of default firewall section * Updated from global requirements * Add support to force delete routers * NSX Policy preparations * Drop log translations 10.1.0.a1 --------- * Updated from global requirements * Fix for hacking N536 * Add list method to the qos profile * Name and ID validation may be paginated * Fix FIP DNAT rule match\_ports bug * [IPSet]: Allow updating IPSet with empty list * Add util method to retrieve complex expressions for NSGroup * Support multiple client certificate per identity * Adding Optional default dns values for native dhcp * Deprecate unused dhcp\_profile\_uuid from config * Pass node ID and user permissions when creating NSX identity * Updated from global requirements * Updated from global requirements * Fix FW rule dictionary * Replace client cert file with cert provider * Add get\_code to LogicalDhcpServer * Fix parameter args * IpPools: pass tags on create/update operations * Update interface about NSX IPAM and CIF API change * Updated from global requirements 0.7.1 ----- * Get list of IP block and IP block subnet * Add validation for client certificate subject * Mute log for endpoint connection validation * Prevent downtime when client cert is regenerated 0.7.0 ----- * Support client certificate import * [NSX Search]: Append resource\_type while limiting scope for a resource * Add support for IPSet CRUD operations * Updated from global requirements * Add in tox -s cover support 0.7.3 ----- * Add methods for firewall section and rule * Updated from global requirements 
* Use project-id instead of tenant -id in nsxlib * Add 'applied\_tos' arg while creating FirewallRule 0.7.2 ----- * Add support to update tags for FirewallSections * Allow passing args of type list for NSGroup and firewall rule methods * Add support to search resources based on tags or resource type * Fix logical switch name update * Add method to security module * Fix address bindings in logical port update * Allow setting QoS shaper values to 0 * Updated from global requirements * Fix bugs in certificate management exceptions * Disable uRPF check on lrp on container LS * Add match\_ports argument while adding NAT rule * Add support to create/delete ip block subnet on backend * Add IP POOL ID during port create/update 0.6.0 ----- * Basic support for client cert authentication * NSXv3: Add support for dns-integration extension * Client certificate management for NSXV3 authentication * Support ip-pool update * Support router description * Expand PEP8 tests on nsxlib * Add Constraints support * Unit tests: Allow multiple responses in mocked client 0.5.0 ----- * IpPools support * Add gateway\_ip arg for static bindings * NSXv3: Search for the default section from the end * NSXv3: Do not allow empty tag values on resources 0.4.0 ----- * NSXv3: Require target\_type when adding to firewall exclude list 0.3.0 ----- * NSXv3: Add support for firewall exclude list API * NSXv3 Client: Add paginated response * Ensure that correct exception is raised * Using assertIsNone() instead of assertEqual(None) * NSXv3: Remove duplicate method definition * Remove retry from nsgroup member update call * Fix exception handling * NSXv3: Fix string format in logging message * NSXv3: Fix a router port name update issue * Updated from global requirements * Remove vmware-nsxlib bug link * Support both egress and ingress directions on QoS profile * Add NSGroup manager tests 0.2.0 ----- * Updated from global requirements * Updated from global requirements * NSXv3: Fix allowed address pairs switching profile * Replace retrying with tenacity * NSX|V3 fix nsxlib raised error with managers * NSX|v3 replace dhcp profile and metadata proxy uuids with names * Fix nsxlib tox init to not fail on upper constraints 0.1.0 ----- * Enable release notes translation * Move all nsxlib code and tests to vmware\_nsxlib * Updated from global requirements * Cleanup tox.ini: Remove obsolete constraints * Add initial framework using cookiecutter * Added .gitreview vmware-nsxlib-12.0.1/.testr.conf0000666000175100017510000000037213244535763016523 0ustar zuulzuul00000000000000[DEFAULT] test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_LOG_CAPTURE=1 ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./vmware_nsxlib/tests/unit} $LISTOPT $IDOPTION test_id_option=--load-list $IDFILE test_list_option=--list vmware-nsxlib-12.0.1/HACKING.rst0000666000175100017510000000022613244536000016212 0ustar zuulzuul00000000000000vmware-nsxlib Style Commandments ================================ Read the OpenStack Style Commandments http://docs.openstack.org/developer/hacking/ vmware-nsxlib-12.0.1/requirements.txt0000666000175100017510000000123613244535763017721 0ustar zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
pbr!=2.1.0,>=2.0.0 # Apache-2.0 enum34>=1.0.4;python_version=='2.7' or python_version=='2.6' or python_version=='3.3' # BSD eventlet!=0.18.3,!=0.20.1,<0.21.0,>=0.18.2 # MIT netaddr>=0.7.18 # BSD tenacity>=3.2.1 # Apache-2.0 six>=1.10.0 # MIT oslo.i18n>=3.15.3 # Apache-2.0 oslo.log>=3.36.0 # Apache-2.0 oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0 oslo.service!=1.28.1,>=1.24.0 # Apache-2.0 oslo.utils>=3.33.0 # Apache-2.0 pyOpenSSL>=16.2.0 # Apache-2.0 vmware-nsxlib-12.0.1/.coveragerc0000666000175100017510000000020513244535763016551 0ustar zuulzuul00000000000000[run] branch = True source = vmware_nsxlib omit = vmware_nsxlib/tests/* [report] ignore_errors = True [report] ignore_errors = True vmware-nsxlib-12.0.1/run_tests.sh0000666000175100017510000001746713244535763017034 0ustar zuulzuul00000000000000#!/usr/bin/env bash set -eu function usage { echo "Usage: $0 [OPTION]..." echo "Run Neutron's test suite(s)" echo "" echo " -V, --virtual-env Always use virtualenv. Install automatically if not present" echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment" echo " -s, --no-site-packages Isolate the virtualenv from the global Python environment" echo " -r, --recreate-db Recreate the test database (deprecated, as this is now the default)." echo " -n, --no-recreate-db Don't recreate the test database." echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added." echo " -u, --update Update the virtual environment with any newer package versions" echo " -p, --pep8 Just run PEP8 and HACKING compliance check" echo " -8, --pep8-only-changed []" echo " Just run PEP8 and HACKING compliance check on files changed since HEAD~1 (or )" echo " -P, --no-pep8 Don't run static code checks" echo " -c, --coverage Generate coverage report" echo " -d, --debug Run tests with testtools instead of testr. This allows you to use the debugger." echo " -h, --help Print this usage message" echo " --virtual-env-path Location of the virtualenv directory" echo " Default: \$(pwd)" echo " --virtual-env-name Name of the virtualenv directory" echo " Default: .venv" echo " --tools-path Location of the tools directory" echo " Default: \$(pwd)" echo "" echo "Note: with no options specified, the script will try to run the tests in a virtual environment," echo " If no virtualenv is found, the script will ask if you would like to create one. If you " echo " prefer to run tests NOT in a virtual environment, simply pass the -N option." 
exit } function process_options { i=1 while [ $i -le $# ]; do case "${!i}" in -h|--help) usage;; -V|--virtual-env) always_venv=1; never_venv=0;; -N|--no-virtual-env) always_venv=0; never_venv=1;; -s|--no-site-packages) no_site_packages=1;; -r|--recreate-db) recreate_db=1;; -n|--no-recreate-db) recreate_db=0;; -f|--force) force=1;; -u|--update) update=1;; -p|--pep8) just_pep8=1;; -8|--pep8-only-changed) just_pep8_changed=1;; -P|--no-pep8) no_pep8=1;; -c|--coverage) coverage=1;; -d|--debug) debug=1;; --virtual-env-path) (( i++ )) venv_path=${!i} ;; --virtual-env-name) (( i++ )) venv_dir=${!i} ;; --tools-path) (( i++ )) tools_path=${!i} ;; -*) testopts="$testopts ${!i}";; *) testargs="$testargs ${!i}" esac (( i++ )) done } tool_path=${tools_path:-$(pwd)} venv_path=${venv_path:-$(pwd)} venv_dir=${venv_name:-.venv} with_venv=tools/with_venv.sh always_venv=0 never_venv=0 force=0 no_site_packages=0 installvenvopts= testargs= testopts= wrapper="" just_pep8=0 just_pep8_changed=0 no_pep8=0 coverage=0 debug=0 recreate_db=1 update=0 LANG=en_US.UTF-8 LANGUAGE=en_US:en LC_ALL=C process_options $@ # Make our paths available to other scripts we call export venv_path export venv_dir export venv_name export tools_dir export venv=${venv_path}/${venv_dir} if [ $no_site_packages -eq 1 ]; then installvenvopts="--no-site-packages" fi function run_tests { # Cleanup *pyc ${wrapper} find . -type f -name "*.pyc" -delete if [ $debug -eq 1 ]; then if [ "$testopts" = "" ] && [ "$testargs" = "" ]; then # Default to running all tests if specific test is not # provided. testargs="discover ./vmware_nsxlib/tests" fi ${wrapper} python -m testtools.run $testopts $testargs # Short circuit because all of the testr and coverage stuff # below does not make sense when running testtools.run for # debugging purposes. return $? fi if [ $coverage -eq 1 ]; then TESTRTESTS="$TESTRTESTS --coverage" else TESTRTESTS="$TESTRTESTS --slowest" fi # Just run the test suites in current environment set +e testargs=`echo "$testargs" | sed -e's/^\s*\(.*\)\s*$/\1/'` TESTRTESTS="$TESTRTESTS --testr-args='--subunit $testopts $testargs'" OS_TEST_PATH=`echo $testargs|grep -o 'vmware_nsxlib\neutron\.tests[^[:space:]:]\+'|tr . /` if [ -n "$OS_TEST_PATH" ]; then os_test_dir=$(dirname "$OS_TEST_PATH") else os_test_dir='' fi if [ -d "$OS_TEST_PATH" ]; then wrapper="OS_TEST_PATH=$OS_TEST_PATH $wrapper" elif [ -d "$os_test_dir" ]; then wrapper="OS_TEST_PATH=$os_test_dir $wrapper" fi echo "Running \`${wrapper} $TESTRTESTS\`" bash -c "${wrapper} $TESTRTESTS | ${wrapper} subunit2pyunit" RESULT=$? set -e copy_subunit_log if [ $coverage -eq 1 ]; then echo "Generating coverage report in covhtml/" # Don't compute coverage for common code, which is tested elsewhere ${wrapper} coverage combine ${wrapper} coverage html --include='neutron/*' --omit='neutron/openstack/common/*' -d covhtml -i fi return $RESULT } function copy_subunit_log { LOGNAME=`cat .testrepository/next-stream` LOGNAME=$(($LOGNAME - 1)) LOGNAME=".testrepository/${LOGNAME}" cp $LOGNAME subunit.log } function warn_on_flake8_without_venv { if [ $never_venv -eq 1 ]; then echo "**WARNING**:" echo "Running flake8 without virtual env may miss OpenStack HACKING detection" fi } function run_pep8 { echo "Running flake8 ..." warn_on_flake8_without_venv ${wrapper} flake8 } function run_pep8_changed { # NOTE(gilliard) We want use flake8 to check the entirety of every file that has # a change in it. 
Unfortunately the --filenames argument to flake8 only accepts # file *names* and there are no files named (eg) "nova/compute/manager.py". The # --diff argument behaves surprisingly as well, because although you feed it a # diff, it actually checks the file on disk anyway. local target=${testargs:-HEAD~1} local files=$(git diff --name-only $target | tr '\n' ' ') echo "Running flake8 on ${files}" warn_on_flake8_without_venv diff -u --from-file /dev/null ${files} | ${wrapper} flake8 --diff } TESTRTESTS="python setup.py testr" if [ $never_venv -eq 0 ] then # Remove the virtual environment if --force used if [ $force -eq 1 ]; then echo "Cleaning virtualenv..." rm -rf ${venv} fi if [ $update -eq 1 ]; then echo "Updating virtualenv..." python tools/install_venv.py $installvenvopts fi if [ -e ${venv} ]; then wrapper="${with_venv}" else if [ $always_venv -eq 1 ]; then # Automatically install the virtualenv python tools/install_venv.py $installvenvopts wrapper="${with_venv}" else echo -e "No virtual environment found...create one? (Y/n) \c" read use_ve if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then # Install the virtualenv and run the test suite in it python tools/install_venv.py $installvenvopts wrapper=${with_venv} fi fi fi fi # Delete old coverage data from previous runs if [ $coverage -eq 1 ]; then ${wrapper} coverage erase fi if [ $just_pep8 -eq 1 ]; then run_pep8 exit fi if [ $just_pep8_changed -eq 1 ]; then run_pep8_changed exit fi if [ $recreate_db -eq 1 ]; then rm -f tests.sqlite fi run_tests # NOTE(sirp): we only want to run pep8 when we're running the full-test suite, # not when we're running tests individually. To handle this, we need to # distinguish between options (testopts), which begin with a '-', and # arguments (testargs). if [ -z "$testargs" ]; then if [ $no_pep8 -eq 0 ]; then run_pep8 fi fi vmware-nsxlib-12.0.1/AUTHORS0000664000175100017510000000144213244536264015477 0ustar zuulzuul00000000000000Aaron Rosen Abhishek Raut Adit Sarfaty Andreas Jaeger Anna Khmelnitsky Boden R Chuck Short Danting Liu Gary Kotton Jon Schlueter Quan Tian Roey Chen Salvatore Orlando Shih-Hao Li Tong Liu Tony Breeds Vu Cong Tuan YuYang Zuul dantingl garyk lyliu melissaml zhanghongtao vmware-nsxlib-12.0.1/doc/0000775000175100017510000000000013244536266015175 5ustar zuulzuul00000000000000vmware-nsxlib-12.0.1/doc/source/0000775000175100017510000000000013244536266016475 5ustar zuulzuul00000000000000vmware-nsxlib-12.0.1/doc/source/readme.rst0000666000175100017510000000003513244535763020465 0ustar zuulzuul00000000000000.. include:: ../../README.rstvmware-nsxlib-12.0.1/doc/source/usage.rst0000666000175100017510000000012113244535763020330 0ustar zuulzuul00000000000000===== Usage ===== To use vmware-nsxlib in a project:: import vmware_nsxlib vmware-nsxlib-12.0.1/doc/source/contributing.rst0000666000175100017510000000011213244535763021733 0ustar zuulzuul00000000000000============ Contributing ============ .. include:: ../../CONTRIBUTING.rstvmware-nsxlib-12.0.1/doc/source/conf.py0000666000175100017510000000463713244535763020011 0ustar zuulzuul00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys sys.path.insert(0, os.path.abspath('../..')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', # 'sphinx.ext.intersphinx', 'oslosphinx' ] # autodoc generation is a bit aggressive and a nuisance when doing heavy # text edit cycles. # execute "export SPHINX_DEBUG=1" in your terminal to disable # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = u'vmware-nsxlib' copyright = u'2016, OpenStack Foundation' # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] # html_theme = '_theme' # html_static_path = ['static'] # Output file base name for HTML help builder. htmlhelp_basename = '%sdoc' % project # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', '%s.tex' % project, u'%s Documentation' % project, u'OpenStack Foundation', 'manual'), ] # Example configuration for intersphinx: refer to the Python standard library. # intersphinx_mapping = {'http://docs.python.org/': None} vmware-nsxlib-12.0.1/doc/source/index.rst0000666000175100017510000000075513244535763020350 0ustar zuulzuul00000000000000.. vmware-nsxlib documentation master file, created by sphinx-quickstart on Tue Jul 9 22:26:36 2013. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. Welcome to vmware-nsxlib's documentation! ========================================= Contents: .. toctree:: :maxdepth: 2 readme installation usage contributing Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` vmware-nsxlib-12.0.1/doc/source/installation.rst0000666000175100017510000000031713244535763021734 0ustar zuulzuul00000000000000============ Installation ============ At the command line:: $ pip install vmware-nsxlib Or, if you have virtualenvwrapper installed:: $ mkvirtualenv vmware-nsxlib $ pip install vmware-nsxlibvmware-nsxlib-12.0.1/LICENSE0000666000175100017510000002363613244535763015452 0ustar zuulzuul00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. 
"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. vmware-nsxlib-12.0.1/test-requirements.txt0000666000175100017510000000137713244535763020704 0ustar zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. hacking<0.12,>=0.11.0 # Apache-2.0 coverage!=4.4,>=4.0 # Apache-2.0 fixtures>=3.0.0 # Apache-2.0/BSD flake8-import-order==0.12 # LGPLv3 mock>=2.0.0 # BSD python-subunit>=1.0.0 # Apache-2.0/BSD sphinx!=1.6.6,>=1.6.2 # BSD oslosphinx>=4.7.0 # Apache-2.0 oslotest>=3.2.0 # Apache-2.0 testrepository>=0.0.18 # Apache-2.0/BSD testresources>=2.0.0 # Apache-2.0/BSD testtools>=2.2.0 # MIT testscenarios>=0.4 # Apache-2.0/BSD reno>=2.5.0 # Apache-2.0 bandit>=1.1.0 # Apache-2.0 tempest>=17.1.0 # Apache-2.0 pylint==1.4.5 # GPLv2 requests-mock>=1.1.0 # Apache-2.0 vmware-nsxlib-12.0.1/setup.py0000666000175100017510000000200613244535763016143 0ustar zuulzuul00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools # In python < 2.7.4, a lazy loading of package `pbr` will break # setuptools if some other modules registered functions in `atexit`. # solution from: http://bugs.python.org/issue15881#msg170215 try: import multiprocessing # noqa except ImportError: pass setuptools.setup( setup_requires=['pbr>=2.0.0'], pbr=True) vmware-nsxlib-12.0.1/MANIFEST.in0000666000175100017510000000013513244535763016170 0ustar zuulzuul00000000000000include AUTHORS include ChangeLog exclude .gitignore exclude .gitreview global-exclude *.pycvmware-nsxlib-12.0.1/vmware_nsxlib.egg-info/0000775000175100017510000000000013244536266021002 5ustar zuulzuul00000000000000vmware-nsxlib-12.0.1/vmware_nsxlib.egg-info/not-zip-safe0000664000175100017510000000000113244536232023221 0ustar zuulzuul00000000000000 vmware-nsxlib-12.0.1/vmware_nsxlib.egg-info/top_level.txt0000664000175100017510000000001613244536264023527 0ustar zuulzuul00000000000000vmware_nsxlib vmware-nsxlib-12.0.1/vmware_nsxlib.egg-info/PKG-INFO0000664000175100017510000000204613244536264022077 0ustar zuulzuul00000000000000Metadata-Version: 1.1 Name: vmware-nsxlib Version: 12.0.1 Summary: A common library that interfaces with VMware NSX Home-page: http://www.openstack.org/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description-Content-Type: UNKNOWN Description: ============= vmware-nsxlib ============= * Free software: Apache license * Source: http://git.openstack.org/cgit/openstack/vmware-nsxlib Features -------- * TODO Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.5 vmware-nsxlib-12.0.1/vmware_nsxlib.egg-info/requires.txt0000664000175100017510000000040213244536264023374 0ustar zuulzuul00000000000000pbr!=2.1.0,>=2.0.0 enum34>=1.0.4 eventlet!=0.18.3,!=0.20.1,<0.21.0,>=0.18.2 netaddr>=0.7.18 tenacity>=3.2.1 six>=1.10.0 oslo.i18n>=3.15.3 oslo.log>=3.36.0 oslo.serialization!=2.19.1,>=2.18.0 oslo.service!=1.28.1,>=1.24.0 oslo.utils>=3.33.0 pyOpenSSL>=16.2.0 vmware-nsxlib-12.0.1/vmware_nsxlib.egg-info/dependency_links.txt0000664000175100017510000000000113244536264025046 0ustar zuulzuul00000000000000 vmware-nsxlib-12.0.1/vmware_nsxlib.egg-info/pbr.json0000664000175100017510000000005613244536264022457 0ustar zuulzuul00000000000000{"git_version": "3b1dfc4", "is_release": true}vmware-nsxlib-12.0.1/vmware_nsxlib.egg-info/SOURCES.txt0000664000175100017510000000465513244536266022700 0ustar zuulzuul00000000000000.coveragerc .testr.conf AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE MANIFEST.in README.rst babel.cfg requirements.txt run_tests.sh setup.cfg setup.py test-requirements.txt tox.ini 
doc/source/conf.py doc/source/contributing.rst doc/source/index.rst doc/source/installation.rst doc/source/readme.rst doc/source/usage.rst releasenotes/notes/.placeholder releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/unreleased.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder tools/__init__.py tools/ostestr_compat_shim.sh tools/tox_install.sh vmware_nsxlib/__init__.py vmware_nsxlib/_i18n.py vmware_nsxlib/version.py vmware_nsxlib.egg-info/PKG-INFO vmware_nsxlib.egg-info/SOURCES.txt vmware_nsxlib.egg-info/dependency_links.txt vmware_nsxlib.egg-info/not-zip-safe vmware_nsxlib.egg-info/pbr.json vmware_nsxlib.egg-info/requires.txt vmware_nsxlib.egg-info/top_level.txt vmware_nsxlib/tests/__init__.py vmware_nsxlib/tests/base.py vmware_nsxlib/tests/unit/__init__.py vmware_nsxlib/tests/unit/v3/__init__.py vmware_nsxlib/tests/unit/v3/mocks.py vmware_nsxlib/tests/unit/v3/nsxlib_testcase.py vmware_nsxlib/tests/unit/v3/test_cert.py vmware_nsxlib/tests/unit/v3/test_client.py vmware_nsxlib/tests/unit/v3/test_cluster.py vmware_nsxlib/tests/unit/v3/test_constants.py vmware_nsxlib/tests/unit/v3/test_load_balancer.py vmware_nsxlib/tests/unit/v3/test_native_dhcp.py vmware_nsxlib/tests/unit/v3/test_ns_group_manager.py vmware_nsxlib/tests/unit/v3/test_policy_api.py vmware_nsxlib/tests/unit/v3/test_policy_resources.py vmware_nsxlib/tests/unit/v3/test_qos_switching_profile.py vmware_nsxlib/tests/unit/v3/test_resources.py vmware_nsxlib/tests/unit/v3/test_security.py vmware_nsxlib/tests/unit/v3/test_utils.py vmware_nsxlib/tests/unit/v3/test_vpn_ipsec.py vmware_nsxlib/v3/__init__.py vmware_nsxlib/v3/client.py vmware_nsxlib/v3/client_cert.py vmware_nsxlib/v3/cluster.py vmware_nsxlib/v3/config.py vmware_nsxlib/v3/constants.py vmware_nsxlib/v3/core_resources.py vmware_nsxlib/v3/exceptions.py vmware_nsxlib/v3/load_balancer.py vmware_nsxlib/v3/native_dhcp.py vmware_nsxlib/v3/ns_group_manager.py vmware_nsxlib/v3/nsx_constants.py vmware_nsxlib/v3/policy_constants.py vmware_nsxlib/v3/policy_defs.py vmware_nsxlib/v3/policy_resources.py vmware_nsxlib/v3/resources.py vmware_nsxlib/v3/router.py vmware_nsxlib/v3/security.py vmware_nsxlib/v3/trust_management.py vmware_nsxlib/v3/utils.py vmware_nsxlib/v3/vpn_ipsec.pyvmware-nsxlib-12.0.1/tools/0000775000175100017510000000000013244536266015570 5ustar zuulzuul00000000000000vmware-nsxlib-12.0.1/tools/ostestr_compat_shim.sh0000777000175100017510000000035213244535763022220 0ustar zuulzuul00000000000000#!/bin/sh # Copied from neutron/tools. Otherwise no units tests are found. # preserve old behavior of using an arg as a regex when '--' is not present case $@ in (*--*) ostestr $@;; ('') ostestr;; (*) ostestr --regex "$@" esac vmware-nsxlib-12.0.1/tools/__init__.py0000666000175100017510000000000013244535763017672 0ustar zuulzuul00000000000000vmware-nsxlib-12.0.1/tools/tox_install.sh0000777000175100017510000000203613244535763020473 0ustar zuulzuul00000000000000#!/usr/bin/env bash # Client constraint file contains this client version pin that is in conflict # with installing the client from source. We should remove the version pin in # the constraints file before applying it for from-source installation. CONSTRAINTS_FILE="$1" shift 1 set -e # NOTE(tonyb): Place this in the tox enviroment's log dir so it will get # published to logs.openstack.org for easy debugging. 
localfile="$VIRTUAL_ENV/log/upper-constraints.txt" if [[ "$CONSTRAINTS_FILE" != http* ]]; then CONSTRAINTS_FILE="file://$CONSTRAINTS_FILE" fi # NOTE(tonyb): need to add curl to bindep.txt if the project supports bindep curl "$CONSTRAINTS_FILE" --insecure --progress-bar --output "$localfile" pip install -c"$localfile" openstack-requirements # This is the main purpose of the script: Allow local installation of # the current repo. It is listed in constraints file and thus any # install will be constrained and we need to unconstrain it. edit-constraints "$localfile" -- "$CLIENT_NAME" pip install -c"$localfile" -U "$@" exit $? vmware-nsxlib-12.0.1/tox.ini0000666000175100017510000000531013244536000015726 0ustar zuulzuul00000000000000[tox] envlist = py35,py27,pep8,docs minversion = 2.0 skipsdist = True [testenv] install_command = {toxinidir}/tools/tox_install.sh {env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?stable/queens} {opts} {packages} setenv = VIRTUAL_ENV={envdir} BRANCH_NAME=master CLIENT_NAME=vmware-nsxlib PYTHONWARNINGS=default::DeprecationWarning passenv = TRACE_FAILONLY GENERATE_HASHES http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY usedevelop = True deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt whitelist_externals = sh commands = {toxinidir}/tools/ostestr_compat_shim.sh {posargs} # there is also secret magic in ostestr which lets you run in a fail only # mode. To do this define the TRACE_FAILONLY environmental variable. [testenv:common] # Fake job to define environment variables shared between dsvm/non-dsvm jobs setenv = {[testenv]setenv} OS_TEST_TIMEOUT=180 commands = false [testenv:functional] basepython = python2.7 setenv = {[testenv]setenv} {[testenv:common]setenv} OS_TEST_PATH=./vmware_nsxlib/tests/functional OS_LOG_PATH={env:OS_LOG_PATH:/opt/stack/logs} deps = {[testenv]deps} -r{toxinidir}/vmware_nsxlib/tests/functional/requirements.txt [testenv:dsvm-functional] basepython = python2.7 setenv = {[testenv]setenv} OS_SUDO_TESTING=1 OS_FAIL_ON_MISSING_DEPS=1 OS_TEST_TIMEOUT=180 sitepackages=True deps = {[testenv:functional]deps} commands = [tox:jenkins] sitepackages = True [testenv:releasenotes] commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html [testenv:pep8] basepython = python2.7 deps = {[testenv]deps} commands = # Checks for coding and style guidelines flake8 {[testenv:genconfig]commands} whitelist_externals = sh bash [testenv:bandit] deps = -r{toxinidir}/test-requirements.txt commands = bandit -r vmware_nsxlib -n 5 -ll [testenv:cover] commands = python setup.py test --coverage --coverage-package-name=vmware_nsxlib --testr-args='{posargs}' coverage report [testenv:venv] commands = {posargs} [testenv:docs] commands = sphinx-build -W -b html doc/source doc/build/html [flake8] # E125 continuation line does not distinguish itself from next logical line # E129 visually indented line with same indent as next logical line # N530 direct neutron imports not allowed # N531 translations hints ignore = N530,E125,E129,N531 show-source = true builtins = _ exclude = build,dist import-order-style = pep8 [hacking] import_exceptions = vmware_nsxlib._i18n [testenv:genconfig] commands = [testenv:uuidgen] commands = check-uuid --fix