vmware-nsxlib-15.0.6/0000775000175000017500000000000013623151652014427 5ustar zuulzuul00000000000000vmware-nsxlib-15.0.6/babel.cfg0000664000175000017500000000002013623151571016145 0ustar zuulzuul00000000000000[python: **.py] vmware-nsxlib-15.0.6/releasenotes/0000775000175000017500000000000013623151652017120 5ustar zuulzuul00000000000000vmware-nsxlib-15.0.6/releasenotes/source/0000775000175000017500000000000013623151652020420 5ustar zuulzuul00000000000000vmware-nsxlib-15.0.6/releasenotes/source/conf.py0000664000175000017500000002153213623151571021722 0ustar zuulzuul00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # Glance Release Notes documentation build configuration file, created by # sphinx-quickstart on Tue Nov 3 17:40:50 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'oslosphinx', 'reno.sphinxext', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'vmware_nsxlib Release Notes' copyright = u'2016, OpenStack Foundation' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. # The full version, including alpha/beta/rc tags. release = '' # The short X.Y version. version = '' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. 
cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. 
htmlhelp_basename = 'GlanceReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'GlanceReleaseNotes.tex', u'Glance Release Notes Documentation', u'Glance Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'glancereleasenotes', u'Glance Release Notes Documentation', [u'Glance Developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'GlanceReleaseNotes', u'Glance Release Notes Documentation', u'Glance Developers', 'GlanceReleaseNotes', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] vmware-nsxlib-15.0.6/releasenotes/source/_static/0000775000175000017500000000000013623151652022046 5ustar zuulzuul00000000000000vmware-nsxlib-15.0.6/releasenotes/source/_static/.placeholder0000664000175000017500000000000013623151571024317 0ustar zuulzuul00000000000000vmware-nsxlib-15.0.6/releasenotes/source/_templates/0000775000175000017500000000000013623151652022555 5ustar zuulzuul00000000000000vmware-nsxlib-15.0.6/releasenotes/source/_templates/.placeholder0000664000175000017500000000000013623151571025026 0ustar zuulzuul00000000000000vmware-nsxlib-15.0.6/releasenotes/source/unreleased.rst0000664000175000017500000000015713623151571023304 0ustar zuulzuul00000000000000============================== Current Series Release Notes ============================== .. release-notes::vmware-nsxlib-15.0.6/releasenotes/source/index.rst0000664000175000017500000000020413623151571022255 0ustar zuulzuul00000000000000============================ vmware_nsxlib Release Notes ============================ .. 
toctree:: :maxdepth: 1 unreleased vmware-nsxlib-15.0.6/releasenotes/notes/0000775000175000017500000000000013623151652020250 5ustar zuulzuul00000000000000vmware-nsxlib-15.0.6/releasenotes/notes/.placeholder0000664000175000017500000000000013623151571022521 0ustar zuulzuul00000000000000vmware-nsxlib-15.0.6/run_tests.sh0000664000175000017500000001745413623151571017024 0ustar zuulzuul00000000000000#!/usr/bin/env bash set -eu function usage { echo "Usage: $0 [OPTION]..." echo "Run Neutron's test suite(s)" echo "" echo " -V, --virtual-env Always use virtualenv. Install automatically if not present" echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment" echo " -s, --no-site-packages Isolate the virtualenv from the global Python environment" echo " -r, --recreate-db Recreate the test database (deprecated, as this is now the default)." echo " -n, --no-recreate-db Don't recreate the test database." echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added." echo " -u, --update Update the virtual environment with any newer package versions" echo " -p, --pep8 Just run PEP8 and HACKING compliance check" echo " -8, --pep8-only-changed []" echo " Just run PEP8 and HACKING compliance check on files changed since HEAD~1 (or )" echo " -P, --no-pep8 Don't run static code checks" echo " -c, --coverage Generate coverage report" echo " -d, --debug Run tests with testtools instead of testr. This allows you to use the debugger." echo " -h, --help Print this usage message" echo " --virtual-env-path Location of the virtualenv directory" echo " Default: \$(pwd)" echo " --virtual-env-name Name of the virtualenv directory" echo " Default: .venv" echo " --tools-path Location of the tools directory" echo " Default: \$(pwd)" echo "" echo "Note: with no options specified, the script will try to run the tests in a virtual environment," echo " If no virtualenv is found, the script will ask if you would like to create one. If you " echo " prefer to run tests NOT in a virtual environment, simply pass the -N option." exit } function process_options { i=1 while [ $i -le $# ]; do case "${!i}" in -h|--help) usage;; -V|--virtual-env) always_venv=1; never_venv=0;; -N|--no-virtual-env) always_venv=0; never_venv=1;; -s|--no-site-packages) no_site_packages=1;; -r|--recreate-db) recreate_db=1;; -n|--no-recreate-db) recreate_db=0;; -f|--force) force=1;; -u|--update) update=1;; -p|--pep8) just_pep8=1;; -8|--pep8-only-changed) just_pep8_changed=1;; -P|--no-pep8) no_pep8=1;; -c|--coverage) coverage=1;; -d|--debug) debug=1;; --virtual-env-path) (( i++ )) venv_path=${!i} ;; --virtual-env-name) (( i++ )) venv_dir=${!i} ;; --tools-path) (( i++ )) tools_path=${!i} ;; -*) testopts="$testopts ${!i}";; *) testargs="$testargs ${!i}" esac (( i++ )) done } tool_path=${tools_path:-$(pwd)} venv_path=${venv_path:-$(pwd)} venv_dir=${venv_name:-.venv} with_venv=tools/with_venv.sh always_venv=0 never_venv=0 force=0 no_site_packages=0 installvenvopts= testargs= testopts= wrapper="" just_pep8=0 just_pep8_changed=0 no_pep8=0 coverage=0 debug=0 recreate_db=1 update=0 LANG=en_US.UTF-8 LANGUAGE=en_US:en LC_ALL=C process_options $@ # Make our paths available to other scripts we call export venv_path export venv_dir export venv_name export tools_dir export venv=${venv_path}/${venv_dir} if [ $no_site_packages -eq 1 ]; then installvenvopts="--no-site-packages" fi function run_tests { # Cleanup *pyc ${wrapper} find . 
-type f -name "*.pyc" -delete if [ $debug -eq 1 ]; then if [ "$testopts" = "" ] && [ "$testargs" = "" ]; then # Default to running all tests if specific test is not # provided. testargs="discover ./vmware_nsxlib/tests" fi ${wrapper} python -m testtools.run $testopts $testargs # Short circuit because all of the testr and coverage stuff # below does not make sense when running testtools.run for # debugging purposes. return $? fi if [ $coverage -eq 1 ]; then TESTRTESTS="$TESTRTESTS --coverage" else TESTRTESTS="$TESTRTESTS --slowest" fi # Just run the test suites in current environment set +e testargs=`echo "$testargs" | sed -e's/^\s*\(.*\)\s*$/\1/'` TESTRTESTS="$TESTRTESTS --testr-args='--subunit $testopts $testargs'" OS_TEST_PATH=`echo $testargs|grep -o 'vmware_nsxlib\neutron\.tests[^[:space:]:]\+'|tr . /` if [ -n "$OS_TEST_PATH" ]; then os_test_dir=$(dirname "$OS_TEST_PATH") else os_test_dir='' fi if [ -d "$OS_TEST_PATH" ]; then wrapper="OS_TEST_PATH=$OS_TEST_PATH $wrapper" elif [ -d "$os_test_dir" ]; then wrapper="OS_TEST_PATH=$os_test_dir $wrapper" fi echo "Running \`${wrapper} $TESTRTESTS\`" bash -c "${wrapper} $TESTRTESTS | ${wrapper} subunit2pyunit" RESULT=$? set -e copy_subunit_log if [ $coverage -eq 1 ]; then echo "Generating coverage report in covhtml/" # Don't compute coverage for common code, which is tested elsewhere ${wrapper} coverage combine ${wrapper} coverage html --include='neutron/*' --omit='neutron/openstack/common/*' -d covhtml -i fi return $RESULT } function copy_subunit_log { LOGNAME=`cat .testrepository/next-stream` LOGNAME=$(($LOGNAME - 1)) LOGNAME=".testrepository/${LOGNAME}" cp $LOGNAME subunit.log } function warn_on_flake8_without_venv { if [ $never_venv -eq 1 ]; then echo "**WARNING**:" echo "Running flake8 without virtual env may miss OpenStack HACKING detection" fi } function run_pep8 { echo "Running flake8 ..." warn_on_flake8_without_venv ${wrapper} flake8 } function run_pep8_changed { # NOTE(gilliard) We want use flake8 to check the entirety of every file that has # a change in it. Unfortunately the --filenames argument to flake8 only accepts # file *names* and there are no files named (eg) "nova/compute/manager.py". The # --diff argument behaves surprisingly as well, because although you feed it a # diff, it actually checks the file on disk anyway. local target=${testargs:-HEAD~1} local files=$(git diff --name-only $target | tr '\n' ' ') echo "Running flake8 on ${files}" warn_on_flake8_without_venv diff -u --from-file /dev/null ${files} | ${wrapper} flake8 --diff } TESTRTESTS="stestr run" if [ $never_venv -eq 0 ] then # Remove the virtual environment if --force used if [ $force -eq 1 ]; then echo "Cleaning virtualenv..." rm -rf ${venv} fi if [ $update -eq 1 ]; then echo "Updating virtualenv..." python tools/install_venv.py $installvenvopts fi if [ -e ${venv} ]; then wrapper="${with_venv}" else if [ $always_venv -eq 1 ]; then # Automatically install the virtualenv python tools/install_venv.py $installvenvopts wrapper="${with_venv}" else echo -e "No virtual environment found...create one? 
(Y/n) \c" read use_ve if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then # Install the virtualenv and run the test suite in it python tools/install_venv.py $installvenvopts wrapper=${with_venv} fi fi fi fi # Delete old coverage data from previous runs if [ $coverage -eq 1 ]; then ${wrapper} coverage erase fi if [ $just_pep8 -eq 1 ]; then run_pep8 exit fi if [ $just_pep8_changed -eq 1 ]; then run_pep8_changed exit fi if [ $recreate_db -eq 1 ]; then rm -f tests.sqlite fi run_tests # NOTE(sirp): we only want to run pep8 when we're running the full-test suite, # not when we're running tests individually. To handle this, we need to # distinguish between options (testopts), which begin with a '-', and # arguments (testargs). if [ -z "$testargs" ]; then if [ $no_pep8 -eq 0 ]; then run_pep8 fi fi vmware-nsxlib-15.0.6/doc/0000775000175000017500000000000013623151652015174 5ustar zuulzuul00000000000000vmware-nsxlib-15.0.6/doc/source/0000775000175000017500000000000013623151652016474 5ustar zuulzuul00000000000000vmware-nsxlib-15.0.6/doc/source/conf.py0000664000175000017500000000463713623151571020005 0ustar zuulzuul00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys sys.path.insert(0, os.path.abspath('../..')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', # 'sphinx.ext.intersphinx', 'oslosphinx' ] # autodoc generation is a bit aggressive and a nuisance when doing heavy # text edit cycles. # execute "export SPHINX_DEBUG=1" in your terminal to disable # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = u'vmware-nsxlib' copyright = u'2016, OpenStack Foundation' # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] # html_theme = '_theme' # html_static_path = ['static'] # Output file base name for HTML help builder. htmlhelp_basename = '%sdoc' % project # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', '%s.tex' % project, u'%s Documentation' % project, u'OpenStack Foundation', 'manual'), ] # Example configuration for intersphinx: refer to the Python standard library. 
# intersphinx_mapping = {'http://docs.python.org/': None}

vmware-nsxlib-15.0.6/doc/source/contributing.rst
============
Contributing
============
.. include:: ../../CONTRIBUTING.rst

vmware-nsxlib-15.0.6/doc/source/readme.rst
.. include:: ../../README.rst

vmware-nsxlib-15.0.6/doc/source/usage.rst
=====
Usage
=====

To use vmware-nsxlib in a project::

    import vmware_nsxlib

A fuller initialization sketch, based on assumed configuration parameters, follows after the ChangeLog at the end of this listing.

vmware-nsxlib-15.0.6/doc/source/index.rst
.. vmware-nsxlib documentation master file, created by
   sphinx-quickstart on Tue Jul 9 22:26:36 2013.
   You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.

Welcome to vmware-nsxlib's documentation!
=========================================

Contents:

.. toctree::
   :maxdepth: 2

   readme
   installation
   usage
   contributing

Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

vmware-nsxlib-15.0.6/doc/source/installation.rst
============
Installation
============

At the command line::

    $ pip install vmware-nsxlib

Or, if you have virtualenvwrapper installed::

    $ mkvirtualenv vmware-nsxlib
    $ pip install vmware-nsxlib

vmware-nsxlib-15.0.6/doc/requirements.txt
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
sphinx!=1.6.6,!=1.6.7,>=1.6.2,<2.0.0;python_version=='2.7' # BSD
sphinx!=1.6.6,!=1.6.7,>=1.6.2;python_version>='3.4' # BSD
oslosphinx>=4.7.0 # Apache-2.0
openstackdocstheme>=1.18.1 # Apache-2.0
oslotest>=3.2.0 # Apache-2.0
reno>=2.5.0 # Apache-2.0

vmware-nsxlib-15.0.6/vmware_nsxlib.egg-info/
vmware-nsxlib-15.0.6/vmware_nsxlib.egg-info/pbr.json
{"git_version": "886854f", "is_release": true}

vmware-nsxlib-15.0.6/vmware_nsxlib.egg-info/requires.txt
pbr>=4.0.0
decorator>=4.3.0
eventlet>=0.24.1
netaddr>=0.7.18
tenacity>=5.0.1
six>=1.10.0
oslo.i18n>=3.15.3
oslo.log>=3.36.0
oslo.serialization>=2.28.1
oslo.service>=1.31.0
oslo.utils>=3.33.0
pyOpenSSL>=17.1.0

vmware-nsxlib-15.0.6/vmware_nsxlib.egg-info/SOURCES.txt
.coveragerc
.stestr.conf
.zuul.yaml
AUTHORS
CONTRIBUTING.rst
ChangeLog
HACKING.rst
LICENSE
MANIFEST.in
README.rst
babel.cfg
lower-constraints.txt
requirements.txt
run_tests.sh
setup.cfg
setup.py
test-requirements.txt
tox.ini
doc/requirements.txt
doc/source/conf.py
doc/source/contributing.rst
doc/source/index.rst
doc/source/installation.rst
doc/source/readme.rst
doc/source/usage.rst
releasenotes/notes/.placeholder
releasenotes/source/conf.py
releasenotes/source/index.rst
releasenotes/source/unreleased.rst
releasenotes/source/_static/.placeholder
releasenotes/source/_templates/.placeholder
vmware_nsxlib/__init__.py
vmware_nsxlib/_i18n.py
vmware_nsxlib/version.py
vmware_nsxlib.egg-info/PKG-INFO
vmware_nsxlib.egg-info/SOURCES.txt
vmware_nsxlib.egg-info/dependency_links.txt
vmware_nsxlib.egg-info/not-zip-safe
vmware_nsxlib.egg-info/pbr.json
vmware_nsxlib.egg-info/requires.txt
vmware_nsxlib.egg-info/top_level.txt
vmware_nsxlib/tests/__init__.py
vmware_nsxlib/tests/base.py
vmware_nsxlib/tests/unit/__init__.py
vmware_nsxlib/tests/unit/v3/__init__.py
vmware_nsxlib/tests/unit/v3/mocks.py
vmware_nsxlib/tests/unit/v3/nsxlib_testcase.py
vmware_nsxlib/tests/unit/v3/test_cert.py
vmware_nsxlib/tests/unit/v3/test_client.py
vmware_nsxlib/tests/unit/v3/test_cluster.py
vmware_nsxlib/tests/unit/v3/test_cluster_management.py
vmware_nsxlib/tests/unit/v3/test_constants.py
vmware_nsxlib/tests/unit/v3/test_load_balancer.py
vmware_nsxlib/tests/unit/v3/test_native_dhcp.py
vmware_nsxlib/tests/unit/v3/test_ns_group_manager.py
vmware_nsxlib/tests/unit/v3/test_qos_switching_profile.py
vmware_nsxlib/tests/unit/v3/test_resources.py
vmware_nsxlib/tests/unit/v3/test_router.py
vmware_nsxlib/tests/unit/v3/test_security.py
vmware_nsxlib/tests/unit/v3/test_trust_management.py
vmware_nsxlib/tests/unit/v3/test_utils.py
vmware_nsxlib/tests/unit/v3/test_vpn_ipsec.py
vmware_nsxlib/tests/unit/v3/policy/__init__.py
vmware_nsxlib/tests/unit/v3/policy/policy_testcase.py
vmware_nsxlib/tests/unit/v3/policy/test_api.py
vmware_nsxlib/tests/unit/v3/policy/test_ipsec_vpn_resources.py
vmware_nsxlib/tests/unit/v3/policy/test_lb_resources.py
vmware_nsxlib/tests/unit/v3/policy/test_resources.py
vmware_nsxlib/tests/unit/v3/policy/test_transaction.py
vmware_nsxlib/v3/__init__.py
vmware_nsxlib/v3/client.py
vmware_nsxlib/v3/client_cert.py
vmware_nsxlib/v3/cluster.py
vmware_nsxlib/v3/cluster_management.py
vmware_nsxlib/v3/config.py
vmware_nsxlib/v3/constants.py
vmware_nsxlib/v3/core_resources.py
vmware_nsxlib/v3/exceptions.py
vmware_nsxlib/v3/lib.py
vmware_nsxlib/v3/load_balancer.py
vmware_nsxlib/v3/native_dhcp.py
vmware_nsxlib/v3/ns_group_manager.py
vmware_nsxlib/v3/nsx_constants.py
vmware_nsxlib/v3/resources.py
vmware_nsxlib/v3/router.py
vmware_nsxlib/v3/security.py
vmware_nsxlib/v3/token_provider.py
vmware_nsxlib/v3/trust_management.py
vmware_nsxlib/v3/utils.py
vmware_nsxlib/v3/vpn_ipsec.py
vmware_nsxlib/v3/policy/__init__.py
vmware_nsxlib/v3/policy/constants.py
vmware_nsxlib/v3/policy/core_defs.py
vmware_nsxlib/v3/policy/core_resources.py
vmware_nsxlib/v3/policy/ipsec_vpn_defs.py
vmware_nsxlib/v3/policy/ipsec_vpn_resources.py
vmware_nsxlib/v3/policy/lb_defs.py
vmware_nsxlib/v3/policy/lb_resources.py
vmware_nsxlib/v3/policy/transaction.py
vmware_nsxlib/v3/policy/utils.py

vmware-nsxlib-15.0.6/vmware_nsxlib.egg-info/top_level.txt
vmware_nsxlib

vmware-nsxlib-15.0.6/vmware_nsxlib.egg-info/dependency_links.txt

vmware-nsxlib-15.0.6/vmware_nsxlib.egg-info/PKG-INFO
Metadata-Version: 1.1
Name: vmware-nsxlib
Version: 15.0.6
Summary: A common library that interfaces with VMware NSX
Home-page: https://opendev.org/x/vmware-nsxlib
Author: OpenStack
Author-email: openstack-discuss@lists.openstack.org
License: UNKNOWN
Description: =============
        vmware-nsxlib
        =============

        * Free software: Apache license
        * Source: https://opendev.org/x/vmware-nsxlib

        Features
        --------
        * TODO

Platform: UNKNOWN
Classifier: Environment :: OpenStack
Classifier: Intended Audience :: Information Technology
Classifier: Intended Audience :: System Administrators
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: POSIX :: Linux
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7

vmware-nsxlib-15.0.6/vmware_nsxlib.egg-info/not-zip-safe

vmware-nsxlib-15.0.6/AUTHORS
Aaron Rosen
Abhishek Raut
Adit Sarfaty
Andreas Jaeger
Anna Khmelnitsky
Boden R
Chuck Short
Danting Liu
Doug Hellmann
Durgesh Rane
Enhao Cui
Erica Liu
Erica Liu
Gary Kotton
Gordon Zhang
Jinhao Tang
Jon Schlueter
Julie Pichon
Kobi Samoray
Mengdie Song
Michal Kelner Mishali
Qian Sun
Quan Tian
Ran Gu
Roey Chen
Salvatore Orlando
Shawn Wang
Shih-Hao Li
Spark Fan
Tong Liu
Tony Breeds
Vu Cong Tuan
Xiaopei Liu
YuYang
Zhengsheng Zhou
asarfaty
dantingl
garyk
huang.zhiping
lyliu
melissaml
ranp
root
sean
vicky liu
wangqi
zhanghongtao

vmware-nsxlib-15.0.6/LICENSE

                              Apache License
                        Version 2.0, January 2004
                     http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

   "License" shall mean the terms and conditions for use, reproduction,
   and distribution as defined by Sections 1 through 9 of this document.

   "Licensor" shall mean the copyright owner or entity authorized by
   the copyright owner that is granting the License.

   "Legal Entity" shall mean the union of the acting entity and all
   other entities that control, are controlled by, or are under common
   control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. vmware-nsxlib-15.0.6/ChangeLog0000664000175000017500000006671713623151651016221 0ustar zuulzuul00000000000000CHANGES ======= 15.0.6 ------ * Fix group def in Policy API * Fix segment PATCH data 15.0.5 ------ * Avoid logging sensitive information in http header * Remove misplaced admin\_state check * Support querying resources with tags plus attributes * Retry on internal server error 98 * Refactor version dependant attributes in policy 15.0.4 ------ * Set default ipv6 ndra profile instead of empty value 15.0.3 ------ * Expose HTTP Status Code in ManagerError * Support policy segment & port admin state * Expose failover\_mode option for T1 Router in MP * Update Lb App Profile attributes * Add policy DHCP related resources support * Remove server ssl profile when removing client ssl profile * Support for adding raw service entries in security policy rules * Server ssl profiles * Fix argument typo in log message * Also call the superclass's tearDown() method when overriding it * Support remove client\_ssl\_profile\_binding from virtual server * Add Tier0RouteRedistributionConfig methods * Use updated search api path for version 3 & up * Add support for pool\_allocation in Tier1 * Allow updating NAT rule firewall\_match field * Fix test pep8 issue * Remove recursive call in \_proxy * Get BGP config from T0 locale service * Add search index out of sync exception * Support policy ipv6 routing via global config * Api for searching resource path by its realized type and id * Add support for using thumbprint to verify Manager certificate * Add RouteMap and PrefixList policy methods * Support setting hyperbus mode for Tier1SegmentPort * Update supported NSX version for relax\_scale\_validation * Support transaction for Tier1 NAT rules create & delete * Add Arg to Force Non-partial Update in Policy VS * Updating segment port with empty attachment type * Support get\_restore\_status in cluster\_management * Support first\_ip for allocate\_block\_subnet * Add T0 redistribution interfaces * Add 
transaction support in SecurityPolicies and Rules * Support LBAccessListControl for NSX 3.0.0 * Stop testing python 2 * Fix policy LB member deletion * Find OverlayTransportZone from host switch * Find tier0's tranzport\_zone via advanced\_config 14.0.4 ------ * Fix policy tier1 get\_realized\_id bug 15.0.0 ------ * Add caching for mdproxy & edge clusters for TZ validations * Add VPN TODO * Include error code in RealizationErrorStateError in Policy * Fix few policy transactions issues * Add exception for vlan conflicts * Add retry upon LB virtual server update conflicts * Add support for policy Metadata proxy resource * Use realization ID if not found by search api * Support setting hyperbus\_mode in SegmentPort Api * Add support for relax\_scale\_validation in LbService * Do not use None values on patch partial update calls * Add JWT provider abstract class * Do not wait in \_get\_realized\_id\_using\_search upon ERROR * Add a method to get edge cluster path * Allow user-provided update\_payload\_callback function * Reload enforcement-point api * Add ':' to the special character list for NSX tags * Use search-by-tags to get segment/tier1 realization Id * Add rule Tag parameter to build\_entry api * Support PATCH partial updates * Add silent option to get\_realization\_info apis * Reduce unittest time by changing the default max\_attempt * Update tenacity version and usage * NSXP: complete port attach/detach methods * Use policy realization retry parameters when necessary 14.0.3 ------ * Add support to force overwrite on updating adv rule * Fix realization retry parameters * Support policy edge cluster nodes getter * update for python3 train jobs 14.0.2 ------ * Add ability to set Spoofguard profiles to Segments * RA\_ONLY ipv6 ndra mode is no longer supported * Update NSX ver 3.0 features * Add support for hierachical call for T1 locale service * Retry on segment deletion if VMs not deleted yet * Add and Get centralized service port with Manager API * Add ability to control policy realization interval * Support NCP system health status API * Allow manager api add\_static\_route to take tags * Support force update logical port * Add support for wait\_until\_realized for policy LB resources * Retry on create-with-parent in NsxPendingDelete event * Handle overlap ips errors * Add realization support for NsxPolicyLoadBalancer API * Update default tag limits due to backend changes * NSXP: List tier1 segment interfaces * Support ipsec local endpoints advertisment flag * Add host switch profiles api * Fix get\_tag\_limits to be more careful * Add feature flag for ENS with QOS since 2.5 * Add generic session persistence api * Modified Tier1InterfaceDef to take dictionary for subnets * Add realization methods for NsxPolicyIpPoolApi * NSXP: add rule tag support * Allow updating logical port tags * Support advertisment rules update for policy tier1 * update hacking and bandit * Allow option to force allocate subnets * Support updating Policy tier1 route adv together with tier0 connectivity * Pass NDRA profile id only when specified * Remove double StaleRevision handling from FW rules * Fix removal of tier0\_path of a tier1 * Support getting realtime LBS usage in policy * Initial support for Policy IPSEC VPN resources * Suppress InsecureRequestWarning warnings * Use empty string instead of None for removing edge-cluster * NSX|V3+P: Change max allowed host routes * Fix policy remove\_edge\_cluster * Add more information to realization errors logging * Add exception for deleting an object in use * 
Retry policy create by PATCH in case of NsxPendingDelete error * Add ability to set Tier-0 to an Infra Segment * Add ip\_pool\_id attr to Segment resource * Remove unused pool\_data parameter from policy LB pool actions * update zuul config * NSXP LB: use \_update instead of update() * NSXP LB: Fix SSL profile handling * Fix Policy LB pool monitor actions * Define and use realization exceptions * Adding the option to configure disabled mac profile * misc build clean-ups * Remove unused tools * Fix policy LB statistics & status getters * Add category to policy update entries * Generic solution for partial updates * Use correct extra\_config format * NSXP: Additional pool member attributes * Revert "Check manager status for policy using passthough healthcheck api" * move zuul config to repo * Check manager status for policy using passthough healthcheck api * Fix updating Policy LB pool with no members * Adding LB rule as last if the possition is too big * Add get\_path methods for policy LB resources * OpenDev Migration Patch * Fix T1 partial update * Add "tags" to \_update\_helper * Inventory api changes * NSXP: Support router advertisement rules * NSXP: Pool member delete API * Ensure service when creating T1 service interface * NSXP LB: Protect members from overwrite * Fix ndra profile in partial T1 update * Enhance updating a group and security policy * Fixes for Policy LB resources * NSXLib create cert should return a list * Fix update\_entry in SecurityPolicyApi * NSX|P: Remove passthrough API from enable\_standby\_relocation * Set random security policy rule id if not set in build\_entry * Add ipv6 ndra profile to Tier1Interface * Update Policy exclude list api * Fix update\_entries in security policy * Add extra\_configs param for LogicalPort * NSXP LB: add max concurrent connections to VS object * Wait until realized in silent * Include icmp\_type and icmp\_code in the payload even they are 0 * Remove oslosphinx from constraints * Add IPPool static subnet policy support * Add the ability to create union expressions for Group * Add more group member types for Condition * Add create API to inventory client * Add support for policy exclude list * Improve wait-for-realization * Add Policy support for WAF profiles * Fix NDRA profile value * Add Service class that allows multiple/mixed entries * Add search error code & retry * Allow updating a single rule entry in a Security Policy * Add Ipv6 NDRA profiles for policy * NSXP LB: Monitor attributes handling * NSXP LB: Change HM name attribute for consistency * Add ability to enable/disable ipv6 forwarding * NSXP LB: Protect pool algorithm from overwrite * Add tags to LB service update * Revert "Fix removal of tier0\_path of a tier1" * Support inventory APIs * Allow empty attachment type in policy port * NSXP: Avoid overwrite object properties on update * Fix removal of tier0\_path of a tier1 * Support searching resources by attributes * Support for Bridge Endpoint Profiles * NSXP: LB rules unit tests * NSXP: LB member update method * NSXP: LB rule APIs should have tenant parameter * NSXP: add update method for LB rules * NSX|T: Backend parameter for max subnet static routes * Fix attribute name in Tier1 router object * Add enabled and sequence number for NAT rule * Add sequence number to the creation of SecurityPolicy * NSX|T: Add NSX limit of IP address association to port * Use a new node health api for MP status * Fix a pep8 module import error * Add function to get security policy rule * Support ip version in policy security rules * 
NSXP: Allow setting of LB L7 rule order * NSXP LB: Work around backend patch bug for VS * Add wait\_until\_realized func to policyGroup API * Add policy group get\_path api * Add policy Tier1 interface for vlan backed segments * NSXP: loadbalancer monitor API * Fix service creation under transaction * Add wait\_until\_realized method to Tier1SegmentPort * Add policy gayeway-policy support * Support policy transaction in comm map create * Fix nsgroup update to access the logging field safely * Verify validate\_tier0 gets tier0 to produce the right error * Change to \_create\_or\_store for lb resources to enable transaction * NSXP LB: Remove monitor only if pool has it * Add connectivity\_path parameter in Lb\_Service def * Remove certificate decoding if not needed * Policy api to get tier0/tier1/edge cluster path * Add category to policy communication map update calls * Add disable\_firewall attribute to tier0/1 & fix set\_standby\_relocation * Add the ability to build Subnet object for Segments * Improve policy resources unit tests 13.1.0 ------ * Added retries if API call fails due to MP cluster reconfig * Enhance support for policy transaction * Add API to retrieve realized info for IpBlockSubnet * NSXP: expose path\_to\_id method * Retry http requests on timeouts * Fix check\_manager\_status to support older NSX versions * Fix get methods for some policy resources * Add return value to add\_router\_link\_port method * Fix missing traffic tag in SegmentPort create method * Return related error code with ManagerError * Support DHCP config path for policy Tier1 router * Remove retry from tier1 creation * Improve Cluster validation checks * Add API to retrieve realized info for IpAllocations * Add advanced config to Tier1Segment resource * Fix attribute name for IpBlockSubnet resource * Use original tier1/segment name in update if not given * Fix policy transaction * Add apis to get tier0 uplink cidrs and not just ips * Add Tier0Natrule APIs to core\_resources * Update policy segment & port admin state using passthrough api * Support response status codes for LB HM * Update home-page * Add decorator to requirements.txt * Move get\_dhcp\_opt\_code to utils * Add Certificate to policy API * Enable router standby relocation for policy tier1 * Add policy Edge Cluster resource * Add new policy tier0 actions * Policy DHCP relay support * Policy DHCP relay support using passthrough api * Add decorator for verifing passthrough is allowed * NSX|T: Support T1 LR HA relocation * Re-Add policy unit tests * Move policy code to dedicated folder * Add manager status validation to validate connection * Full support for policy QoS profile * Add specific realization getter for segment * Add configuration for policy realization retries * Add retry on policy tier1 creation * Disallow update of sec policy category * Add retries parameters to wait\_until\_realized * Add optional project id to build\_v3\_api\_version\_project\_tag * Use template for lower-constraints * Add support for removing subnets from a policy segment * Add protection against empty realization results * Add policy attachment types constants * Add caching for Policy get commands * Add segment ports profiles binding support * Add loadbalancer policy API into vmware-nsxlib * Add static routes support * Policy pass-through api support * Improve policy realization state apis * Update policy NAT firewall actions * Fix policy router advertisment api * Allow getting transport zone ID from T1 DR * Change openstack-dev to openstack-discuss * Add 
policy segemnt profiles api * Add Tier1 NAT rules support * Handle get\_default\_headers errors * Add silent argument in policy get APIs * Add Policy IpPool resource APIs * Add NSX policy IpBlock resource * Add NSX policy tier1 segment port resource * Enhance Policy routers apis * Router: check if service router exists on backend * Change policy update APIs to allow unset * Deprecate openstack-based code in DHCP * Remove the precedence attribute from the policy communication maps * Initial support for policy transactions * Policy: use new realization API * Support update tier0 in policy tier1 router * Readonly support for Policy transport zones * NSX|T: Add handling of disable/enable firewall * Fix Policy resources list apis * Add utility to generate tags with api version & project name * Add tags to policy api service resources * Enhanced support for updating router advertisement rules * Make a common RouterDef parent for T0 and T1 * Rename communication policy APIs * Change return values for policy resource APIs * Refactor policy update and add missing tests 13.0.2 ------ * Update the max NS groups criteria tags number dynamically * Make resource\_type property of policy def * Fix multi-cluster connectivity * Correct validate\_dhcp\_params -> validate\_icmp\_params * Support silent GET for policy resources * WIP: Policy: Add Tier0 resource * Add policy port resource * Add get\_usage API to loadbalancer service * Rename deployment-zones * Adjust policy Networks to platform changes * Fix policy realization state issues * Update the logged fields of all entries in a map * Add a constant of the default domain * Refactor policy resources * Fix cluster connectivity * Amend allowed ICMP types and codes in strict mode * Fix policy communication maps categories * Add IP address expression as policy group condition * Add 'logged' field on policy communication map entries * Fix the revision needed for security rules version * Policy: support Segment and NetworkSegment * Enhanced support for policy services * Enhance the groups support * Enhance communication maps support * Allow Policy Resources to add and update tags * Add 'direction' field to CommunicationMapEntry * Add feature for policy networking support * Add support to get overaly\_tz id from Tier0 router * Add nsx policy network and segment resources 13.0.1 ------ * Update Allocation Profile feature constant * New API for deleting IP pool forcefully * Update UPPER\_CONSTRAINTS\_FILE for stable/rocky * Expose allocation pool to router creation * NSX|V3: Support new icmp codes and types list * New api for getting VPN session status * New api for getting the LB virtual servers status 13.0.0 ------ * New resource for getting the managers IPs of the cluster * Fix get\_connected\_t0\_transit\_net ot return the IP we need * New api for getting router port transit cidr * Add router lib unittests * Add None default for an unused parameter in rotuer API * Validate ICMP type & code for section rules * Support MP cluster unavailable managers * Add version 2.4 and initial features list * Fix service ports for egress firewall rule * Remove unused context arguments from nsgroup/section apis * Fix section rules protocols handling 12.1.1 ------ * Add retry to firewall section delete * Fix an issue: TypeError: \_\_str\_\_ returned non-string (type NoneType) * Switch to stestr * Remove the redundant space and colon from the log * make python 3 the default for tox.ini * Escape illegal chars in LS name * Fix nsxlib UT mocks to return the right output format * 
Retry on 503 Service Unavailable * Remove sha224 from supported client cert hash algs * add stestr to test requirements * Add logging when initializing a default FW section * Enabled naming nat rules * Add transport\_node\_uuid in VifAttachmentContext * Retry is IOError is received * Add method to check NSX export type * Add server-ip-address to the suppoprted dhcp options * Handle cluster connection closed by server * Add router ports apis * Add revision\_id when creating FW rule * uncap eventlet * Fix pep8 new warnings * Add lower-constraints job * Replace uuid.uuid4() with uuidutils.generate\_uuid() * Remove unsupported VPN algorithms * tox: Remove unnecessary configuration * Updated from global requirements * Add exception for policy deleted objects * Updated from global requirements * Really remove tox\_install.sh * Adding vpn + dhcp unit tests * Policy: support multiple services in a communication map * Support NO\_SNAT & NO\_DNAT rules in NSX 2.2 * Policy make edge cluster & transport zones optional * Add more update options to VPN objects * Retry search calls on error * Updated from global requirements * Avoid tools/tox\_install.sh 12.1.0 ------ * Updated from global requirements * NsxLibMetadataProxy: get md proxy status function * Updated from global requirements * Separate native DHCP apis to allow external usage * Remove some old TODOs from the code * Enabled adding tags to nat rules * Add router transport zone * Add Transport Nodes api support & router TZ support * migrate to stestr * Policy: Add connection information to enforcement points * Refactor policy code to match the new NSX * fix typo in tox\_install.sh * change http with https * Add support for http PATCH method * VPN DPD timeout changes * VPN policy rules update * Fix rate-limit attribute name * fix error url * Enable router interface to have 'centralized' type * VLAN ID and trunk spec are exclusive - can only set one * Enable search\_by\_tags to use only scope or tag * Ensure that max\_attempts is set * Remove obsolete tempest-lib * Updated from global requirements * Fix VPN local endpoint structure * Fix VPN api as the NSX api changed * Updated from global requirements * Support get & update for rate limit * NSXv3: Enhance NSGroup create and update functions * Refactor security modules and retry * Raise StaleResource when a 409 is returned by NSX * NSX rate limit support * Add router advertisement rules support * initial vpn ipsec resources * Updated from global requirements * Updated from global requirements * Allow creating firewall section with empty rule * Ensure delete retry for ip set resources * Ensure update retry for load balancing resources * Add in a retry decoractor to loadbalancer updates * Mock the update tags limits code in unittests * Logical switch trunk vlan support * Support NSX tag limitations * NSXv3: Return body if resource\_type is None * Add find cert by pem data method * Updated from global requirements * Remove neutron-lib from the dependencies 11.1.4 ------ * Updated from global requirements * Updated from global requirements * Allow add\_rule(s) method to accept 'operation' as an arg * Re-apply skipped cluster test * Add in feature for 'on behalf of' * Add IPSEC VPN feature flag and update version number * use new payload objects for \*\_INIT callbacks * Fix typos in comments * Updated from global requirements 11.1.3 ------ * Provide a callback to inject headers to the NSX * Add support to retrieve VIFs and VirtualMachines * NSXv3: Update stats api * Fix failing unit test * Add ssl 
profile binding methods for LB virtual server * Add router to nsxlib class * Add private\_key and passphrase to cert creation 11.1.2 ------ * Cache Get results for some nsxlib resources 11.1.1 ------ * Updated from global requirements * Add supported feature for VLAN router interfaces * Update ip\_protocol during loadbalancer app profile updates * Support ENS transport zone * Updated from global requirements * Update update\_advertisement depending on NSX version * Add LB related flags for update\_route\_advertisement * Move LBaaS to 2.1 supported feature * Support DHCP Relay profile * Add feature to update NSGroup and FirewallSection tags * Refactor resources tests * Updated from global requirements * Fix client cert authentication * Fix transport zones mock for testcase * Fix request with retry code * New api: update metadata proxy server * Add log messages to retry attempts * Write and delete client cert for each request * Add FW\_INSERT\_AFTER constants to nsx\_constants * Add the nsxlib to all NsxLibApiBase resources * Handle bad or expired XSRF token * Add allow-overwrite default header by config * Create session for XSRF token * New api: list logical routers by type * Ensure retry on stale rule update or deletion * Updated from global requirements * Get transport zone type api * Updated from global requirements * Remove unused code * Allow setting the description of switches and ports * Policy: Delete service entries * Policy: Delete communication profile entries * Ensure retry on stale resources for exclude list updates * Use domain in policy deployment maps api * LBaaS: Some API changes on LB class * Revert "Write and delete client cert for each request" * Remove test requirement WebTest * Write and delete client cert for each request * LBaaS: Fix a typo in add\_to\_list method * LBaaS: Add common add\_to\_list and remove\_from\_list * Nsx Policy: support ICMP service * Nsx policy: adjust to latest backend changes * DHCP: add method to get static routes * Updated from global requirements * NSX Policy: Adjust to changes in backend API * Updated from global requirements * VMware-NSXLib:Remove Invalid Link * Add Load Balancer Application Rule * Updated from global requirements * Api for getting a resource id by the type & tag * Add DHPC relay service to router port * Add resource type to resource definitions * Add policy apis to check realized state 10.1.2 ------ * Support getting all results from search API * Use flake8-import-order plugin * Support different options for deleting NAT rules * Updated from global requirements * Adding optional source network to GW SNAT rule creation * Add constants for PAREMT and CHILD vif types * Extend QosSwitchingProfile actions * Add TODO for load balancer feature support * list & update methods for router NAT rules * LB: Add methods to add/remove monitor from pool * Add NSXv3 2.1.0 version constant * Updated from global requirements * Catch another error type for missing certificate * Support bypass-firewall param for router NAT rules * Add LB methods for neutron LBaaS * New api: Get the default rule in a firewall section * Extend client silent mode * Updated from global requirements * Adjust to cosmetic changes in policy API * Remove support for py34 * Avoid version API call for policy lib * Updated from global requirements * Updated from global requirements * Update policy resources apis * Add ellapsed time to REST response logs * Add location header to response mocks * Change log level of cert-realated SSL errors * Mask passwords while logging 
REST requests * Change resource type for LB resources * Adding API for checking features availabilty * Updated from global requirements * LBaaS: Add status and stats APIs * Remove \n from log messages * New API to get the firewall section of a router * Support tftp-server dhcp option * Updated from global requirements * nsxv3: Add API wrapper for Load Balancer * Updated from global requirements * Updated from global requirements * Add informative error message for 404 responses * New api to get logical port by attachment * Refactor nsxlib resources * Updated from global requirements * Correct param nsx\_api\_managers description error 10.1.1 ------ * Add IPAM error code * Add method get\_excludelist to the class NsxLibFirewallSection * NSX Policy cosmetic changes * NSX Policy resources * Updated from global requirements * NSXv3: Fix init of default firewall section * Updated from global requirements * Add support to force delete routers * NSX Policy preparations * Drop log translations 10.1.0.a1 --------- * Updated from global requirements * Fix for hacking N536 * Add list method to the qos profile * Name and ID validation may be paginated * Fix FIP DNAT rule match\_ports bug * [IPSet]: Allow updating IPSet with empty list * Add util method to retrieve complex expressions for NSGroup * Support multiple client certificate per identity * Adding Optional default dns values for native dhcp * Deprecate unused dhcp\_profile\_uuid from config * Pass node ID and user permissions when creating NSX identity * Updated from global requirements * Updated from global requirements * Fix FW rule dictionary * Replace client cert file with cert provider * Add get\_code to LogicalDhcpServer * Fix parameter args * IpPools: pass tags on create/update operations * Update interface about NSX IPAM and CIF API change * Updated from global requirements 0.7.1 ----- * Get list of IP block and IP block subnet * Add validation for client certificate subject * Mute log for endpoint connection validation * Prevent downtime when client cert is regenerated 0.7.0 ----- * Support client certificate import * [NSX Search]: Append resource\_type while limiting scope for a resource * Add support for IPSet CRUD operations * Updated from global requirements * Add in tox -s cover support 0.7.3 ----- * Add methods for firewall section and rule * Updated from global requirements * Use project-id instead of tenant -id in nsxlib * Add 'applied\_tos' arg while creating FirewallRule 0.7.2 ----- * Add support to update tags for FirewallSections * Allow passing args of type list for NSGroup and firewall rule methods * Add support to search resources based on tags or resource type * Fix logical switch name update * Add method to security module * Fix address bindings in logical port update * Allow setting QoS shaper values to 0 * Updated from global requirements * Fix bugs in certificate management exceptions * Disable uRPF check on lrp on container LS * Add match\_ports argument while adding NAT rule * Add support to create/delete ip block subnet on backend * Add IP POOL ID during port create/update 0.6.0 ----- * Basic support for client cert authentication * NSXv3: Add support for dns-integration extension * Client certificate management for NSXV3 authentication * Support ip-pool update * Support router description * Expand PEP8 tests on nsxlib * Add Constraints support * Unit tests: Allow multiple responses in mocked client 0.5.0 ----- * IpPools support * Add gateway\_ip arg for static bindings * NSXv3: Search for the default section 
from the end * NSXv3: Do not allow empty tag values on resources 0.4.0 ----- * NSXv3: Require target\_type when adding to firewall exclude list 0.3.0 ----- * NSXv3: Add support for firewall exclude list API * NSXv3 Client: Add paginated response * Ensure that correct exception is raised * Using assertIsNone() instead of assertEqual(None) * NSXv3: Remove duplicate method definition * Remove retry from nsgroup member update call * Fix exception handling * NSXv3: Fix string format in logging message * NSXv3: Fix a router port name update issue * Updated from global requirements * Remove vmware-nsxlib bug link * Support both egress and ingress directions on QoS profile * Add NSGroup manager tests 0.2.0 ----- * Updated from global requirements * Updated from global requirements * NSXv3: Fix allowed address pairs switching profile * Replace retrying with tenacity * NSX|V3 fix nsxlib raised error with managers * NSX|v3 replace dhcp profile and metadata proxy uuids with names * Fix nsxlib tox init to not fail on upper constraints 0.1.0 ----- * Enable release notes translation * Move all nsxlib code and tests to vmware\_nsxlib * Updated from global requirements * Cleanup tox.ini: Remove obsolete constraints * Add initial framework using cookiecutter * Added .gitreview vmware-nsxlib-15.0.6/.zuul.yaml0000664000175000017500000000020413623151571016364 0ustar zuulzuul00000000000000- project: templates: - openstack-lower-constraints-jobs - openstack-python3-ussuri-jobs - check-requirements vmware-nsxlib-15.0.6/vmware_nsxlib/0000775000175000017500000000000013623151652017307 5ustar zuulzuul00000000000000vmware-nsxlib-15.0.6/vmware_nsxlib/_i18n.py0000664000175000017500000000210113623151571020571 0ustar zuulzuul00000000000000# Copyright (c) 2015 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import oslo_i18n DOMAIN = "vmware_nsxlib" _translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) # The primary translation function using the well-known name "_" _ = _translators.primary # The contextual translation function using the name "_C" _C = _translators.contextual_form # The plural translation function using the name "_P" _P = _translators.plural_form def get_available_languages(): return oslo_i18n.get_available_languages(DOMAIN) vmware-nsxlib-15.0.6/vmware_nsxlib/__init__.py0000664000175000017500000000123513623151571021421 0ustar zuulzuul00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
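# Illustrative note (added; not part of the original module): the
# pbr-derived __version__ defined just below is how consumers can read
# the library version at runtime, e.g. (assuming the package is installed):
#   >>> import vmware_nsxlib
#   >>> vmware_nsxlib.__version__
#   '15.0.6'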
import pbr.version __version__ = pbr.version.VersionInfo( 'vmware_nsxlib').version_string() vmware-nsxlib-15.0.6/vmware_nsxlib/v3/0000775000175000017500000000000013623151652017637 5ustar zuulzuul00000000000000vmware-nsxlib-15.0.6/vmware_nsxlib/v3/client_cert.py0000664000175000017500000003053013623151571022505 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime from time import time from OpenSSL import crypto from oslo_log import log from oslo_utils import uuidutils from vmware_nsxlib._i18n import _ from vmware_nsxlib.v3 import exceptions as nsxlib_exceptions LOG = log.getLogger(__name__) CERT_SUBJECT_COUNTRY = 'country' CERT_SUBJECT_STATE = 'state' CERT_SUBJECT_ORG = 'organization' CERT_SUBJECT_UNIT = 'unit' CERT_SUBJECT_HOST = 'hostname' def validate_cert_params(key_size, valid_for_days, signature_alg, subject): """Validate parameters for certificate""" expected_key_sizes = (2048, 4096) if key_size not in expected_key_sizes: raise nsxlib_exceptions.NsxLibInvalidInput( error_message=_('Invalid key size %(value)d' '(must be one of %(list)s)') % {'value': key_size, 'list': expected_key_sizes}) expected_signature_algs = ('sha256') if signature_alg not in expected_signature_algs: raise nsxlib_exceptions.NsxLibInvalidInput( error_message=_('Invalid signature algorithm %(value)s' '(must be one of %(list)s)') % {'value': signature_alg, 'list': expected_signature_algs}) if (CERT_SUBJECT_COUNTRY in subject and (len(subject[CERT_SUBJECT_COUNTRY]) != 2)): raise nsxlib_exceptions.NsxLibInvalidInput( error_message=_('Invalid country %s: ' 'must be exactly 2 characters') % subject[CERT_SUBJECT_COUNTRY]) # values defined in rfc5280 max_len_constraints = {CERT_SUBJECT_STATE: 128, CERT_SUBJECT_ORG: 64, CERT_SUBJECT_UNIT: 64, CERT_SUBJECT_HOST: 64} for field, max_len in max_len_constraints.items(): if field in subject and (len(subject[field]) > max_len): raise nsxlib_exceptions.NsxLibInvalidInput( error_message=_('Invalid %(field)s [%(value)s]: ' 'must not exceed %(max)d characters') % {'field': field, 'value': subject[field], 'max': max_len}) def generate_self_signed_cert_pair(key_size, valid_for_days, signature_alg, subject): """Generate self signed certificate and key pair""" validate_cert_params(key_size, valid_for_days, signature_alg, subject) # generate key pair key = crypto.PKey() key.generate_key(crypto.TYPE_RSA, key_size) # generate certificate cert = crypto.X509() cert.get_subject().C = subject.get(CERT_SUBJECT_COUNTRY, 'US') cert.get_subject().ST = subject.get(CERT_SUBJECT_STATE, 'California') cert.get_subject().O = subject.get(CERT_SUBJECT_ORG, 'MyOrg') cert.get_subject().OU = subject.get(CERT_SUBJECT_UNIT, 'MyUnit') cert.get_subject().CN = subject.get(CERT_SUBJECT_HOST, 'myorg.com') cert.gmtime_adj_notBefore(0) cert.gmtime_adj_notAfter(valid_for_days * 24 * 60 * 60) cert.set_issuer(cert.get_subject()) cert.set_pubkey(key) cert.set_serial_number(int(time())) cert.sign(key, signature_alg) return cert, key class 
ClientCertificateManager(object): """Manage Client Certificate for backend authentication There should be single client certificate associated with certain principal identity. Certificate and PK storage is pluggable. Storage API (similar to neutron-lbaas barbican API): store_cert(purpose, certificate, private_key) get_cert(purpose) delete_cert(purpose) """ def __init__(self, identity, nsx_trust_management, storage_driver): self._cert = None self._key = None self._storage_driver = storage_driver self._identity = identity self._nsx_trust_management = nsx_trust_management def __enter__(self): """Load cert from storage This is an optimization to avoid repeated storage access. Usage example: with cert_manager as c: if c.exists(): date = c.expires_on() days = c.exires_in_days() """ self._cert, self._key = self.get_cert_and_key() return self def __exit__(self, type, value, traceback): self._cert = None self._key = None def generate(self, subject, key_size=2048, valid_for_days=3650, signature_alg='sha256', node_id=None): """Generate new certificate and register it in the system Generate certificate with RSA key based on arguments provided, register and associate it to principal identity on backend, and store it in storage. If certificate already exists, fail. """ self._validate_empty() cert, key = generate_self_signed_cert_pair(key_size, valid_for_days, signature_alg, subject) # register on backend self._register_cert(cert, node_id or uuidutils.generate_uuid()) # save in storage cert_pem = crypto.dump_certificate(crypto.FILETYPE_PEM, cert) key_pem = crypto.dump_privatekey(crypto.FILETYPE_PEM, key) self._storage_driver.store_cert(self._identity, cert_pem, key_pem) LOG.debug("Client certificate generated successfully") def delete(self): """Delete existing certificate from storage and backend""" cert_pem, key_pem = self.get_pem() if not cert_pem: return ok = True try: self._nsx_trust_management.delete_cert_and_identity( self._identity, cert_pem) except nsxlib_exceptions.ManagerError as e: LOG.error("Failed to clear certificate on backend: %s", e) ok = False try: self._storage_driver.delete_cert(self._identity) except Exception as e: LOG.error("Failed to clear certificate in storage: %s", e) ok = False self._cert = None self._key = None if ok: LOG.debug("Client certificate removed successfully") def exists(self): """Check if certificate was created for given identity""" if self._cert: return True cert_pem, key_pem = self._storage_driver.get_cert(self._identity) return cert_pem is not None def _get_cert_from_file(self, filename): with open(filename, 'r') as f: cert_pem = f.read() if not cert_pem: raise nsxlib_exceptions.CertificateError( msg=_("Failed to read certificate from %s") % filename) # validate correct crypto try: cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_pem) except crypto.Error: raise nsxlib_exceptions.CertificateError( msg=_("Failed to import client certificate")) return cert def import_pem(self, filename, node_id=None): """Import and register existing certificate in PEM format""" # TODO(annak): support PK import as well self._validate_empty() cert = self._get_cert_from_file(filename) # register on backend self._register_cert(cert, node_id or uuidutils.generate_uuid()) cert_pem = crypto.dump_certificate(crypto.FILETYPE_PEM, cert) self._storage_driver.store_cert(self._identity, cert_pem, None) LOG.debug("Client certificate imported successfully") def delete_pem(self, filename): """Delete specified client certificate without storage verification""" # This file may contain 
private key # passing the pem through crypto will perform validation and # strip off the key cert = self._get_cert_from_file(filename) cert_pem = crypto.dump_certificate(crypto.FILETYPE_PEM, cert) self._nsx_trust_management.delete_cert_and_identity(self._identity, cert_pem) self._storage_driver.delete_cert(self._identity) def _load_from_storage(self): """Returns certificate and key pair in PEM format""" cert_pem, key_pem = self._storage_driver.get_cert(self._identity) if cert_pem is None: return None, None return (cert_pem, key_pem) def get_pem(self): return self._load_from_storage() def export_pem(self, filename): """Exports certificate and key pair to file""" self._validate_exists() cert_pem, key_pem = self._load_from_storage() with open(filename, 'w') as f: f.write(cert_pem) f.write(key_pem) def expires_on(self): """Returns certificate expiration timestamp""" self._validate_exists() cert, key = self.get_cert_and_key() converted = datetime.datetime.strptime( cert.get_notAfter().decode(), "%Y%m%d%H%M%SZ") return converted def expires_in_days(self): """Returns in how many days the certificate expires""" delta = self.expires_on() - datetime.datetime.utcnow() return delta.days def get_subject(self): self._validate_exists() cert, key = self.get_cert_and_key() return {CERT_SUBJECT_COUNTRY: cert.get_subject().C, CERT_SUBJECT_STATE: cert.get_subject().ST, CERT_SUBJECT_ORG: cert.get_subject().O, CERT_SUBJECT_UNIT: cert.get_subject().OU, CERT_SUBJECT_HOST: cert.get_subject().CN} def get_signature_alg(self): self._validate_exists() cert, key = self.get_cert_and_key() return cert.get_signature_algorithm() def get_key_size(self): self._validate_exists() cert, key = self.get_cert_and_key() return key.bits() def _validate_empty(self): if self.exists(): raise nsxlib_exceptions.ObjectAlreadyExists( object_type='Client Certificate') def _validate_exists(self): if not self.exists(): raise nsxlib_exceptions.ObjectNotGenerated( object_type='Client Certificate') def get_cert_and_key(self): """Load cert and key from storage""" if self._cert and self._key: return self._cert, self._key cert_pem, key_pem = self._load_from_storage() if cert_pem is None: return None, None try: cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_pem) key = crypto.load_privatekey(crypto.FILETYPE_PEM, key_pem) except crypto.Error: raise nsxlib_exceptions.CertificateError( msg="Failed to load client certificate") return cert, key def _register_cert(self, cert, node_id): cert_pem = crypto.dump_certificate(crypto.FILETYPE_PEM, cert) self._nsx_trust_management.create_cert_and_identity(self._identity, cert_pem, node_id) class ClientCertProvider(object): """Basic implementation for client certificate provider Responsible for preparing, providing and disposing client certificate file. Basic implementation assumes the file exists in the file system and does not take responsibility of deleting this sensitive information after use. Inheriting objects should make use of __enter__ and __exit__ APIs to prepare and dispose the certificate file data. """ def __init__(self, filename): self._filename = filename def __enter__(self): return self def __exit__(self, type, value, traceback): pass def filename(self): return self._filename vmware-nsxlib-15.0.6/vmware_nsxlib/v3/cluster.py0000664000175000017500000006753413623151571021711 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import abc import contextlib import copy import datetime import inspect import itertools import logging import re import eventlet from eventlet import greenpool from eventlet import pools import OpenSSL from oslo_log import log from oslo_service import loopingcall import requests from requests import adapters from requests import exceptions as requests_exceptions import six import six.moves.urllib.parse as urlparse import tenacity import urllib3 from vmware_nsxlib._i18n import _ from vmware_nsxlib.v3 import client as nsx_client from vmware_nsxlib.v3 import exceptions from vmware_nsxlib.v3 import utils LOG = log.getLogger(__name__) # disable warning message for each HTTP retry logging.getLogger( "urllib3.connectionpool").setLevel(logging.ERROR) # Hide the InsecureRequestWarning from urllib3 urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) @six.add_metaclass(abc.ABCMeta) class AbstractHTTPProvider(object): """Interface for providers of HTTP connections. which are responsible for creating and validating connections for their underlying HTTP support. """ @property def default_scheme(self): return 'https' @abc.abstractproperty def provider_id(self): """A unique string name for this provider.""" pass @abc.abstractmethod def validate_connection(self, cluster_api, endpoint, conn): """Validate the said connection for the given endpoint and cluster.""" pass @abc.abstractmethod def new_connection(self, cluster_api, provider): """Create a new http connection. Create a new http connection for the said cluster and cluster provider. The actual connection should duck type requests.Session http methods (get(), put(), etc.). """ pass @abc.abstractmethod def is_connection_exception(self, exception): """Determine if the given exception is related to connection failure. Return True if it's a connection exception and False otherwise. """ @abc.abstractmethod def is_timeout_exception(self, exception): """Determine if the given exception is related to timeout. Return True if it's a timeout exception and False otherwise. """ class TimeoutSession(requests.Session): """Extends requests.Session to support timeout at the session level.""" def __init__(self, timeout, read_timeout): self.timeout = timeout self.read_timeout = read_timeout self.cert_provider = None super(TimeoutSession, self).__init__() @property def cert_provider(self): return self._cert_provider @cert_provider.setter def cert_provider(self, value): self._cert_provider = value # wrapper timeouts at the session level # see: https://goo.gl/xNk7aM def request(self, *args, **kwargs): def request_with_retry_on_ssl_error(*args, **kwargs): try: return super(TimeoutSession, self).request(*args, **kwargs) except (IOError, OpenSSL.SSL.Error): # This can happen when connection tries to access certificate # file it was opened with (renegotiation?) # Proper way to solve this would be to pass in-memory cert # to ssl C code. 
# Retrying here works around the problem return super(TimeoutSession, self).request(*args, **kwargs) def get_cert_provider(): if inspect.isclass(self._cert_provider): # If client provided certificate provider as a class, # we spawn an instance here return self._cert_provider() return self._cert_provider if 'timeout' not in kwargs: kwargs['timeout'] = (self.timeout, self.read_timeout) if not self.cert_provider: # No client certificate needed return super(TimeoutSession, self).request(*args, **kwargs) if self.cert is not None: # Recursive call - shouldn't happen return request_with_retry_on_ssl_error(*args, **kwargs) # The following with statement allows for preparing certificate and # private key file and dispose it at the end of request # (since PK is sensitive information, immediate disposal is # important). # It would be optimal to populate certificate once per connection, # per request. Unfortunately requests library verifies cert file # existence regardless of whether certificate is going to be used # for this request. # Optimal solution for this would be to expose certificate as variable # and not as a file to the SSL library with get_cert_provider() as provider: self.cert = provider.filename() try: ret = request_with_retry_on_ssl_error(*args, **kwargs) except Exception as e: self.cert = None raise e self.cert = None return ret class NSXRequestsHTTPProvider(AbstractHTTPProvider): """Concrete implementation of AbstractHTTPProvider. using requests.Session() as the underlying connection. """ SESSION_CREATE_URL = '/api/session/create' COOKIE_FIELD = 'Cookie' SET_COOKIE_FIELD = 'Set-Cookie' XSRF_TOKEN = 'X-XSRF-TOKEN' JSESSIONID = 'JSESSIONID' @property def provider_id(self): return "%s-%s" % (requests.__title__, requests.__version__) def validate_connection(self, cluster_api, endpoint, conn): client = nsx_client.NSX3Client( conn, url_prefix=endpoint.provider.url, url_path_base=cluster_api.nsxlib_config.url_base, default_headers=conn.default_headers) # Check the manager state directly if cluster_api.nsxlib_config.validate_connection_method: cluster_api.nsxlib_config.validate_connection_method( client, endpoint.provider.url) # If keeplive section returns a list, it is assumed to be non-empty keepalive_section = cluster_api.nsxlib_config.keepalive_section result = client.get(keepalive_section, silent=True) if not result or result.get('result_count', 1) <= 0: msg = _("No %(section)s found " "for '%(url)s'") % {'section': keepalive_section, 'url': endpoint.provider.url} LOG.warning(msg) raise exceptions.ResourceNotFound( manager=endpoint.provider.url, operation=msg) def new_connection(self, cluster_api, provider): config = cluster_api.nsxlib_config session = TimeoutSession(config.http_timeout, config.http_read_timeout) if config.client_cert_provider: session.cert_provider = config.client_cert_provider # Set the headers with Auth info when token provider is set, # otherwise set the username and password elif not config.token_provider: session.auth = (provider.username, provider.password) # NSX v3 doesn't use redirects session.max_redirects = 0 if config.insecure: # no verification on server certificate session.verify = False thumbprint = None elif provider.ca_file: # verify using the said ca bundle path session.verify = provider.ca_file thumbprint = None elif provider.thumbprint: # verify using the thumbprint session.verify = None thumbprint = provider.thumbprint else: # verify using the default system root CAs session.verify = True thumbprint = None # we are pooling with eventlet in the 
cluster class adapter = NSXHTTPAdapter( pool_connections=1, pool_maxsize=1, max_retries=config.retries, pool_block=False, thumbprint=thumbprint) session.mount('http://', adapter) session.mount('https://', adapter) self.get_default_headers(session, provider, config.allow_overwrite_header, config.token_provider) return session def is_connection_exception(self, exception): return isinstance(exception, requests_exceptions.ConnectionError) def is_timeout_exception(self, exception): return isinstance(exception, requests_exceptions.Timeout) def is_conn_open_exception(self, exception): return isinstance(exception, requests_exceptions.ConnectTimeout) def get_default_headers(self, session, provider, allow_overwrite_header, token_provider=None): """Get the default headers that should be added to future requests""" session.default_headers = {} # Add allow-overwrite if configured if allow_overwrite_header: session.default_headers['X-Allow-Overwrite'] = 'true' # Perform the initial session create and get the relevant jsessionid & # X-XSRF-TOKEN for future requests req_data = '' req_headers = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded'} # Insert the JWT in Auth header if using tokens for auth if token_provider: try: token_value = token_provider.get_token() bearer_token = token_provider.get_header_value(token_value) token_header = {"Authorization": bearer_token} session.default_headers.update(token_header) req_headers.update(token_header) except exceptions.BadJSONWebTokenProviderRequest as e: LOG.error("Session create failed for endpoint %s due to " "error in retrieving JSON Web Token: %s", provider.url, e) elif not session.cert_provider: # With client certificate authentication, username and password # may not be provided. # If provided, backend treats these credentials as authentication # and ignores client cert as principal identity indication. req_data = 'j_username=%s&j_password=%s' % (provider.username, provider.password) # Cannot use the certificate at this stage, because it is used for # the certificate generation try: resp = session.request( 'post', provider.url + self.SESSION_CREATE_URL, data=req_data, headers=req_headers) except Exception as e: # Error 403 might be because the backend does not support this for # all versions. LOG.warning("Session create failed for endpoint %s with error %s", provider.url, e) else: if resp.status_code != 200 and resp.status_code != 201: LOG.warning("Session create failed for endpoint %s with " "response %s", provider.url, resp.status_code) # this may will later cause the endpoint to be Down else: for header_name in resp.headers: if self.SET_COOKIE_FIELD.lower() == header_name.lower(): m = re.match('%s=.*?\;' % self.JSESSIONID, # noqa resp.headers[header_name]) if m: session.default_headers[self.COOKIE_FIELD] = ( m.group()) if self.XSRF_TOKEN.lower() == header_name.lower(): session.default_headers[self.XSRF_TOKEN] = ( resp.headers[header_name]) LOG.info("Session create succeeded for endpoint %(url)s with " "headers %(hdr)s", {'url': provider.url, 'hdr': utils.censor_headers(session.default_headers)}) class NSXHTTPAdapter(adapters.HTTPAdapter): def __init__(self, *args, **kwargs): self.thumbprint = kwargs.pop("thumbprint", None) super(NSXHTTPAdapter, self).__init__(*args, **kwargs) def init_poolmanager(self, *args, **kwargs): if self.thumbprint: kwargs["assert_fingerprint"] = self.thumbprint super(NSXHTTPAdapter, self).init_poolmanager(*args, **kwargs) class ClusterHealth(object): """Indicator of overall cluster health. 
with respect to the connectivity of the clusters managed endpoints. """ # all endpoints are UP GREEN = 'GREEN' # at least 1 endpoint is UP, but 1 or more are DOWN ORANGE = 'ORANGE' # all endpoints are DOWN RED = 'RED' class EndpointState(object): """Tracks the connectivity state for a said endpoint.""" # no UP or DOWN state recorded yet INITIALIZED = 'INITIALIZED' # endpoint has been validate and is good UP = 'UP' # endpoint can't be reached or validated DOWN = 'DOWN' class Provider(object): """Data holder for a provider Which has a unique id a connection URL, and the credential details. """ def __init__(self, provider_id, provider_url, username, password, ca_file, thumbprint=None): self.id = provider_id self.url = provider_url self.username = username self.password = password self.ca_file = ca_file self.thumbprint = thumbprint def __str__(self): return str(self.url) class Endpoint(object): """A single NSX manager endpoint (host). A single NSX manager endpoint (host) which includes related information such as the endpoint's provider, state, etc.. A pool is used to hold connections to the endpoint which are doled out when proxying HTTP methods to the underlying connections. """ def __init__(self, provider, pool): self.provider = provider self.pool = pool self._state = EndpointState.INITIALIZED self._last_updated = datetime.datetime.now() def regenerate_pool(self): self.pool = pools.Pool(min_size=self.pool.min_size, max_size=self.pool.max_size, order_as_stack=True, create=self.pool.create) @property def last_updated(self): return self._last_updated @property def state(self): return self._state def set_state(self, state): if self.state != state: LOG.info("Endpoint '%(ep)s' changing from state" " '%(old)s' to '%(new)s'", {'ep': self.provider, 'old': self.state, 'new': state}) old_state = self._state self._state = state self._last_updated = datetime.datetime.now() return old_state def __str__(self): return "[%s] %s" % (self.state, self.provider) class EndpointConnection(object): """Simple data holder Which contains an endpoint and a connection for that endpoint. """ def __init__(self, endpoint, connection): self.endpoint = endpoint self.connection = connection class ClusteredAPI(object): """Duck types the major HTTP based methods of a requests.Session Such as get(), put(), post(), etc. and transparently proxies those calls to one of its managed NSX manager endpoints. 
""" _HTTP_VERBS = ['get', 'delete', 'head', 'put', 'post', 'patch', 'create'] def __init__(self, providers, http_provider, min_conns_per_pool=0, max_conns_per_pool=20, keepalive_interval=33): self._http_provider = http_provider self._keepalive_interval = keepalive_interval def _init_cluster(*args, **kwargs): self._init_endpoints(providers, min_conns_per_pool, max_conns_per_pool) _init_cluster() # keep this internal method for reinitialize upon fork # for api workers to ensure each process has its own keepalive # loops + state self._reinit_cluster = _init_cluster def _init_endpoints(self, providers, min_conns_per_pool, max_conns_per_pool): LOG.debug("Initializing API endpoints") def _create_conn(p): def _conn(): return self._http_provider.new_connection(self, p) return _conn self._endpoints = {} for provider in providers: pool = pools.Pool( min_size=min_conns_per_pool, max_size=max_conns_per_pool, order_as_stack=True, create=_create_conn(provider)) endpoint = Endpoint(provider, pool) self._endpoints[provider.id] = endpoint # service requests using round robin self._endpoint_schedule = itertools.cycle(self._endpoints.values()) # duck type to proxy http invocations for method in ClusteredAPI._HTTP_VERBS: setattr(self, method, self._proxy_stub(method)) conns = greenpool.GreenPool() for endpoint in self._endpoints.values(): conns.spawn(self._validate, endpoint) eventlet.sleep(0) while conns.running(): if (self.health == ClusterHealth.GREEN or self.health == ClusterHealth.ORANGE): # only wait for 1 or more endpoints to reduce init time break eventlet.sleep(0.5) for endpoint in self._endpoints.values(): # dynamic loop for each endpoint to ensure connectivity loop = loopingcall.DynamicLoopingCall( self._endpoint_keepalive, endpoint) loop.start(initial_delay=self._keepalive_interval, periodic_interval_max=self._keepalive_interval, stop_on_exception=False) LOG.debug("Done initializing API endpoint(s). 
" "API cluster health: %s", self.health) def _endpoint_keepalive(self, endpoint): delta = datetime.datetime.now() - endpoint.last_updated if delta.seconds >= self._keepalive_interval: # TODO(boden): backoff on validation failure self._validate(endpoint) return self._keepalive_interval return self._keepalive_interval - delta.seconds @property def providers(self): return [ep.provider for ep in self._endpoints.values()] @property def endpoints(self): return copy.copy(self._endpoints) @property def http_provider(self): return self._http_provider @property def health(self): down = 0 up = 0 for endpoint in self._endpoints.values(): if endpoint.state != EndpointState.UP: down += 1 else: up += 1 if down == len(self._endpoints): return ClusterHealth.RED return (ClusterHealth.GREEN if up == len(self._endpoints) else ClusterHealth.ORANGE) def _validate(self, endpoint): try: with endpoint.pool.item() as conn: self._http_provider.validate_connection(self, endpoint, conn) endpoint.set_state(EndpointState.UP) except exceptions.ClientCertificateNotTrusted: LOG.warning("Failed to validate API cluster endpoint " "'%(ep)s' due to untrusted client certificate", {'ep': endpoint}) # regenerate connection pool based on new certificate endpoint.regenerate_pool() except exceptions.BadXSRFToken: LOG.warning("Failed to validate API cluster endpoint " "'%(ep)s' due to expired XSRF token", {'ep': endpoint}) # regenerate connection pool based on token endpoint.regenerate_pool() except Exception as e: endpoint.set_state(EndpointState.DOWN) LOG.warning("Failed to validate API cluster endpoint " "'%(ep)s' due to: %(err)s", {'ep': endpoint, 'err': e}) def _select_endpoint(self): """Return an endpoint in UP state. Go over all endpoint and return the next one which is UP If all endpoints are currently DOWN, depending on the configuration retry it until one is UP (or max retries exceeded) """ def _select_endpoint_internal(refresh=False): # check for UP state until exhausting all endpoints seen, total = 0, len(self._endpoints.values()) while seen < total: endpoint = next(self._endpoint_schedule) if refresh: self._validate(endpoint) if endpoint.state == EndpointState.UP: return endpoint seen += 1 @utils.retry_upon_none_result(self.nsxlib_config.max_attempts) def _select_endpoint_internal_with_retry(): # redo endpoint selection with refreshing states return _select_endpoint_internal(refresh=True) # First attempt to get an UP endpoint endpoint = _select_endpoint_internal() if endpoint or not self.nsxlib_config.cluster_unavailable_retry: return endpoint # Retry the selection while refreshing the endpoints state try: return _select_endpoint_internal_with_retry() except tenacity.RetryError: # exhausted number of retries return None def endpoint_for_connection(self, conn): # check all endpoint pools for endpoint in self._endpoints.values(): if (conn in endpoint.pool.channel.queue or conn in endpoint.pool.free_items): return endpoint @property def cluster_id(self): return ','.join([str(ep.provider.url) for ep in self._endpoints.values()]) @contextlib.contextmanager def connection(self): with self.endpoint_connection() as conn_data: yield conn_data.connection @contextlib.contextmanager def endpoint_connection(self): endpoint = self._select_endpoint() if not endpoint: LOG.debug("All endpoints down for: %s" % [str(ep) for ep in self._endpoints.values()]) # all endpoints are DOWN and will have their next # state updated as per _endpoint_keepalive() raise exceptions.ServiceClusterUnavailable( cluster_id=self.cluster_id) if 
endpoint.pool.free() == 0: LOG.info("API endpoint %(ep)s at connection " "capacity %(max)s and has %(waiting)s waiting", {'ep': endpoint, 'max': endpoint.pool.max_size, 'waiting': endpoint.pool.waiting()}) # pool.item() will wait if pool has 0 free with endpoint.pool.item() as conn: yield EndpointConnection(endpoint, conn) def _proxy_stub(self, proxy_for): def _call_proxy(url, *args, **kwargs): return self._proxy(proxy_for, url, *args, **kwargs) return _call_proxy def _proxy(self, proxy_for, uri, *args, **kwargs): @utils.retry_upon_none_result(self.nsxlib_config.max_attempts) def _proxy_internal(proxy_for, uri, *args, **kwargs): # proxy http request call to an avail endpoint with self.endpoint_connection() as conn_data: conn = conn_data.connection endpoint = conn_data.endpoint # http conn must support requests style interface do_request = getattr(conn, proxy_for) if not uri.startswith('/'): uri = "/%s" % uri url = "%s%s" % (endpoint.provider.url, uri) try: LOG.debug("API cluster proxy %s %s to %s", proxy_for.upper(), uri, url) # Add the connection default headers if conn.default_headers: kwargs['headers'] = kwargs.get('headers', {}) kwargs['headers'].update(conn.default_headers) # call the actual connection method to do the # http request/response over the wire response = do_request(url, *args, **kwargs) endpoint.set_state(EndpointState.UP) return response except Exception as e: LOG.warning("Request failed due to: %s", e) if (not self._http_provider.is_connection_exception(e) and not self._http_provider.is_timeout_exception(e)): # only trap and retry connection & timeout errors raise e if self._http_provider.is_conn_open_exception(e): # unable to establish new connection - endpoint is # inaccessible endpoint.set_state(EndpointState.DOWN) LOG.info("Connection to %s failed, checking additional " "connections and endpoints" % url) # this might be a result of server closing connection # return None so it will retry upto max_attempts. return _proxy_internal(proxy_for, uri, *args, **kwargs) class NSXClusteredAPI(ClusteredAPI): """Extends ClusteredAPI to get conf values and setup the NSXv3 cluster.""" def __init__(self, nsxlib_config): self.nsxlib_config = nsxlib_config self._http_provider = (nsxlib_config.http_provider or NSXRequestsHTTPProvider()) super(NSXClusteredAPI, self).__init__( self._build_conf_providers(), self._http_provider, max_conns_per_pool=self.nsxlib_config.concurrent_connections, keepalive_interval=self.nsxlib_config.conn_idle_timeout) LOG.debug("Created NSX clustered API with '%s' " "provider", self._http_provider.provider_id) def _build_conf_providers(self): def _schemed_url(uri): uri = uri.strip('/') return urlparse.urlparse( uri if uri.startswith('http') else "%s://%s" % (self._http_provider.default_scheme, uri)) conf_urls = self.nsxlib_config.nsx_api_managers[:] urls = [] providers = [] provider_index = -1 for conf_url in conf_urls: provider_index += 1 conf_url = _schemed_url(conf_url) if conf_url in urls: LOG.warning("'%s' already defined in configuration file. " "Skipping.", urlparse.urlunparse(conf_url)) continue urls.append(conf_url) providers.append( Provider( conf_url.netloc, urlparse.urlunparse(conf_url), self.nsxlib_config.username(provider_index), self.nsxlib_config.password(provider_index), self.nsxlib_config.ca_file(provider_index), self.nsxlib_config.thumbprint(provider_index))) return providers vmware-nsxlib-15.0.6/vmware_nsxlib/v3/resources.py0000664000175000017500000010046013623151571022224 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. 
# All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import netaddr from oslo_log import log from oslo_log import versionutils import requests from vmware_nsxlib._i18n import _ from vmware_nsxlib.v3 import core_resources from vmware_nsxlib.v3 import exceptions from vmware_nsxlib.v3 import nsx_constants from vmware_nsxlib.v3 import utils LOG = log.getLogger(__name__) # TODO(asarfaty): keeping this for backwards compatibility. # core_resources.SwitchingProfileTypeId and # core_resources.PacketAddressClassifier should be used. # This code will be removed in the future. SwitchingProfileTypeId = core_resources.SwitchingProfileTypeId PacketAddressClassifier = core_resources.PacketAddressClassifier class SwitchingProfileTypes(core_resources.SwitchingProfileTypes): # TODO(asarfaty): keeping this for backwards compatibility. # This code will be removed in the future. def __init__(self): versionutils.report_deprecated_feature( LOG, 'resources.SwitchingProfileTypes is deprecated. ' 'Please use core_resources.SwitchingProfileTypes instead.') class WhiteListAddressTypes(core_resources.WhiteListAddressTypes): # TODO(asarfaty): keeping this for backwards compatibility. # This code will be removed in the future. def __init__(self): versionutils.report_deprecated_feature( LOG, 'resources.WhiteListAddressTypes is deprecated. ' 'Please use core_resources.WhiteListAddressTypes instead.') class SwitchingProfile(core_resources.NsxLibSwitchingProfile): # TODO(asarfaty): keeping this for backwards compatibility. # This code will be removed in the future. def __init__(self, rest_client, *args, **kwargs): versionutils.report_deprecated_feature( LOG, 'resources.SwitchingProfile is deprecated. 
' 'Please use core_resources.NsxLibSwitchingProfile instead.') super(SwitchingProfile, self).__init__(rest_client) class LogicalPort(utils.NsxLibApiBase): @property def uri_segment(self): return 'logical-ports' @property def resource_type(self): return 'LogicalPort' def _build_body_attrs( self, display_name=None, admin_state=True, tags=None, address_bindings=None, switch_profile_ids=None, attachment=None, description=None, extra_configs=None): tags = tags or [] switch_profile_ids = switch_profile_ids or [] body = {} if tags: body['tags'] = tags if display_name is not None: body['display_name'] = display_name if admin_state is not None: if admin_state: body['admin_state'] = nsx_constants.ADMIN_STATE_UP else: body['admin_state'] = nsx_constants.ADMIN_STATE_DOWN if address_bindings: bindings = [] for binding in address_bindings: address_classifier = { 'ip_address': binding.ip_address, 'mac_address': binding.mac_address } if binding.vlan is not None: address_classifier['vlan'] = int(binding.vlan) bindings.append(address_classifier) body['address_bindings'] = bindings elif address_bindings is not None: body['address_bindings'] = [] if switch_profile_ids: profiles = [] for profile in switch_profile_ids: profiles.append({ 'value': profile.profile_id, 'key': profile.profile_type }) body['switching_profile_ids'] = profiles # Note that attachment could be None, meaning reset it. if attachment is not False: body['attachment'] = attachment if description is not None: body['description'] = description if extra_configs: body['extra_configs'] = extra_configs return body def _prepare_attachment(self, attachment_type, vif_uuid, allocate_addresses, vif_type, parent_vif_id, traffic_tag, app_id, tn_uuid): if attachment_type and vif_uuid: attachment = {'attachment_type': attachment_type, 'id': vif_uuid} if vif_type: context = {'resource_type': nsx_constants.VIF_RESOURCE_TYPE, 'allocate_addresses': allocate_addresses, 'vif_type': vif_type} if parent_vif_id: context['parent_vif_id'] = parent_vif_id context['traffic_tag'] = traffic_tag context['app_id'] = app_id elif tn_uuid: context['transport_node_uuid'] = tn_uuid context['app_id'] = app_id attachment['context'] = context return attachment elif attachment_type is None and vif_uuid is None: return None # reset attachment else: return False # no attachment change def create(self, lswitch_id, vif_uuid, tags=None, attachment_type=nsx_constants.ATTACHMENT_VIF, admin_state=True, name=None, address_bindings=None, parent_vif_id=None, traffic_tag=None, switch_profile_ids=None, vif_type=None, app_id=None, allocate_addresses=nsx_constants.ALLOCATE_ADDRESS_NONE, description=None, tn_uuid=None, extra_configs=None): tags = tags or [] body = {'logical_switch_id': lswitch_id} # NOTE(arosen): If parent_vif_id is specified we need to use # CIF attachment type. 
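# Descriptive note (added for clarity; not in the original source):
# _prepare_attachment() returns an attachment dict when both
# attachment_type and vif_uuid are given, None when both are None
# (which _build_body_attrs() treats as "reset the attachment"), and
# False otherwise, in which case _build_body_attrs() omits the
# 'attachment' key entirely so the existing attachment is left unchanged.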
attachment = self._prepare_attachment(attachment_type, vif_uuid, allocate_addresses, vif_type, parent_vif_id, traffic_tag, app_id, tn_uuid) body.update(self._build_body_attrs( display_name=name, admin_state=admin_state, tags=tags, address_bindings=address_bindings, switch_profile_ids=switch_profile_ids, attachment=attachment, description=description, extra_configs=extra_configs)) return self.client.create(self.get_path(), body=body) def delete(self, lport_id): self._delete_with_retry('%s?detach=true' % lport_id) def update(self, lport_id, vif_uuid, name=None, admin_state=None, address_bindings=None, switch_profile_ids=None, tags_update=None, tags=None, attachment_type=nsx_constants.ATTACHMENT_VIF, parent_vif_id=None, traffic_tag=None, vif_type=None, app_id=None, allocate_addresses=nsx_constants.ALLOCATE_ADDRESS_NONE, description=None, tn_uuid=None, extra_configs=None, force=False): # Do not allow tags & tags_update at the same call if tags_update and tags: raise exceptions.ManagerError( details=_("Can't support updating logical port %s both with " "tags and tags_update attributes") % lport_id) attachment = self._prepare_attachment(attachment_type, vif_uuid, allocate_addresses, vif_type, parent_vif_id, traffic_tag, app_id, tn_uuid) lport = {} if tags_update is not None: lport['tags_update'] = tags_update lport.update(self._build_body_attrs( display_name=name, admin_state=admin_state, address_bindings=address_bindings, switch_profile_ids=switch_profile_ids, attachment=attachment, description=description, extra_configs=extra_configs, tags=tags)) headers = None if force: headers = {'X-Allow-Overwrite': 'true'} return self._update_resource( self.get_path(lport_id), lport, headers=headers, retry=True) def get_by_attachment(self, attachment_type, attachment_id): """Return all logical port matching the attachment type and Id""" url_suffix = ('?attachment_type=%s&attachment_id=%s' % (attachment_type, attachment_id)) return self.client.get(self.get_path(url_suffix)) def get_by_logical_switch(self, logical_switch_id): """Return all logical port of a logical switch""" url_suffix = '?logical_switch_id=%s' % logical_switch_id return self.client.get(self.get_path(url_suffix)) class LogicalRouter(core_resources.NsxLibLogicalRouter): # TODO(asarfaty): keeping this for backwards compatibility. # This code will be removed in the future. def __init__(self, rest_client, *args, **kwargs): versionutils.report_deprecated_feature( LOG, 'resources.LogicalRouter is deprecated. 
' 'Please use core_resources.NsxLibLogicalRouter instead.') super(LogicalRouter, self).__init__(rest_client) class LogicalRouterPort(utils.NsxLibApiBase): @property def uri_segment(self): return 'logical-router-ports' @staticmethod def _get_relay_binding(relay_service_uuid): return {'service_id': {'target_type': 'LogicalService', 'target_id': relay_service_uuid}} def create(self, logical_router_id, display_name, tags, resource_type, logical_port_id, address_groups, edge_cluster_member_index=None, urpf_mode=None, relay_service_uuid=None): body = {'display_name': display_name, 'resource_type': resource_type, 'logical_router_id': logical_router_id, 'tags': tags or []} if address_groups: body['subnets'] = address_groups if resource_type in [nsx_constants.LROUTERPORT_UPLINK, nsx_constants.LROUTERPORT_DOWNLINK, nsx_constants.LROUTERPORT_CENTRALIZED]: body['linked_logical_switch_port_id'] = { 'target_id': logical_port_id} elif resource_type == nsx_constants.LROUTERPORT_LINKONTIER1: body['linked_logical_router_port_id'] = { 'target_id': logical_port_id} elif logical_port_id: body['linked_logical_router_port_id'] = logical_port_id if edge_cluster_member_index: body['edge_cluster_member_index'] = edge_cluster_member_index if urpf_mode: body['urpf_mode'] = urpf_mode if relay_service_uuid: if (self.nsxlib and self.nsxlib.feature_supported( nsx_constants.FEATURE_DHCP_RELAY)): body['service_bindings'] = [self._get_relay_binding( relay_service_uuid)] else: LOG.error("Ignoring relay_service_uuid for router %s port: " "This feature is not supported.", logical_router_id) return self.client.create(self.get_path(), body=body) def update(self, logical_port_id, **kwargs): logical_router_port = {} # special treatment for updating/removing the relay service if 'relay_service_uuid' in kwargs: if kwargs['relay_service_uuid']: if (self.nsxlib and self.nsxlib.feature_supported( nsx_constants.FEATURE_DHCP_RELAY)): logical_router_port['service_bindings'] = [ self._get_relay_binding( kwargs['relay_service_uuid'])] else: LOG.error("Ignoring relay_service_uuid for router " "port %s: This feature is not supported.", logical_port_id) else: # delete the current one if 'service_bindings' in logical_router_port: logical_router_port['service_bindings'] = [] del kwargs['relay_service_uuid'] for k in kwargs: logical_router_port[k] = kwargs[k] return self._update_resource( self.get_path(logical_port_id), logical_router_port, retry=True) def delete(self, logical_port_id): self._delete_with_retry(logical_port_id) def get_by_lswitch_id(self, logical_switch_id): resource = '?logical_switch_id=%s' % logical_switch_id router_ports = self.client.url_get(self.get_path(resource)) result_count = int(router_ports.get('result_count', "0")) if result_count >= 2: raise exceptions.ManagerError( details=_("Can't support more than one logical router ports " "on same logical switch %s ") % logical_switch_id) elif result_count == 1: return router_ports['results'][0] else: err_msg = (_("Logical router link port not found on logical " "switch %s") % logical_switch_id) raise exceptions.ResourceNotFound( manager=self.client.nsx_api_managers, operation=err_msg) def update_by_lswitch_id(self, logical_router_id, ls_id, **payload): port = self.get_by_lswitch_id(ls_id) return self.update(port['id'], **payload) def delete_by_lswitch_id(self, ls_id): port = self.get_by_lswitch_id(ls_id) self.delete(port['id']) def get_by_router_id(self, logical_router_id): resource = '?logical_router_id=%s' % logical_router_id logical_router_ports = 
self.client.url_get(self.get_path(resource)) return logical_router_ports['results'] def get_tier1_link_port(self, logical_router_id): logical_router_ports = self.get_by_router_id(logical_router_id) for port in logical_router_ports: if port['resource_type'] == nsx_constants.LROUTERPORT_LINKONTIER1: return port raise exceptions.ResourceNotFound( manager=self.client.nsx_api_managers, operation="get router link port") def get_tier0_uplink_port(self, logical_router_id): logical_router_ports = self.get_by_router_id(logical_router_id) for port in logical_router_ports: if port['resource_type'] == nsx_constants.LROUTERPORT_UPLINK: return port def get_tier0_uplink_subnets(self, logical_router_id): port = self.get_tier0_uplink_port(logical_router_id) if port: return port.get('subnets', []) return [] def get_tier0_uplink_cidrs(self, logical_router_id): # return a list of tier0 uplink ip/prefix addresses subnets = self.get_tier0_uplink_subnets(logical_router_id) cidrs = [] for subnet in subnets: for ip_address in subnet.get('ip_addresses'): cidrs.append('%s/%s' % (ip_address, subnet.get('prefix_length'))) return cidrs def get_tier0_uplink_ips(self, logical_router_id): # return a list of tier0 uplink ip addresses subnets = self.get_tier0_uplink_subnets(logical_router_id) ips = [] for subnet in subnets: for ip_address in subnet.get('ip_addresses'): ips.append(ip_address) return ips class MetaDataProxy(core_resources.NsxLibMetadataProxy): # TODO(asarfaty): keeping this for backwards compatibility. # This code will be removed in the future. def __init__(self, rest_client, *args, **kwargs): versionutils.report_deprecated_feature( LOG, 'resources.MetaDataProxy is deprecated. ' 'Please use core_resources.NsxLibMetadataProxy instead.') super(MetaDataProxy, self).__init__(rest_client) class DhcpProfile(core_resources.NsxLibDhcpProfile): # TODO(asarfaty): keeping this for backwards compatibility. # This code will be removed in the future. def __init__(self, rest_client, *args, **kwargs): versionutils.report_deprecated_feature( LOG, 'resources.DhcpProfile is deprecated. ' 'Please use core_resources.NsxLibDhcpProfile instead.') super(DhcpProfile, self).__init__(rest_client) class LogicalDhcpServer(utils.NsxLibApiBase): def get_dhcp_opt_code(self, name): return utils.get_dhcp_opt_code(name) @property def uri_segment(self): return 'dhcp/servers' @property def resource_type(self): return 'LogicalDhcpServer' def _construct_server(self, body, dhcp_profile_id=None, server_ip=None, name=None, dns_nameservers=None, domain_name=None, gateway_ip=False, options=None, tags=None): if name: body['display_name'] = name if dhcp_profile_id: body['dhcp_profile_id'] = dhcp_profile_id if server_ip: body['ipv4_dhcp_server']['dhcp_server_ip'] = server_ip if dns_nameservers is not None: # Note that [] is valid for dns_nameservers, means deleting it. body['ipv4_dhcp_server']['dns_nameservers'] = dns_nameservers if domain_name: body['ipv4_dhcp_server']['domain_name'] = domain_name if gateway_ip is not False: # Note that None is valid for gateway_ip, means deleting it. 
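# Added clarification (not in the original source): gateway_ip uses
# False, the default in create() and update(), as a sentinel meaning
# "leave the gateway untouched"; passing None explicitly clears it and
# any other value sets it. A hypothetical call such as
#   dhcp_server.update(server_uuid, gateway_ip=None)
# (where `dhcp_server` is a LogicalDhcpServer instance) removes the
# configured gateway, while omitting the argument keeps it as-is.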
body['ipv4_dhcp_server']['gateway_ip'] = gateway_ip if options: body['ipv4_dhcp_server']['options'] = options if tags: body['tags'] = tags def create(self, dhcp_profile_id, server_ip, name=None, dns_nameservers=None, domain_name=None, gateway_ip=False, options=None, tags=None): body = {'ipv4_dhcp_server': {}} self._construct_server(body, dhcp_profile_id, server_ip, name, dns_nameservers, domain_name, gateway_ip, options, tags) return self.client.create(self.get_path(), body=body) def update(self, uuid, dhcp_profile_id=None, server_ip=None, name=None, dns_nameservers=None, domain_name=None, gateway_ip=False, options=None, tags=None): body = {'ipv4_dhcp_server': {}} self._construct_server(body, dhcp_profile_id, server_ip, name, dns_nameservers, domain_name, gateway_ip, options, tags) return self._update_with_retry(uuid, body) def create_binding(self, server_uuid, mac, ip, hostname=None, lease_time=None, options=None, gateway_ip=False): body = {'mac_address': mac, 'ip_address': ip} if hostname: body['host_name'] = hostname if lease_time: body['lease_time'] = lease_time if options: body['options'] = options if gateway_ip is not False: # Note that None is valid for gateway_ip, means deleting it. body['gateway_ip'] = gateway_ip url = "%s/static-bindings" % server_uuid return self.client.url_post(self.get_path(url), body) def get_binding(self, server_uuid, binding_uuid): url = "%s/static-bindings/%s" % (server_uuid, binding_uuid) return self.get(url) def update_binding(self, server_uuid, binding_uuid, **kwargs): body = {} body.update(kwargs) url = "%s/static-bindings/%s" % (server_uuid, binding_uuid) self._update_resource(self.get_path(url), body, retry=True) def delete_binding(self, server_uuid, binding_uuid): url = "%s/static-bindings/%s" % (server_uuid, binding_uuid) return self.delete(url) class IpPool(utils.NsxLibApiBase): @property def uri_segment(self): return 'pools/ip-pools' @property def resource_type(self): return 'IpPool' def _generate_ranges(self, cidr, gateway_ip): """Create list of ranges from the given cidr. Ignore the gateway_ip, if defined """ ip_set = netaddr.IPSet(netaddr.IPNetwork(cidr)) if gateway_ip: ip_set.remove(gateway_ip) return [{"start": str(r[0]), "end": str(r[-1])} for r in ip_set.iter_ipranges()] def create(self, cidr, allocation_ranges=None, display_name=None, description=None, gateway_ip=None, dns_nameservers=None, tags=None): """Create an IpPool. Arguments: cidr: (required) allocation_ranges: (optional) a list of dictionaries, each with 'start' and 'end' keys, and IP values. If None: the cidr will be used to create the ranges, excluding the gateway. 
display_name: (optional) description: (optional) gateway_ip: (optional) dns_nameservers: (optional) list of addresses """ if not cidr: raise exceptions.InvalidInput(operation="IP Pool create", arg_name="cidr", arg_val=cidr) if not allocation_ranges: # generate ranges from (cidr - gateway) allocation_ranges = self._generate_ranges(cidr, gateway_ip) subnet = {"allocation_ranges": allocation_ranges, "cidr": cidr} if gateway_ip: subnet["gateway_ip"] = gateway_ip if dns_nameservers: subnet["dns_nameservers"] = dns_nameservers body = {"subnets": [subnet]} if description: body["description"] = description if display_name: body["display_name"] = display_name if tags: body['tags'] = tags return self.client.create(self.get_path(), body=body) def delete(self, pool_id, force=False): url = pool_id if force: url += '?force=%s' % force return self.client.delete(self.get_path(url)) def _update_param_in_pool(self, args_dict, key, pool_data): # update the arg only if it exists in the args dictionary if key in args_dict: if args_dict[key]: pool_data[key] = args_dict[key] else: # remove the current value del pool_data[key] def update(self, pool_id, **kwargs): """Update the given attributes in the current pool configuration.""" # Get the current pool, and remove irrelevant fields pool = self.get(pool_id) for key in ["resource_type", "_create_time", "_create_user", "_last_modified_user", "_last_modified_time"]: pool.pop(key, None) # update only the attributes in kwargs self._update_param_in_pool(kwargs, 'display_name', pool) self._update_param_in_pool(kwargs, 'description', pool) self._update_param_in_pool(kwargs, 'tags', pool) self._update_param_in_pool(kwargs, 'gateway_ip', pool["subnets"][0]) self._update_param_in_pool(kwargs, 'dns_nameservers', pool["subnets"][0]) self._update_param_in_pool(kwargs, 'allocation_ranges', pool["subnets"][0]) self._update_param_in_pool(kwargs, 'cidr', pool["subnets"][0]) return self.client.update(self.get_path(pool_id), pool) def allocate(self, pool_id, ip_addr=None, display_name=None, tags=None): """Allocate an IP from a pool.""" # Note: Currently the backend does not support allocation of a # specific IP, so an exception will be raised by the backend. 
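# Illustrative sketch (not part of this module): the range generation used by
# _generate_ranges() above, shown standalone with netaddr. The cidr and
# gateway values are placeholders.
import netaddr

def cidr_to_ranges(cidr, gateway_ip=None):
    ip_set = netaddr.IPSet(netaddr.IPNetwork(cidr))
    if gateway_ip:
        # Punch the gateway out of the set so it is never allocated.
        ip_set.remove(gateway_ip)
    return [{'start': str(r[0]), 'end': str(r[-1])}
            for r in ip_set.iter_ipranges()]

# cidr_to_ranges('192.168.1.0/29', '192.168.1.1') ->
# [{'start': '192.168.1.0', 'end': '192.168.1.0'},
#  {'start': '192.168.1.2', 'end': '192.168.1.7'}]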
# Depending on the backend version, this may be allowed in the future url = "%s?action=ALLOCATE" % pool_id body = {"allocation_id": ip_addr} if tags is not None: body['tags'] = tags if display_name is not None: body['display_name'] = display_name return self.client.url_post(self.get_path(url), body=body) def release(self, pool_id, ip_addr): """Release an IP back to a pool.""" url = "%s?action=RELEASE" % pool_id body = {"allocation_id": ip_addr} return self.client.url_post(self.get_path(url), body=body) def get_allocations(self, pool_id): """Return information about the allocated IPs in the pool.""" url = "%s/allocations" % pool_id return self.client.url_get(self.get_path(url)) class NodeHttpServiceProperties(utils.NsxLibApiBase): @property def uri_segment(self): return 'node/services/http' @property def resource_type(self): return 'NodeHttpServiceProperties' def get_properties(self): return self.client.get(self.get_path()) def get_rate_limit(self): if (self.nsxlib and not self.nsxlib.feature_supported( nsx_constants.FEATURE_RATE_LIMIT)): msg = (_("Rate limit is not supported by NSX version %s") % self.nsxlib.get_version()) raise exceptions.ManagerError(details=msg) properties = self.get_properties() return properties.get('service_properties', {}).get( 'client_api_rate_limit') def update_rate_limit(self, value): """update the NSX rate limit. default value is 40. 0 means no limit""" if (self.nsxlib and not self.nsxlib.feature_supported( nsx_constants.FEATURE_RATE_LIMIT)): msg = (_("Rate limit is not supported by NSX version %s") % self.nsxlib.get_version()) raise exceptions.ManagerError(details=msg) properties = self.get_properties() if 'service_properties' in properties: properties['service_properties'][ 'client_api_rate_limit'] = int(value) # update the value using a PUT command, which is expected to return 202 expected_results = [requests.codes.accepted] self.client.update(self.uri_segment, properties, expected_results=expected_results) # restart the http service using POST, which is expected to return 202 restart_url = self.uri_segment + '?action=restart' self.client.create(restart_url, expected_results=expected_results) def delete(self, uuid): """Not supported""" msg = _("Delete is not supported for %s") % self.uri_segment raise exceptions.ManagerError(details=msg) def get(self, uuid): """Not supported""" msg = _("Get is not supported for %s") % self.uri_segment raise exceptions.ManagerError(details=msg) def list(self): """Not supported""" msg = _("List is not supported for %s") % self.uri_segment raise exceptions.ManagerError(details=msg) def find_by_display_name(self, display_name): """Not supported""" msg = _("Find is not supported for %s") % self.uri_segment raise exceptions.ManagerError(details=msg) class NsxlibClusterNodesConfig(utils.NsxLibApiBase): @property def uri_segment(self): return 'cluster/nodes' @property def resource_type(self): return 'ClusterNodeConfig' def delete(self, uuid): """Not supported""" msg = _("Delete is not supported for %s") % self.uri_segment raise exceptions.ManagerError(details=msg) def get_managers_ips(self): manager_ips = [] nodes_config = self.client.get(self.get_path())['results'] for node in nodes_config: if 'manager_role' in node: manager_conf = node['manager_role'] if 'api_listen_addr' in manager_conf: list_addr = manager_conf['mgmt_cluster_listen_addr'] ip = list_addr['ip_address'] if ip != '127.0.0.1': manager_ips.append(list_addr['ip_address']) return manager_ips class NsxlibHostSwitchProfiles(utils.NsxLibApiBase): @property def 
uri_segment(self): return 'host-switch-profiles' @property def resource_type(self): return 'UplinkHostSwitchProfile' class Inventory(utils.NsxLibApiBase): """REST APIs to support inventory service.""" RESOURCES_PATH = {"ContainerCluster": "container-clusters", "ContainerProject": "container-projects", "ContainerApplication": "container-applications", "ContainerApplicationInstance": "container-application-instances", "ContainerClusterNode": "container-cluster-nodes", "ContainerNetworkPolicy": "container-network-policies", "ContainerIngressPolicy": "container-ingress-policies"} SEGMENT_URI = 'fabric' SEGMENT_URI_FOR_UPDATE = 'inventory/container' @property def uri_segment(self): # only serves for get, list and delete. For batch update, # another base url is used. The reason is update API is internal. return self.SEGMENT_URI @property def resource_type(self): return 'Inventory' def update(self, cluster_id, updates): """This method supports multiple updates in a batching way.""" items = [] for update_type, update_object in updates: item = {} item["object_update_type"] = update_type item["container_object"] = update_object items.append(item) body = {"container_inventory_objects": items} request_url = "%s/%s?action=updates" % ( self.SEGMENT_URI_FOR_UPDATE, cluster_id) return self.client.url_post(request_url, body) def get(self, resource_type, resource_id): if not resource_type: msg = "null resource type is not supported" raise exceptions.ResourceNotFound(details=msg) request_url = "%s/%s" % ( self.get_path(self._get_path_for_resource(resource_type)), resource_id) return self.client.url_get(request_url) def list(self, cluster_id, resource_type): if not resource_type: msg = "null resource type is not supported" raise exceptions.ResourceNotFound(details=msg) request_url = "%s?container_cluster_id=%s" % ( self.get_path(self._get_path_for_resource(resource_type)), cluster_id) return self.client.url_get(request_url) def delete(self, resource_type, resource_id): if not resource_type: msg = "null resource type is not supported" raise exceptions.ResourceNotFound(details=msg) request_url = "%s/%s" % ( self.get_path(self._get_path_for_resource(resource_type)), resource_id) return self.client.url_delete(request_url) def create(self, resource_type, resource): if not resource_type: msg = "null resource type is not supported" raise exceptions.ResourceNotFound(details=msg) request_url = self.get_path( self._get_path_for_resource(resource_type)) return self.client.url_post(request_url, resource) def _get_path_for_resource(self, resource_type): path = self.RESOURCES_PATH.get(resource_type) if not path: msg = "backend resource %s is not supported" % resource_type raise exceptions.ResourceNotFound(details=msg) return path class SystemHealth(utils.NsxLibApiBase): @property def uri_segment(self): return 'systemhealth' @property def resource_type(self): return 'SystemHealth' def create_ncp_status(self, cluster_id, status): url = '/container-cluster/ncp/status' body = {'cluster_id': cluster_id, 'status': status} return self.client.create(self.get_path(url), body=body) vmware-nsxlib-15.0.6/vmware_nsxlib/v3/constants.py0000664000175000017500000001062213623151571022226 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. IPv4_ANY = '0.0.0.0/0' # Protocol names and numbers for Security Groups/Firewalls PROTO_NAME_AH = 'ah' PROTO_NAME_DCCP = 'dccp' PROTO_NAME_EGP = 'egp' PROTO_NAME_ESP = 'esp' PROTO_NAME_GRE = 'gre' PROTO_NAME_ICMP = 'icmp' PROTO_NAME_IGMP = 'igmp' PROTO_NAME_IPIP = 'ipip' PROTO_NAME_IPV6_ENCAP = 'ipv6-encap' PROTO_NAME_IPV6_FRAG = 'ipv6-frag' PROTO_NAME_IPV6_ICMP = 'ipv6-icmp' # For backward-compatibility of security group rule API, we keep the old value # for IPv6 ICMP. It should be clean up in the future. PROTO_NAME_IPV6_ICMP_LEGACY = 'icmpv6' PROTO_NAME_IPV6_NONXT = 'ipv6-nonxt' PROTO_NAME_IPV6_OPTS = 'ipv6-opts' PROTO_NAME_IPV6_ROUTE = 'ipv6-route' PROTO_NAME_OSPF = 'ospf' PROTO_NAME_PGM = 'pgm' PROTO_NAME_RSVP = 'rsvp' PROTO_NAME_SCTP = 'sctp' PROTO_NAME_TCP = 'tcp' PROTO_NAME_UDP = 'udp' PROTO_NAME_UDPLITE = 'udplite' PROTO_NAME_VRRP = 'vrrp' PROTO_NUM_AH = 51 PROTO_NUM_DCCP = 33 PROTO_NUM_EGP = 8 PROTO_NUM_ESP = 50 PROTO_NUM_GRE = 47 PROTO_NUM_ICMP = 1 PROTO_NUM_IGMP = 2 PROTO_NUM_IPIP = 4 PROTO_NUM_IPV6_ENCAP = 41 PROTO_NUM_IPV6_FRAG = 44 PROTO_NUM_IPV6_ICMP = 58 PROTO_NUM_IPV6_NONXT = 59 PROTO_NUM_IPV6_OPTS = 60 PROTO_NUM_IPV6_ROUTE = 43 PROTO_NUM_OSPF = 89 PROTO_NUM_PGM = 113 PROTO_NUM_RSVP = 46 PROTO_NUM_SCTP = 132 PROTO_NUM_TCP = 6 PROTO_NUM_UDP = 17 PROTO_NUM_UDPLITE = 136 PROTO_NUM_VRRP = 112 IP_PROTOCOL_MAP = {PROTO_NAME_AH: PROTO_NUM_AH, PROTO_NAME_DCCP: PROTO_NUM_DCCP, PROTO_NAME_EGP: PROTO_NUM_EGP, PROTO_NAME_ESP: PROTO_NUM_ESP, PROTO_NAME_GRE: PROTO_NUM_GRE, PROTO_NAME_ICMP: PROTO_NUM_ICMP, PROTO_NAME_IGMP: PROTO_NUM_IGMP, PROTO_NAME_IPIP: PROTO_NUM_IPIP, PROTO_NAME_IPV6_ENCAP: PROTO_NUM_IPV6_ENCAP, PROTO_NAME_IPV6_FRAG: PROTO_NUM_IPV6_FRAG, PROTO_NAME_IPV6_ICMP: PROTO_NUM_IPV6_ICMP, # For backward-compatibility of security group rule API PROTO_NAME_IPV6_ICMP_LEGACY: PROTO_NUM_IPV6_ICMP, PROTO_NAME_IPV6_NONXT: PROTO_NUM_IPV6_NONXT, PROTO_NAME_IPV6_OPTS: PROTO_NUM_IPV6_OPTS, PROTO_NAME_IPV6_ROUTE: PROTO_NUM_IPV6_ROUTE, PROTO_NAME_OSPF: PROTO_NUM_OSPF, PROTO_NAME_PGM: PROTO_NUM_PGM, PROTO_NAME_RSVP: PROTO_NUM_RSVP, PROTO_NAME_SCTP: PROTO_NUM_SCTP, PROTO_NAME_TCP: PROTO_NUM_TCP, PROTO_NAME_UDP: PROTO_NUM_UDP, PROTO_NAME_UDPLITE: PROTO_NUM_UDPLITE, PROTO_NAME_VRRP: PROTO_NUM_VRRP} # Supported ICMP types and their codes IPV4_ICMP_TYPES = {0: [0], # Echo reply 3: range(0, 16), # Destination unreachable 4: [0], # Source quench 5: range(0, 4), # Redirect 8: [0], # Echo request 9: [0, 16], # Router advertisement 10: [0], # Router Selection 11: [0, 1], # Time exceeded 12: [0, 1, 2], # Parameter Problem 13: [0], # Timestamp 14: [0], # Timestamp reply 15: [0], # Information request 16: [0], # Information reply 17: [0], # Address mask request 18: [0], # Address mask reply 33: [0], # Where-Are-You 34: [0], # I-Am-Here 35: [0], # Mobile registration request 36: [0], # Mobile registration reply } # Supported strict ICMP types and codes. 
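# Illustrative sketch (not part of this module): validating an ICMP security
# group rule against the IPV4_ICMP_TYPES map above. The rule values are
# examples only.
def icmp_rule_supported(icmp_type, icmp_code=None):
    supported_codes = IPV4_ICMP_TYPES.get(icmp_type)
    if supported_codes is None:
        return False
    return icmp_code is None or icmp_code in supported_codes

# icmp_rule_supported(8)      -> True   (echo request, any code)
# icmp_rule_supported(3, 13)  -> True   (destination unreachable, code 13)
# icmp_rule_supported(40)     -> False  (type not in the map)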
MP accepts everything except 9:16 IPV4_ICMP_STRICT_TYPES = IPV4_ICMP_TYPES.copy() # Note: replace item 9 as we did a shallow copy IPV4_ICMP_STRICT_TYPES[9] = [0] vmware-nsxlib-15.0.6/vmware_nsxlib/v3/load_balancer.py0000664000175000017500000005253513623151571022771 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3 import nsx_constants from vmware_nsxlib.v3 import utils LOG = logging.getLogger(__name__) class ApplicationProfileTypes(object): """LoadBalancer Application Profile types""" HTTP = "LbHttpProfile" FAST_TCP = "LbFastTcpProfile" FAST_UDP = "LbFastUdpProfile" class PersistenceProfileTypes(object): """LoadBalancer Persistence Profile types""" COOKIE = "LbCookiePersistenceProfile" SOURCE_IP = "LbSourceIpPersistenceProfile" class MonitorTypes(object): """LoadBalancer Monitor types""" HTTP = "LbHttpMonitor" HTTPS = "LbHttpsMonitor" ICMP = "LbIcmpMonitor" PASSIVE = "LbPassiveMonitor" TCP = "LbTcpMonitor" UDP = "LbUdpMonitor" class LoadBalancerBase(utils.NsxLibApiBase): resource = '' @staticmethod def _build_args(body, display_name=None, description=None, tags=None, resource_type=None, **kwargs): if display_name: body['display_name'] = display_name if description: body['description'] = description if tags: body['tags'] = tags if resource_type: body['resource_type'] = resource_type body.update(kwargs) return body def add_to_list(self, resource_id, item_id, item_key): """Add item_id to resource item_key list :param resource_id: resource id, e.g. pool_id, virtual_server_id :param item_id: item to be added to the list :param item_key: item list in the resource, e.g. rule_ids in virtual server :return: client update response """ # Using internal method so we can access max_attempts in the decorator @utils.retry_upon_exception( nsxlib_exc.StaleRevision, max_attempts=self.client.max_attempts) def do_update(): object_url = self.resource + '/' + resource_id body = self.client.get(object_url) if item_key in body: item_list = body[item_key] if item_id not in item_list: item_list.append(item_id) else: LOG.error('Item %s is already in resource %s', item_id, item_key) return body else: item_list = [item_id] body[item_key] = item_list return self.client.update(object_url, body) return do_update() def remove_from_list(self, resource_id, item_id, item_key): """Remove item_id from resource item_key list :param resource_id: resource id, e.g. pool_id, virtual_server_id :param item_id: item to be removed from the list :param item_key: item list in the resource, e.g. 
rule_ids in virtual server :return: client update response """ # Using internal method so we can access max_attempts in the decorator @utils.retry_upon_exception( nsxlib_exc.StaleRevision, max_attempts=self.client.max_attempts) def do_update(): object_url = self.resource + '/' + resource_id body = self.client.get(object_url) item_list = body.get(item_key) if item_list and item_id in item_list: item_list.remove(item_id) body[item_key] = item_list return self.client.update(object_url, body) else: ops = ('removing item %s from resource %s %s as it is not in ' 'the list', item_id, item_key, item_list) raise nsxlib_exc.ResourceNotFound( manager=self.client.nsx_api_managers, operation=ops) return do_update() def create(self, display_name=None, description=None, tags=None, resource_type=None, **kwargs): orig_body = {} body = self._build_args(orig_body, display_name, description, tags, resource_type, **kwargs) return self.client.create(self.resource, body) def list(self): return self.client.list(resource=self.resource) def get(self, object_id): object_url = self.resource + '/' + object_id return self.client.get(object_url) def update(self, object_id, display_name=None, description=None, tags=None, resource_type=None, **kwargs): # Using internal method so we can access max_attempts in the decorator @utils.retry_upon_exception( nsxlib_exc.StaleRevision, max_attempts=self.client.max_attempts) def do_update(): object_url = self.resource + '/' + object_id orig_body = self.client.get(object_url) body = self._build_args(orig_body, display_name, description, tags, resource_type, **kwargs) return self.client.update(object_url, body) return do_update() def delete(self, object_id): object_url = self.resource + '/' + object_id return self.client.delete(object_url) class ApplicationProfile(LoadBalancerBase): resource = 'loadbalancer/application-profiles' @staticmethod def _build_args(body, display_name=None, description=None, tags=None, resource_type=None, **kwargs): if display_name: body['display_name'] = display_name if description: body['description'] = description if tags: body['tags'] = tags if resource_type is None: return body if resource_type in [ApplicationProfileTypes.HTTP, ApplicationProfileTypes.FAST_TCP, ApplicationProfileTypes.FAST_UDP]: body['resource_type'] = resource_type extra_args = ['idle_timeout'] if resource_type == ApplicationProfileTypes.HTTP: extra_args.extend( ['http_redirect_to', 'http_redirect_to_https', 'ntlm', 'request_body_size', 'request_header_size', 'response_header_size', 'response_timeout', 'x_forwarded_for']) elif resource_type == ApplicationProfileTypes.FAST_TCP: extra_args.extend( ['close_timeout', 'ha_flow_mirroring_enabled']) elif resource_type == ApplicationProfileTypes.FAST_UDP: extra_args.extend(['flow_mirroring_enabled']) return utils.build_extra_args(body, extra_args, **kwargs) else: raise nsxlib_exc.InvalidInput( operation='create_application_profile', arg_val=resource_type, arg_name='resource_type') class PersistenceProfile(LoadBalancerBase): resource = 'loadbalancer/persistence-profiles' @staticmethod def _build_args(body, display_name=None, description=None, tags=None, resource_type=None, **kwargs): if display_name: body['display_name'] = display_name if description: body['description'] = description if tags: body['tags'] = tags if resource_type == PersistenceProfileTypes.COOKIE: body['resource_type'] = resource_type extra_args = ['cookie_domain', 'cookie_fallback', 'cookie_garble', 'cookie_mode', 'cookie_name', 'cookie_path', 'cookie_time'] return 
utils.build_extra_args(body, extra_args, **kwargs) elif resource_type == PersistenceProfileTypes.SOURCE_IP: body['resource_type'] = resource_type extra_args = ['ha_persistence_mirroring_enabled', 'purge', 'timeout'] return utils.build_extra_args(body, extra_args, **kwargs) else: raise nsxlib_exc.InvalidInput( operation='create_persistence_profile', arg_val=resource_type, arg_name='resource_type') class Rule(LoadBalancerBase): resource = 'loadbalancer/rules' class ClientSslProfile(LoadBalancerBase): resource = 'loadbalancer/client-ssl-profiles' class ServerSslProfile(LoadBalancerBase): resource = 'loadbalancer/server-ssl-profiles' class Monitor(LoadBalancerBase): resource = 'loadbalancer/monitors' @staticmethod def _build_args(body, display_name=None, description=None, tags=None, resource_type=None, **kwargs): if display_name: body['display_name'] = display_name if description: body['description'] = description if tags: body['tags'] = tags if resource_type == MonitorTypes.HTTP: body['resource_type'] = resource_type extra_args = ['fall_count', 'interval', 'monitor_port', 'request_body', 'request_method', 'request_url', 'request_version', 'response_body', 'response_status_codes', 'rise_count', 'timeout'] return utils.build_extra_args(body, extra_args, **kwargs) elif resource_type == MonitorTypes.HTTPS: body['resource_type'] = resource_type extra_args = ['certificate_chain_depth', 'ciphers', 'client_certificate_id', 'fall_count', 'interval', 'monitor_port', 'protocols', 'request_body', 'request_method', 'request_url', 'request_version', 'response_body', 'response_status_codes', 'rise_count', 'server_auth', 'server_auth_ca_ids', 'server_auth_crl_ids', 'timeout'] return utils.build_extra_args(body, extra_args, **kwargs) elif resource_type == MonitorTypes.ICMP: body['resource_type'] = resource_type extra_args = ['data_length', 'fall_count', 'interval', 'monitor_port', 'rise_count', 'timeout'] return utils.build_extra_args(body, extra_args, **kwargs) elif resource_type == MonitorTypes.PASSIVE: body['resource_type'] = resource_type extra_args = ['max_fails', 'timeout'] return utils.build_extra_args(body, extra_args, **kwargs) elif (resource_type == MonitorTypes.TCP or resource_type == MonitorTypes.UDP): body['resource_type'] = resource_type extra_args = ['fall_count', 'interval', 'monitor_port', 'receive', 'rise_count', 'send', 'timeout'] return utils.build_extra_args(body, extra_args, **kwargs) else: raise nsxlib_exc.InvalidInput( operation='create_monitor', arg_val=resource_type, arg_name='resource_type') class Pool(LoadBalancerBase): resource = 'loadbalancer/pools' def update_pool_with_members(self, pool_id, members): # Using internal method so we can access max_attempts in the decorator @utils.retry_upon_exception( nsxlib_exc.StaleRevision, max_attempts=self.client.max_attempts) def do_update(): object_url = self.resource + '/' + pool_id body = self.client.get(object_url) body['members'] = members return self.client.update(object_url, body) return do_update() def add_monitor_to_pool(self, pool_id, monitor_id): self.add_to_list(pool_id, monitor_id, 'active_monitor_ids') def remove_monitor_from_pool(self, pool_id, monitor_id): self.remove_from_list(pool_id, monitor_id, 'active_monitor_ids') class VirtualServer(LoadBalancerBase): resource = 'loadbalancer/virtual-servers' def update_virtual_server_with_pool(self, virtual_server_id, pool_id): # Using internal method so we can access max_attempts in the decorator @utils.retry_upon_exception( nsxlib_exc.StaleRevision, 
max_attempts=self.client.max_attempts) def do_update(): object_url = self.resource + '/' + virtual_server_id body = self.client.get(object_url) body['pool_id'] = pool_id return self.client.update(object_url, body) return do_update() def update_virtual_server_with_profiles(self, virtual_server_id, application_profile_id=None, persistence_profile_id=None, ip_protocol=None): # Using internal method so we can access max_attempts in the decorator @utils.retry_upon_exception( nsxlib_exc.StaleRevision, max_attempts=self.client.max_attempts) def do_update(): object_url = self.resource + '/' + virtual_server_id body = self.client.get(object_url) if application_profile_id: body['application_profile_id'] = application_profile_id if persistence_profile_id: body['persistence_profile_id'] = persistence_profile_id # In case the application profile is updated and its protocol # is updated as well, backend requires us to pass the new # protocol in the virtual server body. if ip_protocol: body['ip_protocol'] = ip_protocol return self.client.update(object_url, body) return do_update() def update_virtual_server_with_vip(self, virtual_server_id, vip): # Using internal method so we can access max_attempts in the decorator @utils.retry_upon_exception( nsxlib_exc.StaleRevision, max_attempts=self.client.max_attempts) def do_update(): object_url = self.resource + '/' + virtual_server_id body = self.client.get(object_url) body['ip_address'] = vip return self.client.update(object_url, body) return do_update() def add_rule(self, vs_id, rule_id): self.add_to_list(vs_id, rule_id, 'rule_ids') def remove_rule(self, vs_id, rule_id): self.remove_from_list(vs_id, rule_id, 'rule_ids') def add_client_ssl_profile_binding(self, virtual_server_id, ssl_profile_id, default_certificate_id, sni_certificate_ids=None, **kwargs): # Using internal method so we can access max_attempts in the decorator @utils.retry_upon_exception( nsxlib_exc.StaleRevision, max_attempts=self.client.max_attempts) def do_update(): binding = {'ssl_profile_id': ssl_profile_id, 'default_certificate_id': default_certificate_id} if sni_certificate_ids: binding.update({'sni_certificate_ids': sni_certificate_ids}) valid_args = ['client_auth_ca_ids', 'client_auth_crl_ids', 'certificate_chain_depth', 'client_auth'] # Remove the args that is not in the valid_args list or the # keyword argument doesn't have value. for arg in kwargs: if arg in valid_args and kwargs.get(arg): binding[arg] = kwargs.get(arg) object_url = self.resource + '/' + virtual_server_id body = self.client.get(object_url) body['client_ssl_profile_binding'] = binding return self.client.update(object_url, body) return do_update() def add_server_ssl_profile_binding(self, virtual_server_id, ssl_profile_id, **kwargs): # Using internal method so we can access max_attempts in the decorator @utils.retry_upon_exception( nsxlib_exc.StaleRevision, max_attempts=self.client.max_attempts) def do_update(): binding = {'ssl_profile_id': ssl_profile_id} valid_args = ['server_auth_ca_ids', 'server_auth_crl_ids', 'certificate_chain_depth', 'server_auth', 'client_certificate_id'] # Remove the args that is not in the valid_args list or the # keyword argument doesn't have value. 
for arg in kwargs: if arg in valid_args and kwargs.get(arg): binding[arg] = kwargs[arg] object_url = self.resource + '/' + virtual_server_id body = self.client.get(object_url) body['server_ssl_profile_binding'] = binding return self.client.update(object_url, body) return do_update() class Service(LoadBalancerBase): resource = 'loadbalancer/services' def _build_args(self, body, display_name=None, description=None, tags=None, resource_type=None, **kwargs): if display_name: body['display_name'] = display_name if description: body['description'] = description if tags: body['tags'] = tags if resource_type: body['resource_type'] = resource_type if ('relax_scale_validation' in kwargs and not self.nsxlib.feature_supported( nsx_constants.FEATURE_RELAX_SCALE_VALIDATION)): kwargs.pop('relax_scale_validation') LOG.warning("Ignoring relax_scale_validation for new " "lb service %s: this feature is not supported.", display_name) body.update(kwargs) return body def update_service_with_virtual_servers(self, service_id, virtual_server_ids): # Using internal method so we can access max_attempts in the decorator @utils.retry_upon_exception( nsxlib_exc.StaleRevision, max_attempts=self.client.max_attempts) def do_update(): object_url = self.resource + '/' + service_id body = self.client.get(object_url) body['virtual_server_ids'] = virtual_server_ids return self.client.update(object_url, body) return do_update() def update_service_with_attachment(self, service_id, logical_router_id, tags=None): # Using internal method so we can access max_attempts in the decorator @utils.retry_upon_exception( nsxlib_exc.StaleRevision, max_attempts=self.client.max_attempts) def do_update(): object_url = self.resource + '/' + service_id body = self.client.get(object_url) body['attachment'] = {'target_id': logical_router_id, 'target_type': 'LogicalRouter'} if tags is not None: body['tags'] = tags return self.client.update(object_url, body) return do_update() def add_virtual_server(self, service_id, vs_id): self.add_to_list(service_id, vs_id, 'virtual_server_ids') def remove_virtual_server(self, service_id, vs_id): self.remove_from_list(service_id, vs_id, 'virtual_server_ids') def get_router_lb_service(self, nsx_router_id): lb_services = self.list()['results'] for service in lb_services: if service.get('attachment'): if service['attachment']['target_id'] == nsx_router_id: return service def get_status(self, service_id): object_url = '%s/%s/%s' % (self.resource, service_id, 'status') return self.client.get(object_url) def get_virtual_servers_status(self, service_id): object_url = '%s/%s/%s/%s' % (self.resource, service_id, 'virtual-servers', 'status') return self.client.get(object_url) def get_stats(self, service_id, source='realtime'): object_url = '%s/%s/%s?source=%s' % (self.resource, service_id, 'statistics', source) return self.client.get(object_url) def get_usage(self, service_id): object_url = '%s/%s/%s' % (self.resource, service_id, 'usage') return self.client.get(object_url) class LoadBalancer(object): """This is the class that have all load balancer resource clients""" def __init__(self, client, nsxlib_config=None, nsxlib=None): self.service = Service(client, nsxlib_config, nsxlib) self.virtual_server = VirtualServer(client, nsxlib_config) self.pool = Pool(client, nsxlib_config) self.monitor = Monitor(client, nsxlib_config) self.application_profile = ApplicationProfile(client, nsxlib_config) self.persistence_profile = PersistenceProfile(client, nsxlib_config) self.client_ssl_profile = ClientSslProfile(client, nsxlib_config) 
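# Illustrative usage sketch (not part of this module): wiring the resource
# clients of this aggregator together. `lb` is assumed to be a LoadBalancer
# instance and the UUID arguments are placeholders.
def wire_virtual_server(lb, service_id, vs_id, pool_id, monitor_id,
                        router_id):
    lb.pool.add_monitor_to_pool(pool_id, monitor_id)
    lb.virtual_server.update_virtual_server_with_pool(vs_id, pool_id)
    lb.service.update_service_with_attachment(service_id, router_id)
    lb.service.add_virtual_server(service_id, vs_id)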
self.server_ssl_profile = ServerSslProfile(client, nsxlib_config) self.rule = Rule(client, nsxlib_config) vmware-nsxlib-15.0.6/vmware_nsxlib/v3/__init__.py0000664000175000017500000002650613623151571021761 0ustar zuulzuul00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from distutils import version from oslo_log import log from vmware_nsxlib._i18n import _ from vmware_nsxlib.v3 import client from vmware_nsxlib.v3 import cluster_management from vmware_nsxlib.v3 import core_resources from vmware_nsxlib.v3 import exceptions from vmware_nsxlib.v3 import lib from vmware_nsxlib.v3 import load_balancer from vmware_nsxlib.v3 import native_dhcp from vmware_nsxlib.v3 import nsx_constants from vmware_nsxlib.v3 import resources from vmware_nsxlib.v3 import router from vmware_nsxlib.v3 import security from vmware_nsxlib.v3 import trust_management from vmware_nsxlib.v3 import utils from vmware_nsxlib.v3 import vpn_ipsec LOG = log.getLogger(__name__) class NsxLib(lib.NsxLibBase): def init_api(self): self.port_mirror = core_resources.NsxLibPortMirror( self.client, self.nsxlib_config, nsxlib=self) self.bridge_endpoint = core_resources.NsxLibBridgeEndpoint( self.client, self.nsxlib_config, nsxlib=self) self.bridge_endpoint_profile = ( core_resources.NsxLibBridgeEndpointProfile( self.client, self.nsxlib_config, nsxlib=self)) self.logical_switch = core_resources.NsxLibLogicalSwitch( self.client, self.nsxlib_config, nsxlib=self) self.logical_router = core_resources.NsxLibLogicalRouter( self.client, self.nsxlib_config, nsxlib=self) self.switching_profile = core_resources.NsxLibSwitchingProfile( self.client, self.nsxlib_config, nsxlib=self) self.qos_switching_profile = core_resources.NsxLibQosSwitchingProfile( self.client, self.nsxlib_config, nsxlib=self) self.edge_cluster = core_resources.NsxLibEdgeCluster( self.client, self.nsxlib_config, nsxlib=self) self.bridge_cluster = core_resources.NsxLibBridgeCluster( self.client, self.nsxlib_config, nsxlib=self) self.transport_zone = core_resources.NsxLibTransportZone( self.client, self.nsxlib_config, nsxlib=self) self.transport_node = core_resources.NsxLibTransportNode( self.client, self.nsxlib_config, nsxlib=self) self.relay_service = core_resources.NsxLibDhcpRelayService( self.client, self.nsxlib_config, nsxlib=self) self.relay_profile = core_resources.NsxLibDhcpRelayProfile( self.client, self.nsxlib_config, nsxlib=self) self.native_dhcp_profile = core_resources.NsxLibDhcpProfile( self.client, self.nsxlib_config, nsxlib=self) self.native_md_proxy = core_resources.NsxLibMetadataProxy( self.client, self.nsxlib_config, nsxlib=self) self.firewall_section = security.NsxLibFirewallSection( self.client, self.nsxlib_config, nsxlib=self) self.ns_group = security.NsxLibNsGroup( self.client, self.nsxlib_config, self.firewall_section, nsxlib=self) self.native_dhcp = native_dhcp.NsxLibNativeDhcp( self.client, self.nsxlib_config, nsxlib=self) self.ip_block_subnet = core_resources.NsxLibIpBlockSubnet( self.client, 
self.nsxlib_config, nsxlib=self) self.ip_block = core_resources.NsxLibIpBlock( self.client, self.nsxlib_config, nsxlib=self) self.ip_set = security.NsxLibIPSet( self.client, self.nsxlib_config, nsxlib=self) self.logical_port = resources.LogicalPort( self.client, self.nsxlib_config, nsxlib=self) self.logical_router_port = resources.LogicalRouterPort( self.client, self.nsxlib_config, nsxlib=self) self.dhcp_server = resources.LogicalDhcpServer( self.client, self.nsxlib_config, nsxlib=self) self.ip_pool = resources.IpPool( self.client, self.nsxlib_config, nsxlib=self) self.load_balancer = load_balancer.LoadBalancer( self.client, self.nsxlib_config, nsxlib=self) self.trust_management = trust_management.NsxLibTrustManagement( self.client, self.nsxlib_config) self.router = router.RouterLib( self.logical_router, self.logical_router_port, self) self.virtual_machine = core_resources.NsxLibFabricVirtualMachine( self.client, self.nsxlib_config, nsxlib=self) self.vif = core_resources.NsxLibFabricVirtualInterface( self.client, self.nsxlib_config, nsxlib=self) self.vpn_ipsec = vpn_ipsec.VpnIpSec( self.client, self.nsxlib_config, nsxlib=self) self.http_services = resources.NodeHttpServiceProperties( self.client, self.nsxlib_config, nsxlib=self) self.cluster_nodes = resources.NsxlibClusterNodesConfig( self.client, self.nsxlib_config, nsxlib=self) self.global_routing = core_resources.NsxLibGlobalRoutingConfig( self.client, self.nsxlib_config, nsxlib=self) self.host_switch_profiles = resources.NsxlibHostSwitchProfiles( self.client, self.nsxlib_config, nsxlib=self) self.cluster_management = cluster_management.NsxLibClusterManagement( self.client, self.nsxlib_config) # Update tag limits self.tag_limits = self.get_tag_limits() utils.update_tag_limits(self.tag_limits) @property def keepalive_section(self): return 'transport-zones' @property def validate_connection_method(self): """Return a method that will validate the NSX manager status""" def check_manager_status_v1(client, manager_url): """MP healthcheck for Version 2.3 and below""" # Try to get the cluster status silently and with no retries status = client.get('operational/application/status', silent=True, with_retries=False) if (not status or status.get('application_status') != 'WORKING'): msg = _("Manager is not in working state: %s") % status LOG.warning(msg) raise exceptions.ResourceNotFound( manager=manager_url, operation=msg) def check_manager_status_v2(client, manager_url): """MP healthcheck for Version 2.4 and above""" # Try to get the status silently and with no retries status = client.get('reverse-proxy/node/health', silent=True, with_retries=False) if (not status or not status.get('healthy', False)): msg = _("Manager is not in working state: %s") % status LOG.warning(msg) raise exceptions.ResourceNotFound( manager=manager_url, operation=msg) def check_manager_status(client, manager_url): # Decide on the healthcheck by the version (if already initialized) if (self.nsx_version and version.LooseVersion(self.nsx_version) >= version.LooseVersion(nsx_constants.NSX_VERSION_2_4_0)): return check_manager_status_v2(client, manager_url) return check_manager_status_v1(client, manager_url) return check_manager_status def get_version(self): if self.nsx_version: return self.nsx_version node = self.client.get("node") self.nsx_version = node.get('node_version') return self.nsx_version def export_restricted(self): node = self.client.get("node") return node.get('export_type') == 'RESTRICTED' def feature_supported(self, feature): if 
(version.LooseVersion(self.get_version()) >= version.LooseVersion(nsx_constants.NSX_VERSION_3_0_0)): # features available since 3.0.0 if (feature == nsx_constants.FEATURE_GET_TZ_FROM_SWITCH): return True if (feature == nsx_constants.FEATURE_RELAX_SCALE_VALIDATION): return True if (version.LooseVersion(self.get_version()) >= version.LooseVersion(nsx_constants.NSX_VERSION_2_5_0)): # features available since 2.5 if (feature == nsx_constants.FEATURE_CONTAINER_CLUSTER_INVENTORY): return True if (feature == nsx_constants.FEATURE_IPV6): return True if (feature == nsx_constants.FEATURE_ENS_WITH_QOS): return True if (version.LooseVersion(self.get_version()) >= version.LooseVersion(nsx_constants.NSX_VERSION_2_4_0)): # Features available since 2.4 if (feature == nsx_constants.FEATURE_ENS_WITH_SEC): return True if (feature == nsx_constants.FEATURE_ICMP_STRICT): return True if (feature == nsx_constants.FEATURE_ENABLE_STANDBY_RELOCATION): return True if (version.LooseVersion(self.get_version()) >= version.LooseVersion(nsx_constants.NSX_VERSION_2_3_0)): # Features available since 2.3 if (feature == nsx_constants.FEATURE_ROUTER_ALLOCATION_PROFILE): return True if (feature == nsx_constants.FEATURE_LB_HM_RESPONSE_CODES): return True if (version.LooseVersion(self.get_version()) >= version.LooseVersion(nsx_constants.NSX_VERSION_2_2_0)): # Features available since 2.2 if (feature == nsx_constants.FEATURE_VLAN_ROUTER_INTERFACE or feature == nsx_constants.FEATURE_IPSEC_VPN or feature == nsx_constants.FEATURE_ON_BEHALF_OF or feature == nsx_constants.FEATURE_RATE_LIMIT or feature == nsx_constants.FEATURE_TRUNK_VLAN or feature == nsx_constants.FEATURE_ROUTER_TRANSPORT_ZONE or feature == nsx_constants.FEATURE_NO_DNAT_NO_SNAT): return True if (version.LooseVersion(self.get_version()) >= version.LooseVersion(nsx_constants.NSX_VERSION_2_1_0)): # Features available since 2.1 if (feature == nsx_constants.FEATURE_LOAD_BALANCER): return True if (version.LooseVersion(self.get_version()) >= version.LooseVersion(nsx_constants.NSX_VERSION_2_0_0)): # Features available since 2.0 if (feature == nsx_constants.FEATURE_EXCLUDE_PORT_BY_TAG or feature == nsx_constants.FEATURE_ROUTER_FIREWALL or feature == nsx_constants.FEATURE_DHCP_RELAY): return True if (version.LooseVersion(self.get_version()) >= version.LooseVersion(nsx_constants.NSX_VERSION_1_1_0)): # Features available since 1.1 if (feature == nsx_constants.FEATURE_MAC_LEARNING or feature == nsx_constants.FEATURE_DYNAMIC_CRITERIA): return True return False @property def client_url_prefix(self): return client.NSX3Client.NSX_V1_API_PREFIX vmware-nsxlib-15.0.6/vmware_nsxlib/v3/vpn_ipsec.py0000664000175000017500000003612213623151571022203 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
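# Illustrative sketch (not part of this module): the version-threshold
# comparison that feature_supported() above relies on, shown standalone.
# The version strings are examples only.
from distutils import version

def version_at_least(current, minimum):
    return (version.LooseVersion(current) >=
            version.LooseVersion(minimum))

# version_at_least('2.5.1', '2.4.0') -> True
# version_at_least('2.3.2', '2.4.0') -> False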
from oslo_log import log as logging from vmware_nsxlib.v3 import utils LOG = logging.getLogger(__name__) VPN_IPSEC_PATH = 'vpn/ipsec/' # The following classes define IPSEC NSX constants that are alo relevant to the # policy implementation: class IkeVersionTypes(object): """Supported IKE versions (NSX default is V2)""" IKE_VERSION_V1 = 'IKE_V1' IKE_VERSION_V2 = 'IKE_V2' IKE_VERSION_Flex = 'IKE_FLEX' class EncryptionAlgorithmTypes(object): """Supported encryption algorithms (NSX default is AES_128)""" ENCRYPTION_ALGORITHM_128 = 'AES_128' ENCRYPTION_ALGORITHM_256 = 'AES_256' ENCRYPTION_ALGORITHM_GCM_128 = 'AES_GCM_128' # only with IKE_V2 ENCRYPTION_ALGORITHM_GCM_192 = 'AES_GCM_192' # only with IKE_V2 ENCRYPTION_ALGORITHM_GCM_256 = 'AES_GCM_256' # only with IKE_V2 class DigestAlgorithmTypes(object): """Supported digest (auth) algorithms (NSX default is SHA2_256)""" DIGEST_ALGORITHM_SHA1 = 'SHA1' DIGEST_ALGORITHM_SHA256 = 'SHA2_256' DIGEST_ALGORITHM_SHA2_384 = 'SHA2_384' DIGEST_ALGORITHM_SHA2_512 = 'SHA2_512' DIGEST_ALGORITHM_GMAC_128 = 'GMAC_128' # only for tunnel profile DIGEST_ALGORITHM_GMAC_192 = 'GMAC_192' # only for tunnel profile DIGEST_ALGORITHM_GMAC_256 = 'GMAC_256' # only for tunnel profile class DHGroupTypes(object): """Supported DH groups for Perfect Forward Secrecy""" DH_GROUP_2 = 'GROUP2' DH_GROUP_5 = 'GROUP5' DH_GROUP_14 = 'GROUP14' DH_GROUP_15 = 'GROUP15' DH_GROUP_16 = 'GROUP16' DH_GROUP_19 = 'GROUP19' DH_GROUP_20 = 'GROUP20' DH_GROUP_21 = 'GROUP21' class EncapsulationModeTypes(object): """Supported encapsulation modes for ipsec tunnel profile""" ENCAPSULATION_MODE_TUNNEL = 'TUNNEL_MODE' class TransformProtocolTypes(object): """Supported transform protocols for ipsec tunnel profile""" TRANSFORM_PROTOCOL_ESP = 'ESP' class AuthenticationModeTypes(object): """Supported authentication modes for ipsec peer endpoint (default PSK)""" AUTH_MODE_PSK = 'PSK' AUTH_MODE_CERT = 'CERTIFICATE' class DpdProfileActionTypes(object): """Supported DPD profile actions""" DPD_PROFILE_ACTION_HOLD = 'HOLD' class DpdProfileTimeoutLimits(object): """Supported DPD timeout range""" DPD_TIMEOUT_MIN = 3 DPD_TIMEOUT_MAX = 360 class IkeSALifetimeLimits(object): """Limits to the allowed SA lifetime in seconds (NSX default is 1 day)""" SA_LIFETIME_MIN = 21600 SA_LIFETIME_MAX = 31536000 class IPsecSALifetimeLimits(object): """Limits to the allowed SA lifetime in seconds (NSX default is 3600)""" SA_LIFETIME_MIN = 900 SA_LIFETIME_MAX = 31536000 class ConnectionInitiationModeTypes(object): """Supported connection initiation mode type""" INITIATION_MODE_INITIATOR = 'INITIATOR' INITIATION_MODE_RESPOND_ONLY = 'RESPOND_ONLY' INITIATION_MODE_ON_DEMAND = 'ON_DEMAND' class IkeLogLevelTypes(object): """Supported service IKE log levels (default ERROR)""" LOG_LEVEL_DEBUG = 'DEBUG' LOG_LEVEL_INFO = 'INFO' LOG_LEVEL_WARN = 'WARN' LOG_LEVEL_ERROR = 'ERROR' class IkeProfile(utils.NsxLibApiBase): @property def resource_type(self): return 'IPSecVPNIKEProfile' @property def uri_segment(self): return VPN_IPSEC_PATH + 'ike-profiles' def create(self, name, description=None, encryption_algorithm=None, digest_algorithm=None, ike_version=None, dh_group=None, sa_life_time=None, tags=None): # mandatory parameters body = {'display_name': name} # optional parameters if description: body['description'] = description if encryption_algorithm: body['encryption_algorithms'] = [encryption_algorithm] if digest_algorithm: body['digest_algorithms'] = [digest_algorithm] if ike_version: body['ike_version'] = ike_version if sa_life_time: 
body['sa_life_time'] = sa_life_time if dh_group: body['dh_groups'] = [dh_group] if tags: body['tags'] = tags return self.client.create(self.get_path(), body=body) class IPSecTunnelProfile(utils.NsxLibApiBase): @property def resource_type(self): return 'IPSecVPNTunnelProfile' @property def uri_segment(self): return VPN_IPSEC_PATH + 'tunnel-profiles' def create(self, name, description=None, encryption_algorithm=None, digest_algorithm=None, pfs=None, dh_group=None, sa_life_time=None, tags=None): # mandatory parameters body = {'display_name': name} # optional parameters if description: body['description'] = description if encryption_algorithm: body['encryption_algorithms'] = [encryption_algorithm] if digest_algorithm: body['digest_algorithms'] = [digest_algorithm] if sa_life_time: body['sa_life_time'] = sa_life_time if dh_group: body['dh_groups'] = [dh_group] if tags: body['tags'] = tags # Boolean parameters if pfs is not None: body['enable_perfect_forward_secrecy'] = pfs return self.client.create(self.get_path(), body=body) class IPSecDpdProfile(utils.NsxLibApiBase): @property def resource_type(self): return 'IPSecVPNDPDProfile' @property def uri_segment(self): return VPN_IPSEC_PATH + 'dpd-profiles' def create(self, name, description=None, enabled=None, timeout=None, tags=None): # mandatory parameters body = {'display_name': name} # optional parameters if description: body['description'] = description if timeout: body['dpd_probe_interval'] = timeout # Boolean parameters if enabled is not None: body['enabled'] = enabled if tags: body['tags'] = tags return self.client.create(self.get_path(), body=body) def update(self, profile_id, name=None, description=None, enabled=None, timeout=None, tags=None): body = self.get(profile_id) if name: body['display_name'] = name if description: body['description'] = description if timeout: body['dpd_probe_interval'] = timeout if enabled is not None: body['enabled'] = enabled if tags is not None: body['tags'] = tags return self.client.update(self.get_path(profile_id), body=body) class IPSecPeerEndpoint(utils.NsxLibApiBase): @property def resource_type(self): return 'IPSecVPNPeerEndpoint' @property def uri_segment(self): return VPN_IPSEC_PATH + 'peer-endpoints' def create(self, name, peer_address, peer_id, description=None, authentication_mode=None, dpd_profile_id=None, ike_profile_id=None, ipsec_tunnel_profile_id=None, connection_initiation_mode=None, psk=None, tags=None): # mandatory parameters body = {'display_name': name, 'peer_address': peer_address, 'peer_id': peer_id} # optional parameters if description: body['description'] = description if authentication_mode: body['authentication_mode'] = authentication_mode if dpd_profile_id: body['dpd_profile_id'] = dpd_profile_id if ike_profile_id: body['ike_profile_id'] = ike_profile_id if ipsec_tunnel_profile_id: body['ipsec_tunnel_profile_id'] = ipsec_tunnel_profile_id if psk: body['psk'] = psk if connection_initiation_mode: body['connection_initiation_mode'] = connection_initiation_mode if tags: body['tags'] = tags return self.client.create(self.get_path(), body=body) def update(self, uuid, name=None, description=None, peer_address=None, peer_id=None, connection_initiation_mode=None, psk=None, tags=None): body = self.get(uuid) if description: body['description'] = description if name: body['display_name'] = name if psk: body['psk'] = psk if connection_initiation_mode: body['connection_initiation_mode'] = connection_initiation_mode if peer_address: body['peer_address'] = peer_address if peer_id: body['peer_id'] = 
peer_id if tags is not None: body['tags'] = tags return self.client.update(self.get_path(uuid), body=body) class LocalEndpoint(utils.NsxLibApiBase): @property def resource_type(self): return 'IPSecVPNLocalEndpoint' @property def uri_segment(self): return VPN_IPSEC_PATH + 'local-endpoints' def create(self, name, local_address, ipsec_vpn_service_id, description=None, local_id=None, certificate_id=None, trust_ca_ids=None, trust_crl_ids=None, tags=None): # mandatory parameters body = {'display_name': name, 'local_address': local_address, 'ipsec_vpn_service_id': {'target_id': ipsec_vpn_service_id}} # optional parameters if description: body['description'] = description if local_id: body['local_id'] = local_id if certificate_id: body['certificate_id'] = certificate_id if trust_ca_ids: body['trust_ca_ids'] = trust_ca_ids if trust_crl_ids: body['trust_crl_ids'] = trust_crl_ids if tags: body['tags'] = tags return self.client.create(self.get_path(), body=body) def update(self, uuid, name=None, description=None, local_address=None, ipsec_vpn_service_id=None, local_id=None, certificate_id=None, trust_ca_ids=None, trust_crl_ids=None, tags=None): body = self.get(uuid) if description: body['description'] = description if name: body['display_name'] = name if local_address: body['local_address'] = local_address if ipsec_vpn_service_id: body['ipsec_vpn_service_id'] = {'target_id': ipsec_vpn_service_id} if local_id: body['local_id'] = local_id if certificate_id: body['certificate_id'] = certificate_id if trust_ca_ids: body['trust_ca_ids'] = trust_ca_ids if trust_crl_ids: body['trust_crl_ids'] = trust_crl_ids if tags is not None: body['tags'] = tags return self.client.update(self.get_path(uuid), body=body) class Session(utils.NsxLibApiBase): @property def resource_type(self): return 'PolicyBasedIPSecVPNSession' @property def uri_segment(self): return VPN_IPSEC_PATH + 'sessions' def create(self, name, local_endpoint_id, peer_endpoint_id, policy_rules, description=None, enabled=True, tags=None): # mandatory parameters body = {'display_name': name, 'description': description, 'local_endpoint_id': local_endpoint_id, 'peer_endpoint_id': peer_endpoint_id, 'enabled': enabled, 'resource_type': self.resource_type, 'policy_rules': policy_rules} if tags: body['tags'] = tags return self.client.create(self.get_path(), body=body) def get_rule_obj(self, sources, destinations): src_subnets = [{'subnet': src} for src in sources] dst_subnets = [{'subnet': dst} for dst in destinations] return { 'sources': src_subnets, 'destinations': dst_subnets } def update(self, uuid, name=None, description=None, policy_rules=None, tags=None, enabled=None): body = self.get(uuid) if description: body['description'] = description if name: body['display_name'] = name if name: body['display_name'] = name if policy_rules is not None: body['policy_rules'] = policy_rules if enabled is not None: body['enabled'] = enabled return self.client.update(self.get_path(uuid), body=body) def get_status(self, uuid, source='realtime'): try: return self.client.get( self.get_path(uuid + '/status?source=%s' % source)) except Exception as e: LOG.warning("No status found for session %s: %s", uuid, e) return class Service(utils.NsxLibApiBase): @property def resource_type(self): return 'IPSecVPNService' @property def uri_segment(self): return VPN_IPSEC_PATH + 'services' def create(self, name, logical_router_id, enabled=True, ike_log_level="ERROR", tags=None, bypass_rules=None): # mandatory parameters body = {'display_name': name, 'logical_router_id': logical_router_id} 
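# Illustrative sketch (not part of this module): the policy-rule structure
# produced by Session.get_rule_obj() above, with placeholder subnets.
sources = ['10.0.0.0/24']
destinations = ['192.168.10.0/24']
policy_rule = {
    'sources': [{'subnet': src} for src in sources],
    'destinations': [{'subnet': dst} for dst in destinations],
}
# -> {'sources': [{'subnet': '10.0.0.0/24'}],
#     'destinations': [{'subnet': '192.168.10.0/24'}]}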
# optional parameters if ike_log_level: body['ike_log_level'] = ike_log_level if enabled is not None: body['enabled'] = enabled if tags: body['tags'] = tags if bypass_rules: body['bypass_rules'] = bypass_rules return self.client.create(self.get_path(), body=body) class VpnIpSec(object): """This is the class that have all vpn ipsec resource clients""" def __init__(self, client, nsxlib_config, nsxlib=None): self.ike_profile = IkeProfile(client, nsxlib_config, nsxlib=nsxlib) self.tunnel_profile = IPSecTunnelProfile(client, nsxlib_config, nsxlib=nsxlib) self.dpd_profile = IPSecDpdProfile(client, nsxlib_config, nsxlib=nsxlib) self.peer_endpoint = IPSecPeerEndpoint(client, nsxlib_config, nsxlib=nsxlib) self.local_endpoint = LocalEndpoint(client, nsxlib_config, nsxlib=nsxlib) self.session = Session(client, nsxlib_config, nsxlib=nsxlib) self.service = Service(client, nsxlib_config, nsxlib=nsxlib) vmware-nsxlib-15.0.6/vmware_nsxlib/v3/core_resources.py0000664000175000017500000012347113623151571023243 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections from oslo_log import log from oslo_log import versionutils from vmware_nsxlib._i18n import _ from vmware_nsxlib.v3 import exceptions from vmware_nsxlib.v3 import nsx_constants from vmware_nsxlib.v3 import utils LOG = log.getLogger(__name__) SwitchingProfileTypeId = collections.namedtuple( 'SwitchingProfileTypeId', 'profile_type, profile_id') PacketAddressClassifier = collections.namedtuple( 'PacketAddressClassifier', 'ip_address, mac_address, vlan') class NsxLibPortMirror(utils.NsxLibApiBase): @property def uri_segment(self): return 'mirror-sessions' @property def resource_type(self): return 'PortMirroringSession' def create_session(self, source_ports, dest_ports, direction, description, name, tags): """Create a PortMirror Session on the backend. :param source_ports: List of UUIDs of the ports whose traffic is to be mirrored. :param dest_ports: List of UUIDs of the ports where the mirrored traffic is to be sent. :param direction: String representing the direction of traffic to be mirrored. [INGRESS, EGRESS, BIDIRECTIONAL] :param description: String representing the description of the session. :param name: String representing the name of the session. :param tags: nsx backend specific tags. """ body = {'direction': direction, 'tags': tags, 'display_name': name, 'description': description, 'mirror_sources': source_ports, 'mirror_destination': dest_ports} return self.client.create(self.get_path(), body) def delete_session(self, mirror_session_id): """Delete a PortMirror session on the backend. :param mirror_session_id: string representing the UUID of the port mirror session to be deleted. 
""" self.client.delete(self.get_path(mirror_session_id)) class NsxLibBridgeEndpointProfile(utils.NsxLibApiBase): @property def uri_segment(self): return 'bridge-endpoint-profiles' @property def resource_type(self): return 'BridgeEndpointProfile' def create(self, display_name, edge_cluster_id, tags, edge_cluster_member_indexes=None, failover_mode=None): """Create a bridge endpoint profile on the backend. Create a bridge endpoint profile for a given edge cluster. :param display_name: name of the bridge endpoint profile :param edge_cluster_id: identifier of the edge cluster this profile should be associated with. :param tags: tags for the newly created resource. :param edge_cluster_member_indexes: iterable of integers specifying edge cluster members where the bridge endpoints will be created :param failover_mode: failover mode for the profile. Could be either PREEMPTIVE or NON_PREEMPTIVE. """ tags = tags or [] body = {'display_name': display_name, 'tags': tags} if failover_mode: body['failover_mode'] = failover_mode if edge_cluster_member_indexes: # Test for a list of integers try: member_indexes = [int(member_idx) for member_idx in edge_cluster_member_indexes] body['edge_cluster_member_indexes'] = member_indexes except (TypeError, ValueError) as e: LOG.Error("Invalid values for member indexes: %s", e) raise exceptions.InvalidInput( operation='Create BridgeEndpointProfile', arg_val=edge_cluster_member_indexes, arg_name='edge_cluster_member_indexes') return self.client.create(self.get_path(), body) def delete(self, bridge_endpoint_profile_id): """Delete a bridge endpoint profile on the backend. :param bridge_endpoint_profile_id: string representing the UUID of the bridge endpoint profile to be deleted. """ self.client.delete(self.get_path(bridge_endpoint_profile_id)) class NsxLibBridgeEndpoint(utils.NsxLibApiBase): @property def uri_segment(self): return 'bridge-endpoints' @property def resource_type(self): return 'BridgeEndpoint' def create(self, device_name, vlan_transport_zone_id, vlan_id, tags): """Create a bridge endpoint on the backend. Create a bridge endpoint resource on a bridge cluster for the L2 gateway network connection. :param device_name: device_name actually refers to the bridge cluster's UUID. :param vlan_transport_zone_id: identifier of the transport zone id where the endpoint will be created. Mandatory for endpoints on edge clusters. :param vlan_id: integer representing the VLAN segmentation ID. :param tags: nsx backend specific tags. """ body = {'bridge_endpoint_profile_id': device_name, 'vlan_transport_zone_id': vlan_transport_zone_id, 'tags': tags, 'vlan': vlan_id} return self.client.create(self.get_path(), body) def delete(self, bridge_endpoint_id): """Delete a bridge endpoint on the backend. :param bridge_endpoint_id: string representing the UUID of the bridge endpoint to be deleted. 
""" self.client.delete(self.get_path(bridge_endpoint_id)) class NsxLibLogicalSwitch(utils.NsxLibApiBase): @property def uri_segment(self): return 'logical-switches' @property def resource_type(self): return 'LogicalSwitch' def create(self, display_name, transport_zone_id, tags, replication_mode=nsx_constants.MTEP, admin_state=True, vlan_id=None, ip_pool_id=None, mac_pool_id=None, description=None, trunk_vlan_range=None): operation = "Create logical switch" if display_name: display_name = utils.escape_display_name(display_name) # TODO(salv-orlando): Validate Replication mode and admin_state # NOTE: These checks might be moved to the API client library if one # that performs such checks in the client is available body = {'transport_zone_id': transport_zone_id, 'replication_mode': replication_mode, 'display_name': display_name, 'tags': tags} if admin_state: body['admin_state'] = nsx_constants.ADMIN_STATE_UP else: body['admin_state'] = nsx_constants.ADMIN_STATE_DOWN if trunk_vlan_range: failed = False if (self.nsxlib and self.nsxlib.feature_supported( nsx_constants.FEATURE_TRUNK_VLAN)): if vlan_id is not None: failed = True LOG.error("Failed to create logical switch %(name)s with " "trunk vlan: vlan id %(vlan)s is used.", {'name': display_name, 'vlan': vlan_id}) elif (len(trunk_vlan_range) != 2 or trunk_vlan_range[0] > trunk_vlan_range[1]): failed = True LOG.error("Failed to create logical switch %(name)s with " "trunk vlan: illegal range (%(trunk)s) is used.", {'name': display_name, 'trunk': trunk_vlan_range}) else: body['vlan_trunk_spec'] = {'vlan_ranges': [ {'start': trunk_vlan_range[0], 'end': trunk_vlan_range[1]}]} else: LOG.error("Failed to create logical switch %s with trunk " "vlan: this feature is not supported.", display_name) failed = True if failed: raise exceptions.InvalidInput( operation=operation, arg_val=trunk_vlan_range, arg_name='trunk_vlan_range') elif vlan_id: body['vlan'] = vlan_id if ip_pool_id: body['ip_pool_id'] = ip_pool_id if mac_pool_id: body['mac_pool_id'] = mac_pool_id if description is not None: body['description'] = description return self.client.create(self.get_path(), body) def delete(self, lswitch_id): resource = '%s?detach=true&cascade=true' % lswitch_id self._delete_with_retry(resource) def update(self, lswitch_id, name=None, admin_state=None, tags=None, description=None): body = {} if name: name = utils.escape_display_name(name) body['display_name'] = name if admin_state is not None: if admin_state: body['admin_state'] = nsx_constants.ADMIN_STATE_UP else: body['admin_state'] = nsx_constants.ADMIN_STATE_DOWN if tags is not None: body['tags'] = tags if description is not None: body['description'] = description return self._update_with_retry(lswitch_id, body) class SwitchingProfileTypes(object): IP_DISCOVERY = 'IpDiscoverySwitchingProfile' MAC_LEARNING = 'MacManagementSwitchingProfile' PORT_MIRRORING = 'PortMirroringSwitchingProfile' QOS = 'QosSwitchingProfile' SPOOF_GUARD = 'SpoofGuardSwitchingProfile' SWITCH_SECURITY = 'SwitchSecuritySwitchingProfile' class WhiteListAddressTypes(object): PORT = 'LPORT_BINDINGS' SWITCH = 'LSWITCH_BINDINGS' class NsxLibSwitchingProfile(utils.NsxLibApiBase): @property def uri_segment(self): return 'switching-profiles' def list(self): return self.client.list( self.get_path('?include_system_owned=True')) def create(self, profile_type, display_name=None, description=None, **api_args): body = { 'resource_type': profile_type, 'display_name': display_name or '', 'description': description or '' } body.update(api_args) return 
self.client.create(self.get_path(), body=body) def update(self, uuid, profile_type, **api_args): body = { 'resource_type': profile_type } body.update(api_args) return self.client.update(self.get_path(uuid), body=body) def create_spoofguard_profile(self, display_name, description, whitelist_ports=False, whitelist_switches=False, tags=None): whitelist_providers = [] if whitelist_ports: whitelist_providers.append(WhiteListAddressTypes.PORT) if whitelist_switches: whitelist_providers.append(WhiteListAddressTypes.SWITCH) return self.create(SwitchingProfileTypes.SPOOF_GUARD, display_name=display_name, description=description, white_list_providers=whitelist_providers, tags=tags or []) def create_dhcp_profile(self, display_name, description, tags=None): dhcp_filter = { 'client_block_enabled': True, 'server_block_enabled': False } rate_limits = { 'enabled': False, 'rx_broadcast': 0, 'tx_broadcast': 0, 'rx_multicast': 0, 'tx_multicast': 0 } bpdu_filter = { 'enabled': True, 'white_list': [] } return self.create(SwitchingProfileTypes.SWITCH_SECURITY, display_name=display_name, description=description, tags=tags or [], dhcp_filter=dhcp_filter, rate_limits=rate_limits, bpdu_filter=bpdu_filter, block_non_ip_traffic=True) def create_mac_learning_profile(self, display_name, description, mac_learning_enabled=True, tags=None): mac_learning = { 'enabled': mac_learning_enabled, 'unicast_flooding_allowed': mac_learning_enabled } return self.create(SwitchingProfileTypes.MAC_LEARNING, display_name=display_name, description=description, tags=tags or [], mac_learning=mac_learning, mac_change_allowed=True) def create_port_mirror_profile(self, display_name, description, direction, destinations, tags=None): return self.create(SwitchingProfileTypes.PORT_MIRRORING, display_name=display_name, description=description, tags=tags or [], direction=direction, destinations=destinations) @classmethod def build_switch_profile_ids(cls, client, *profiles): ids = [] for profile in profiles: if isinstance(profile, str): profile = client.get(profile) if not isinstance(profile, SwitchingProfileTypeId): profile = SwitchingProfileTypeId( profile.get('key', profile.get('resource_type')), profile.get('value', profile.get('id'))) ids.append(profile) return ids class NsxLibQosSwitchingProfile(NsxLibSwitchingProfile): @property def resource_type(self): return 'QosSwitchingProfile' def _build_args(self, tags, name=None, description=None): body = {"resource_type": "QosSwitchingProfile", "tags": tags} return self._update_args( body, name=name, description=description) def _update_args(self, body, name=None, description=None): if name: body["display_name"] = name if description: body["description"] = description return body def _get_resource_type(self, direction): if direction == nsx_constants.EGRESS: return nsx_constants.EGRESS_SHAPING return nsx_constants.INGRESS_SHAPING def _enable_shaping_in_args(self, body, burst_size=None, peak_bandwidth=None, average_bandwidth=None, direction=None): resource_type = self._get_resource_type(direction) for shaper in body["shaper_configuration"]: if shaper["resource_type"] == resource_type: shaper["enabled"] = True if burst_size is not None: shaper["burst_size_bytes"] = burst_size if peak_bandwidth is not None: shaper["peak_bandwidth_mbps"] = peak_bandwidth if average_bandwidth is not None: shaper["average_bandwidth_mbps"] = average_bandwidth break return body def _disable_shaping_in_args(self, body, direction=None): resource_type = self._get_resource_type(direction) for shaper in 
body["shaper_configuration"]: if shaper["resource_type"] == resource_type: shaper["enabled"] = False shaper["burst_size_bytes"] = 0 shaper["peak_bandwidth_mbps"] = 0 shaper["average_bandwidth_mbps"] = 0 break return body def _update_dscp_in_args(self, body, qos_marking, dscp): body["dscp"] = {} body["dscp"]["mode"] = qos_marking.upper() if dscp: body["dscp"]["priority"] = dscp return body def create(self, tags, name=None, description=None): body = self._build_args(tags, name, description) return self.client.create(self.get_path(), body) def update(self, profile_id, tags, name=None, description=None): # update the relevant fields body = {} body = self._update_args(body, name, description) if tags is not None: body['tags'] = tags return self._update_with_retry(profile_id, body) def update_shaping(self, profile_id, shaping_enabled=False, burst_size=None, peak_bandwidth=None, average_bandwidth=None, qos_marking=None, dscp=None, direction=nsx_constants.INGRESS): versionutils.report_deprecated_feature( LOG, 'NsxLibQosSwitchingProfile.update_shaping is deprecated. ' 'Please use set_profile_shaping instead.') # get the current configuration body = self.get(profile_id) # update the relevant fields if shaping_enabled: body = self._enable_shaping_in_args( body, burst_size=burst_size, peak_bandwidth=peak_bandwidth, average_bandwidth=average_bandwidth, direction=direction) else: body = self._disable_shaping_in_args(body, direction=direction) body = self._update_dscp_in_args(body, qos_marking, dscp) return self._update_with_retry(profile_id, body) def set_profile_shaping(self, profile_id, ingress_bw_enabled=False, ingress_burst_size=None, ingress_peak_bandwidth=None, ingress_average_bandwidth=None, egress_bw_enabled=False, egress_burst_size=None, egress_peak_bandwidth=None, egress_average_bandwidth=None, qos_marking='trusted', dscp=None): """Set all shaping parameters in the QoS switch profile""" # get the current configuration body = self.get(profile_id) # update the ingress shaping if ingress_bw_enabled: body = self._enable_shaping_in_args( body, burst_size=ingress_burst_size, peak_bandwidth=ingress_peak_bandwidth, average_bandwidth=ingress_average_bandwidth, direction=nsx_constants.INGRESS) else: body = self._disable_shaping_in_args( body, direction=nsx_constants.INGRESS) # update the egress shaping if egress_bw_enabled: body = self._enable_shaping_in_args( body, burst_size=egress_burst_size, peak_bandwidth=egress_peak_bandwidth, average_bandwidth=egress_average_bandwidth, direction=nsx_constants.EGRESS) else: body = self._disable_shaping_in_args( body, direction=nsx_constants.EGRESS) # update dscp marking body = self._update_dscp_in_args(body, qos_marking, dscp) # update the profile in the backend return self._update_with_retry(profile_id, body) class NsxLibLogicalRouter(utils.NsxLibApiBase): @property def uri_segment(self): return 'logical-routers' @property def resource_type(self): return 'LogicalRouter' def _delete_resource_by_values(self, resource, skip_not_found=True, strict_mode=True, **kwargs): """Delete resource objects matching the values in kwargs If skip_not_found is True - do not raise an exception if no object was found. If strict_mode is True - warnings will be issued if 0 or >1 objects where deleted. 
""" resources_list = self.client.list(resource) matched_num = 0 for res in resources_list['results']: if utils.dict_match(kwargs, res): LOG.debug("Deleting %s from resource %s", res, resource) delete_resource = resource + "/" + str(res['id']) self.client.delete(delete_resource) matched_num = matched_num + 1 if matched_num == 0: if skip_not_found: if strict_mode: LOG.warning("No resource in %(res)s matched for values: " "%(values)s", {'res': resource, 'values': kwargs}) else: err_msg = (_("No resource in %(res)s matched for values: " "%(values)s") % {'res': resource, 'values': kwargs}) raise exceptions.ResourceNotFound( manager=self.client.nsx_api_managers, operation=err_msg) elif matched_num > 1 and strict_mode: LOG.warning("%(num)s resources in %(res)s matched for values: " "%(values)s", {'num': matched_num, 'res': resource, 'values': kwargs}) def _validate_nat_rule_action(self, action): if not action: return if action in ['SNAT', 'DNAT', 'NO_NAT', 'REFLEXIVE']: # legal values for all NSX versions return if (action not in ['NO_SNAT', 'NO_DNAT'] or ( self.nsxlib and not self.nsxlib.feature_supported( nsx_constants.FEATURE_NO_DNAT_NO_SNAT))): raise exceptions.InvalidInput( operation="Create/Update NAT rule", arg_val=action, arg_name='action') def add_nat_rule(self, logical_router_id, action, translated_network, source_net=None, dest_net=None, enabled=True, rule_priority=None, match_ports=None, match_protocol=None, match_resource_type=None, bypass_firewall=True, tags=None, display_name=None): self._validate_nat_rule_action(action) resource = 'logical-routers/%s/nat/rules' % logical_router_id body = {'action': action, 'enabled': enabled, 'translated_network': translated_network} if source_net: body['match_source_network'] = source_net if dest_net: body['match_destination_network'] = dest_net if rule_priority: body['rule_priority'] = rule_priority if match_ports: body['match_service'] = { 'resource_type': (match_resource_type or nsx_constants.L4_PORT_SET_NSSERVICE), 'destination_ports': match_ports, 'l4_protocol': match_protocol or nsx_constants.TCP} # nat_pass parameter is supported with the router firewall feature if (self.nsxlib and self.nsxlib.feature_supported( nsx_constants.FEATURE_ROUTER_FIREWALL)): body['nat_pass'] = bypass_firewall elif not bypass_firewall: LOG.error("Ignoring bypass_firewall for router %s nat rule: " "this feature is not supported.", logical_router_id) if tags is not None: body['tags'] = tags if display_name: body['display_name'] = display_name return self.client.create(resource, body) def change_edge_firewall_status(self, logical_router_id, action): resource = 'firewall/status/logical_routers/%s?action=%s' % ( logical_router_id, action) return self.client.create(resource) def add_static_route(self, logical_router_id, dest_cidr, nexthop, tags=None): resource = ('logical-routers/%s/routing/static-routes' % logical_router_id) body = {} if dest_cidr: body['network'] = dest_cidr if nexthop: body['next_hops'] = [{"ip_address": nexthop}] if tags is not None: body['tags'] = tags return self.client.create(resource, body) def delete_static_route(self, logical_router_id, static_route_id): resource = 'logical-routers/%s/routing/static-routes/%s' % ( logical_router_id, static_route_id) self.client.delete(resource) def delete_static_route_by_values(self, logical_router_id, dest_cidr=None, nexthop=None): resource = ('logical-routers/%s/routing/static-routes' % logical_router_id) kwargs = {} if dest_cidr: kwargs['network'] = dest_cidr if nexthop: kwargs['next_hops'] = 
[{"ip_address": nexthop}] return self._delete_resource_by_values(resource, **kwargs) def delete_nat_rule(self, logical_router_id, nat_rule_id): resource = 'logical-routers/%s/nat/rules/%s' % (logical_router_id, nat_rule_id) self.client.delete(resource) def delete_nat_rule_by_values(self, logical_router_id, strict_mode=True, skip_not_found=True, **kwargs): resource = 'logical-routers/%s/nat/rules' % logical_router_id return self._delete_resource_by_values( resource, skip_not_found=skip_not_found, strict_mode=strict_mode, **kwargs) def list_nat_rules(self, logical_router_id): resource = 'logical-routers/%s/nat/rules' % logical_router_id return self.client.list(resource) def update_nat_rule(self, logical_router_id, nat_rule_id, **kwargs): if 'action' in kwargs: self._validate_nat_rule_action(kwargs['action']) resource = 'logical-routers/%s/nat/rules/%s' % ( logical_router_id, nat_rule_id) return self._update_resource(resource, kwargs, retry=True) def update_advertisement(self, logical_router_id, **kwargs): resource = ('logical-routers/%s/routing/advertisement' % logical_router_id) # ignore load balancing flags if lb is the not supported if (self.nsxlib and not self.nsxlib.feature_supported( nsx_constants.FEATURE_LOAD_BALANCER)): for arg in ('advertise_lb_vip', 'advertise_lb_snat_ip'): if kwargs[arg]: LOG.error("Ignoring %(arg)s for router %(rtr)s " "update_advertisement: This feature is not " "supported.", {'arg': arg, 'rtr': logical_router_id}) del kwargs[arg] return self._update_resource(resource, kwargs, retry=True) def update_advertisement_rules(self, logical_router_id, rules, name_prefix=None, force=False): """Update the router advertisement rules If name_prefix is None, replace the entire list of NSX rules with the new given 'rules'. Else - delete the NSX rules with this name prefix, and add 'rules' to the rest. """ resource = ('logical-routers/%s/routing/advertisement/rules' % logical_router_id) callback = None def update_payload_cbk(revised_payload, requested_payload): # delete rules with this prefix: new_rules = [] for rule in revised_payload['rules']: if (not rule.get('display_name') or not rule['display_name'].startswith(name_prefix)): new_rules.append(rule) # add new rules new_rules.extend(requested_payload['rules']) revised_payload['rules'] = new_rules del requested_payload['rules'] if name_prefix: callback = update_payload_cbk # In case of updating advertisement rule on the logical router # owned by other Principal Entities, need to force the overwrite headers = {'X-Allow-Overwrite': 'true'} if force else None return self._update_resource( resource, {'rules': rules}, retry=True, update_payload_cbk=callback, headers=headers) def get_advertisement_rules(self, logical_router_id): resource = ('logical-routers/%s/routing/advertisement/rules' % logical_router_id) return self.client.get(resource) def get_debug_info(self, logical_router_id): resource = ('logical-routers/%s/debug-info?format=text' % logical_router_id) return self.client.get(resource) def get_transportzone_id(self, logical_router_id): res = self.get_debug_info(logical_router_id) for item in res['componentInfo']: if item['componentType'] in (nsx_constants.ROUTER_TYPE_TIER0_DR, nsx_constants.ROUTER_TYPE_TIER1_DR): if item['transportZoneId']: return item['transportZoneId'][0] LOG.warning('OverlayTransportZone is not yet available on' ' %s.' 
% (logical_router_id)) def create(self, display_name, tags, edge_cluster_uuid=None, tier_0=False, description=None, transport_zone_id=None, allocation_pool=None, enable_standby_relocation=False, failover_mode=None): # TODO(salv-orlando): If possible do not manage edge clusters # in the main plugin logic. router_type = (nsx_constants.ROUTER_TYPE_TIER0 if tier_0 else nsx_constants.ROUTER_TYPE_TIER1) body = {'display_name': display_name, 'router_type': router_type, 'tags': tags} if edge_cluster_uuid: body['edge_cluster_id'] = edge_cluster_uuid if description: body['description'] = description if transport_zone_id: body['advanced_config'] = { 'transport_zone_id': transport_zone_id} if failover_mode: body['failover_mode'] = failover_mode allocation_profile = {} if allocation_pool: allocation_profile['allocation_pool'] = allocation_pool if (enable_standby_relocation and self.nsxlib and self.nsxlib.feature_supported( nsx_constants.FEATURE_ENABLE_STANDBY_RELOCATION)): allocation_profile[ 'enable_standby_relocation'] = enable_standby_relocation if allocation_profile: body['allocation_profile'] = allocation_profile return self.client.create(self.get_path(), body=body) def delete(self, lrouter_id, force=False): url = lrouter_id if force: url += '?force=%s' % force return self.client.delete(self.get_path(url)) def update(self, lrouter_id, *args, **kwargs): body = {} for arg in kwargs: # special care for transport_zone_id if arg == 'transport_zone_id': body['advanced_config'] = { 'transport_zone_id': kwargs['transport_zone_id']} elif arg == 'enable_standby_relocation': if (self.nsxlib and self.nsxlib.feature_supported( nsx_constants.FEATURE_ENABLE_STANDBY_RELOCATION)): body['allocation_profile'] = { 'enable_standby_relocation': kwargs['enable_standby_relocation']} else: body[arg] = kwargs[arg] return self._update_with_retry(lrouter_id, body) def get_firewall_section_id(self, lrouter_id, router_body=None): """Return the id of the auto created firewall section of the router If the router was already retrieved from the backend it is possible to give it as an input to avoid another backend call. 
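        Illustrative example, reusing a router body that was already
        fetched::

            router = self.get(lrouter_id)
            section_id = self.get_firewall_section_id(lrouter_id,
                                                      router_body=router)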
""" if not router_body: router_body = self.get(lrouter_id) if 'firewall_sections' in router_body: firewall_sections = router_body['firewall_sections'] for sec in firewall_sections: if (sec.get('is_valid') and sec.get('target_type') == "FirewallSection"): return firewall_sections[0].get('target_id') def list(self, router_type=None): """List all/by type logical routers.""" if router_type: resource = '%s?router_type=%s' % (self.get_path(), router_type) else: resource = self.get_path() return self.client.list(resource) def get_redistribution(self, logical_router_id): resource = ('logical-routers/%s/routing/redistribution' % logical_router_id) return self.client.get(resource) def get_redistribution_rules(self, logical_router_id): resource = ('logical-routers/%s/routing/redistribution/rules' % logical_router_id) return self.client.get(resource) def update_redistribution_rules(self, logical_router_id, rules): resource = ('logical-routers/%s/routing/redistribution/rules' % logical_router_id) return self._update_resource(resource, {'rules': rules}, retry=True) def get_bgp_config(self, logical_router_id): resource = ('logical-routers/%s/routing/bgp' % logical_router_id) return self.client.get(resource) def get_route_map(self, logical_router_id, route_map_id): resource = ('logical-routers/%s/routing/route-maps/%s' % ( logical_router_id, route_map_id)) return self.client.get(resource) def get_ip_prefix_list(self, logical_router_id, ip_prefix_list_id): resource = ('logical-routers/%s/routing/ip-prefix-lists/%s' % ( logical_router_id, ip_prefix_list_id)) return self.client.get(resource) class NsxLibEdgeCluster(utils.NsxLibApiBase): @property def uri_segment(self): return 'edge-clusters' @property def resource_type(self): return 'EdgeCluster' @property def use_cache_for_get(self): return True def get_transport_nodes(self, uuid): ec = self.get(uuid) members = [] for member in ec.get('members', []): members.append(member.get('transport_node_id')) return members class NsxLibTransportZone(utils.NsxLibApiBase): TRANSPORT_TYPE_VLAN = nsx_constants.TRANSPORT_TYPE_VLAN TRANSPORT_TYPE_OVERLAY = nsx_constants.TRANSPORT_TYPE_OVERLAY HOST_SWITCH_MODE_ENS = nsx_constants.HOST_SWITCH_MODE_ENS HOST_SWITCH_MODE_STANDARD = nsx_constants.HOST_SWITCH_MODE_STANDARD @property def uri_segment(self): return 'transport-zones' @property def resource_type(self): return 'TransportZone' @property def use_cache_for_get(self): return True def get_transport_type(self, uuid): tz = self.get(uuid) return tz['transport_type'] def get_host_switch_mode(self, uuid): tz = self.get(uuid) return tz.get('host_switch_mode', self.HOST_SWITCH_MODE_STANDARD) class NsxLibTransportNode(utils.NsxLibApiBase): @property def uri_segment(self): return 'transport-nodes' @property def resource_type(self): return 'TransportNode' @property def use_cache_for_get(self): return True def get_transport_zones(self, uuid): tn = self.get(uuid) if (self.nsxlib and self.nsxlib.feature_supported( nsx_constants.FEATURE_GET_TZ_FROM_SWITCH)): if (not tn.get('host_switch_spec') or not tn['host_switch_spec'].get('host_switches')): return [] host_switches = tn.get('host_switch_spec').get('host_switches', []) return [ep.get('transport_zone_id') for ep in host_switches[0].get('transport_zone_endpoints', [])] else: return [ep.get('transport_zone_id') for ep in tn.get('transport_zone_endpoints', [])] class NsxLibDhcpProfile(utils.NsxLibApiBase): @property def uri_segment(self): return 'dhcp/server-profiles' @property def resource_type(self): return 'DhcpProfile' class 
NsxLibDhcpRelayService(utils.NsxLibApiBase): @property def uri_segment(self): return 'dhcp/relays' @property def resource_type(self): return 'DhcpRelayService' @property def use_cache_for_get(self): return True def get_server_ips(self, uuid): # Return the server ips of the relay profile attached to this service service = self.get(uuid) profile_id = service.get('dhcp_relay_profile_id') if profile_id and self.nsxlib: return self.nsxlib.relay_profile.get_server_ips(profile_id) class NsxLibDhcpRelayProfile(utils.NsxLibApiBase): @property def uri_segment(self): return 'dhcp/relay-profiles' @property def resource_type(self): return 'DhcpRelayProfile' @property def use_cache_for_get(self): return True def get_server_ips(self, uuid): profile = self.get(uuid) return profile.get('server_addresses') class NsxLibMetadataProxy(utils.NsxLibApiBase): @property def uri_segment(self): return 'md-proxies' @property def resource_type(self): return 'MetadataProxy' @property def use_cache_for_get(self): return True def update(self, uuid, server_url=None, secret=None, edge_cluster_id=None): body = {} # update the relevant fields if server_url is not None: body['metadata_server_url'] = server_url if secret is not None: body['secret'] = secret if edge_cluster_id is not None: body['edge_cluster_id'] = edge_cluster_id return self._update_with_retry(uuid, body) def get_md_proxy_status(self, attachment_id, logical_switch_id): """Return all matching logical port statuses""" url_suffix = ('/%s/%s/status' % (attachment_id, logical_switch_id)) return self.client.get(self.get_path(url_suffix)) class NsxLibBridgeCluster(utils.NsxLibApiBase): @property def uri_segment(self): return 'bridge-clusters' @property def resource_type(self): return 'BridgeCluster' class NsxLibIpBlockSubnet(utils.NsxLibApiBase): @property def uri_segment(self): return 'pools/ip-subnets' @property def resource_type(self): return 'IpBlockSubnet' def create(self, ip_block_id, subnet_size, allow_overwrite=False): """Create a IP block subnet on the backend.""" body = {'size': subnet_size, 'block_id': ip_block_id} headers = None if allow_overwrite: # In case of manager to policy API resources imports, # a Policy owned Manager IpBlock resource might be needed # to allocate subnet using Manager APIs. 
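            # As with the advertisement-rule update earlier in this module,
            # the 'X-Allow-Overwrite' header forces the write on a resource
            # owned by another principal identity (here the policy API), so
            # the subnet allocation is not rejected.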
headers = {'X-Allow-Overwrite': 'true'} return self.client.create(self.get_path(), body, headers=headers) def delete(self, subnet_id): """Delete a IP block subnet on the backend.""" self.client.delete(self.get_path(subnet_id)) def list(self, ip_block_id): resource = '%s?block_id=%s' % (self.get_path(), ip_block_id) return self.client.get(resource) class NsxLibIpBlock(utils.NsxLibApiBase): @property def uri_segment(self): return 'pools/ip-blocks' @property def resource_type(self): return 'IpBlock' class NsxLibFabricVirtualMachine(utils.NsxLibApiBase): @property def uri_segment(self): return 'fabric/virtual-machines' @property def resource_type(self): return 'VirtualMachine' def get_by_display_name(self, display_name): url = '%s?display_name=%s' % (self.get_path(), display_name) return self.client.get(url) class NsxLibFabricVirtualInterface(utils.NsxLibApiBase): @property def uri_segment(self): return 'fabric/vifs' @property def resource_type(self): return 'VirtualNetworkInterface' def get_by_owner_vm_id(self, owner_vm_id): url = '%s?owner_vm_id=%s' % (self.get_path(), owner_vm_id) return self.client.get(url) class NsxLibGlobalRoutingConfig(utils.NsxLibApiBase): @property def uri_segment(self): return 'global-configs/RoutingGlobalConfig' @property def resource_type(self): return 'RoutingGlobalConfig' def set_l3_forwarding_mode(self, mode): config = self.client.get(self.get_path()) if config['l3_forwarding_mode'] != mode: config['l3_forwarding_mode'] = mode self.client.update(self.get_path(), config) def enable_ipv6(self): return self.set_l3_forwarding_mode('IPV4_AND_IPV6') def disable_ipv6(self): return self.set_l3_forwarding_mode('IPV4_ONLY') vmware-nsxlib-15.0.6/vmware_nsxlib/v3/nsx_constants.py0000664000175000017500000001331613623151571023121 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
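# The constants below are shared across the library. Illustrative usage
# note: the FEATURE_* names near the end of this module are the values
# callers pass to the library's feature_supported() helper to guard
# version-dependent behavior, e.g.
# nsxlib.feature_supported(nsx_constants.FEATURE_TRUNK_VLAN).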
# Admin statuses ADMIN_STATE_UP = "UP" ADMIN_STATE_DOWN = "DOWN" # Replication modes MTEP = "MTEP" # Port attachment types ATTACHMENT_VIF = "VIF" ATTACHMENT_LR = "LOGICALROUTER" ATTACHMENT_DHCP = "DHCP_SERVICE" ATTACHMENT_MDPROXY = "METADATA_PROXY" VIF_RESOURCE_TYPE = "VifAttachmentContext" VIF_TYPE_PARENT = "PARENT" VIF_TYPE_CHILD = "CHILD" ALLOCATE_ADDRESS_NONE = "None" # NSXv3 L2 Gateway constants BRIDGE_ENDPOINT = "BRIDGEENDPOINT" FAILOVER_MODE_PREEMPTIVE = "PREEMPTIVE" FAILOVER_MODE_NONPREEMPTIVE = "NON_PREEMPTIVE" # Router type ROUTER_TYPE_TIER0 = "TIER0" ROUTER_TYPE_TIER1 = "TIER1" ROUTER_TYPE_TIER0_DR = "DISTRIBUTED_ROUTER_TIER0" ROUTER_TYPE_TIER1_DR = "DISTRIBUTED_ROUTER_TIER1" LROUTERPORT_UPLINK = "LogicalRouterUpLinkPort" LROUTERPORT_DOWNLINK = "LogicalRouterDownLinkPort" LROUTERPORT_CENTRALIZED = "LogicalRouterCentralizedServicePort" LROUTERPORT_LINKONTIER0 = "LogicalRouterLinkPortOnTIER0" LROUTERPORT_LINKONTIER1 = "LogicalRouterLinkPortOnTIER1" # NSX service type SERVICE_DHCP = "dhcp" # NSX-V3 Distributed Firewall constants IP_SET = 'IPSet' NSGROUP = 'NSGroup' NSGROUP_COMPLEX_EXP = 'NSGroupComplexExpression' NSGROUP_SIMPLE_EXP = 'NSGroupSimpleExpression' NSGROUP_TAG_EXP = 'NSGroupTagExpression' EXCLUDE_PORT = 'Exclude-Port' # Firewall rule position FW_INSERT_BEFORE = 'insert_before' FW_INSERT_AFTER = 'insert_after' FW_INSERT_BOTTOM = 'insert_bottom' FW_INSERT_TOP = 'insert_top' # firewall rule actions FW_ACTION_ALLOW = 'ALLOW' FW_ACTION_DROP = 'DROP' FW_ACTION_REJECT = 'REJECT' # firewall disable/enable FW_ENABLE = 'enable_firewall' FW_DISABLE = 'disable_firewall' # nsgroup members update actions NSGROUP_ADD_MEMBERS = 'ADD_MEMBERS' NSGROUP_REMOVE_MEMBERS = 'REMOVE_MEMBERS' # NSServices resource types L4_PORT_SET_NSSERVICE = 'L4PortSetNSService' ICMP_TYPE_NSSERVICE = 'ICMPTypeNSService' IP_PROTOCOL_NSSERVICE = 'IPProtocolNSService' # firewall section types FW_SECTION_LAYER3 = 'LAYER3' TARGET_TYPE_LOGICAL_SWITCH = 'LogicalSwitch' TARGET_TYPE_LOGICAL_PORT = 'LogicalPort' TARGET_TYPE_IPV4ADDRESS = 'IPv4Address' TARGET_TYPE_IPV6ADDRESS = 'IPv6Address' # filtering operators and expressions EQUALS = 'EQUALS' IN = 'IN' OUT = 'OUT' IN_OUT = 'IN_OUT' TCP = 'TCP' UDP = 'UDP' ICMPV4 = 'ICMPv4' ICMPV6 = 'ICMPv6' IPV4 = 'IPV4' IPV6 = 'IPV6' IPV4_IPV6 = 'IPV4_IPV6' LOCAL_IP_PREFIX = 'local_ip_prefix' LOGGING = 'logging' # Allowed address pairs NUM_ALLOWED_IP_ADDRESSES = 128 MAX_STATIC_ROUTES = 26 # QoS directions egress/ingress EGRESS = 'egress' INGRESS = 'ingress' EGRESS_SHAPING = 'EgressRateShaper' INGRESS_SHAPING = 'IngressRateShaper' # Transport zone constants TRANSPORT_TYPE_VLAN = 'VLAN' TRANSPORT_TYPE_OVERLAY = 'OVERLAY' HOST_SWITCH_MODE_ENS = 'ENS' HOST_SWITCH_MODE_STANDARD = 'STANDARD' # Error codes returned by the backend ERR_CODE_OBJECT_NOT_FOUND = 202 ERR_CODE_IPAM_POOL_EXHAUSTED = 5109 ERR_CODE_IPAM_SPECIFIC_IP = 5123 ERR_CODE_IPAM_IP_ALLOCATED = 5141 ERR_CODE_IPAM_IP_NOT_IN_POOL = 5110 ERR_CODE_IPAM_RANGE_MODIFY = 5602 ERR_CODE_IPAM_RANGE_DELETE = 5015 ERR_CODE_IPAM_RANGE_SHRUNK = 5016 # backend versions NSX_VERSION_1_1_0 = '1.1.0' NSX_VERSION_2_0_0 = '2.0.0' NSX_VERSION_2_1_0 = '2.1.0' NSX_VERSION_2_2_0 = '2.2.0' NSX_VERSION_2_3_0 = '2.3.0' NSX_VERSION_2_4_0 = '2.4.0' NSX_VERSION_2_5_0 = '2.5.0' NSX_VERSION_3_0_0 = '3.0.0' # Features available depending on the NSX Manager backend version FEATURE_MAC_LEARNING = 'MAC Learning' FEATURE_DYNAMIC_CRITERIA = 'Dynamic criteria' FEATURE_EXCLUDE_PORT_BY_TAG = 'Exclude Port by Tag' FEATURE_ROUTER_FIREWALL = 'Router Firewall' 
FEATURE_LOAD_BALANCER = 'Load Balancer' FEATURE_LB_HM_RESPONSE_CODES = 'Load Balancer HM response codes' FEATURE_DHCP_RELAY = 'DHCP Relay' FEATURE_VLAN_ROUTER_INTERFACE = 'VLAN Router Interface' FEATURE_RATE_LIMIT = 'Requests Rate Limit' FEATURE_IPSEC_VPN = 'IPSec VPN' FEATURE_ON_BEHALF_OF = 'On Behalf Of' FEATURE_TRUNK_VLAN = 'Trunk Vlan' FEATURE_ROUTER_TRANSPORT_ZONE = 'Router Transport Zone' FEATURE_NO_DNAT_NO_SNAT = 'No DNAT/No SNAT' FEATURE_ENS_WITH_SEC = 'ENS with security' FEATURE_ENS_WITH_QOS = 'ENS with QoS' FEATURE_ICMP_STRICT = 'Strict list of supported ICMP types and codes' FEATURE_ROUTER_ALLOCATION_PROFILE = 'Router Allocation Profile' FEATURE_ENABLE_STANDBY_RELOCATION = 'Router Enable standby relocation' FEATURE_PARTIAL_UPDATES = 'Partial Update with PATCH' FEATURE_RELAX_SCALE_VALIDATION = 'Relax Scale Validation for LbService' FEATURE_SWITCH_HYPERBUS_MODE = 'Switch hyperbus mode with policy API' FEATURE_GET_TZ_FROM_SWITCH = 'Get TZ endpoints from host switch' FEATURE_ROUTE_REDISTRIBUTION_CONFIG = 'Tier0 route redistribution config' # Features available depending on the Policy Manager backend version FEATURE_NSX_POLICY = 'NSX Policy' FEATURE_NSX_POLICY_NETWORKING = 'NSX Policy Networking' FEATURE_NSX_POLICY_MDPROXY = 'NSX Policy Metadata Proxy' FEATURE_NSX_POLICY_DHCP = 'NSX Policy DHCP' FEATURE_NSX_POLICY_GLOBAL_CONFIG = 'NSX Policy Global Config' FEATURE_NSX_POLICY_ADMIN_STATE = 'NSX Policy Segment admin state' # FEATURE available depending on Inventory service backend version FEATURE_CONTAINER_CLUSTER_INVENTORY = 'Container Cluster Inventory' FEATURE_IPV6 = 'IPV6 Forwarding and Address Allocation' vmware-nsxlib-15.0.6/vmware_nsxlib/v3/ns_group_manager.py0000664000175000017500000001363313623151571023545 0ustar zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from oslo_log import log from vmware_nsxlib._i18n import _ from vmware_nsxlib.v3 import exceptions from vmware_nsxlib.v3 import nsx_constants as consts LOG = log.getLogger(__name__) class NSGroupManager(object): """This class assists with NSX integration for Neutron security-groups Each Neutron security-group is associated with NSX NSGroup object. Some specific security policies are the same across all security-groups, i.e - Default drop rule, DHCP. In order to bind these rules to all NSGroups (security-groups), we create a nested NSGroup (which its members are also of type NSGroups) to group the other NSGroups and associate it with these rules. In practice, one NSGroup (nested) can't contain all the other NSGroups, as it has strict size limit. To overcome the limited space challenge, we create several nested groups instead of just one, and we evenly distribute NSGroups (security-groups) between them. By using an hashing function on the NSGroup uuid we determine in which group it should be added, and when deleting an NSGroup (security-group) we use the same procedure to find which nested group it was added. 
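    Illustrative usage (assuming 'nsxlib' is an initialized library object
    and 'sg_nsgroup_id' is the id of the NSGroup backing a security-group)::

        manager = NSGroupManager(nsxlib, size=2)
        manager.add_nsgroup(sg_nsgroup_id)
        # ... and on security-group deletion:
        manager.remove_nsgroup(sg_nsgroup_id)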
""" NESTED_GROUP_NAME = 'OS Nested Group' NESTED_GROUP_DESCRIPTION = ('OpenStack NSGroup. Do not delete.') def __init__(self, nsxlib, size): self.nsxlib_nsgroup = nsxlib.ns_group self._nested_groups = self._init_nested_groups(size) self._size = len(self._nested_groups) @property def size(self): return self._size @property def nested_groups(self): return self._nested_groups def _init_nested_groups(self, requested_size): # Construct the groups dict - # {0: ,.., n-1: } size = requested_size nested_groups = { self._get_nested_group_index_from_name(nsgroup): nsgroup['id'] for nsgroup in self.nsxlib_nsgroup.list() if self.nsxlib_nsgroup.is_internal_resource(nsgroup)} if nested_groups: size = max(requested_size, max(nested_groups) + 1) if size > requested_size: LOG.warning("Lowering the value of " "nsx_v3:number_of_nested_groups isn't " "supported, '%s' nested-groups will be used.", size) absent_groups = set(range(size)) - set(nested_groups.keys()) if absent_groups: LOG.warning( "Found %(num_present)s Nested Groups, " "creating %(num_absent)s more.", {'num_present': len(nested_groups), 'num_absent': len(absent_groups)}) for i in absent_groups: cont = self._create_nested_group(i) nested_groups[i] = cont['id'] return nested_groups def _get_nested_group_index_from_name(self, nested_group): # The name format is "Nested Group " return int(nested_group['display_name'].split()[-1]) - 1 def _create_nested_group(self, index): name_prefix = NSGroupManager.NESTED_GROUP_NAME name = '%s %s' % (name_prefix, index + 1) description = NSGroupManager.NESTED_GROUP_DESCRIPTION tags = self.nsxlib_nsgroup.build_v3_api_version_tag() return self.nsxlib_nsgroup.create(name, description, tags) def _hash_uuid(self, internal_id): return hash(uuid.UUID(internal_id)) def _suggest_nested_group(self, internal_id): # Suggests a nested group to use, can be iterated to find alternative # group in case that previous suggestions did not help. index = self._hash_uuid(internal_id) % self.size yield self.nested_groups[index] for i in range(1, self.size): index = (index + 1) % self.size yield self.nested_groups[index] def add_nsgroup(self, nsgroup_id): for group in self._suggest_nested_group(nsgroup_id): try: LOG.debug("Adding NSGroup %s to nested group %s", nsgroup_id, group) self.nsxlib_nsgroup.add_members( group, consts.NSGROUP, [nsgroup_id]) break except exceptions.NSGroupIsFull: LOG.debug("Nested group %(group_id)s is full, trying the " "next group..", {'group_id': group}) else: raise exceptions.ManagerError( details=_("Reached the maximum supported amount of " "security groups.")) def remove_nsgroup(self, nsgroup_id): for group in self._suggest_nested_group(nsgroup_id): try: self.nsxlib_nsgroup.remove_member( group, consts.NSGROUP, nsgroup_id, verify=True) break except exceptions.NSGroupMemberNotFound: LOG.warning("NSGroup %(nsgroup)s was expected to be found " "in group %(group_id)s, but wasn't. " "Looking in the next group..", {'nsgroup': nsgroup_id, 'group_id': group}) continue else: LOG.warning("NSGroup %s was marked for removal, but its " "reference is missing.", nsgroup_id) vmware-nsxlib-15.0.6/vmware_nsxlib/v3/trust_management.py0000664000175000017500000001152513623151571023572 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import six from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3 import utils BASE_SECTION = 'trust-management' CERT_SECTION = BASE_SECTION + '/certificates' ID_SECTION = BASE_SECTION + '/principal-identities' USER_GROUP_TYPES = [ 'read_only_api_users', 'read_write_api_users', 'superusers'] class NsxLibTrustManagement(utils.NsxLibApiBase): def create_cert_list(self, cert_pem, private_key=None, passphrase=None, tags=None): resource = CERT_SECTION + '?action=import' body = {'pem_encoded': cert_pem} if private_key: body.update( {'private_key': private_key}) if passphrase: body.update({'passphrase': passphrase}) if tags: body.update({'tags': tags}) return self.client.create(resource, body)['results'] def create_cert(self, cert_pem, private_key=None, passphrase=None, tags=None): results = self.create_cert_list(cert_pem, private_key, passphrase, tags) # note: the assumption of only one result is wrong. It returns the # chained certs if len(results) > 0: # should be only one result return results[0]['id'] def get_cert(self, cert_id): resource = CERT_SECTION + '/' + cert_id return self.client.get(resource) def get_certs(self): return self.client.get(CERT_SECTION)['results'] def delete_cert(self, cert_id): resource = CERT_SECTION + '/' + cert_id self.client.delete(resource) def find_cert_with_pem(self, cert_pem): # Find certificate with cert_pem certs = self.get_certs() cert_ids = [cert['id'] for cert in certs if cert['pem_encoded'] == cert_pem] return cert_ids def create_identity(self, name, cert_id, node_id, permission_group): # Validate permission group before sending to server if permission_group not in USER_GROUP_TYPES: raise nsxlib_exc.InvalidInput( operation='create_identity', arg_val=permission_group, arg_name='permission_group') body = {'name': name, 'certificate_id': cert_id, 'node_id': node_id, 'permission_group': permission_group, 'is_protected': True} self.client.create(ID_SECTION, body) def get_identities(self, name): ids = self.client.get(ID_SECTION)['results'] return [identity for identity in ids if identity['name'] == name] def delete_identity(self, identity_id): resource = ID_SECTION + '/' + identity_id self.client.delete(resource) def find_cert_and_identity(self, name, cert_pem): certs = self.get_certs() if not isinstance(cert_pem, six.text_type): cert_pem = cert_pem.decode('ascii') cert_ids = [cert['id'] for cert in certs if cert['pem_encoded'] == cert_pem] if not cert_ids: raise nsxlib_exc.ResourceNotFound( manager=self.client.nsx_api_managers, operation="find_certificate") identities = self.get_identities(name) # should be zero or one matching identities results = [identity for identity in identities if identity['certificate_id'] in cert_ids] if not results: raise nsxlib_exc.ResourceNotFound( manager=self.client.nsx_api_managers, operation="delete_identity") return results[0]['certificate_id'], results[0]['id'] def delete_cert_and_identity(self, name, cert_pem): cert_id, identity_id = self.find_cert_and_identity(name, cert_pem) self.delete_identity(identity_id) self.delete_cert(cert_id) def create_cert_and_identity(self, name, cert_pem, node_id, 
permission_group='read_write_api_users'): nsx_cert_id = self.create_cert(cert_pem) try: self.create_identity(name, nsx_cert_id, node_id, permission_group) except nsxlib_exc.ManagerError as e: self.delete_cert(nsx_cert_id) raise e vmware-nsxlib-15.0.6/vmware_nsxlib/v3/utils.py0000664000175000017500000005654613623151571021371 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import collections import inspect import re import time from oslo_log import log import tenacity from tenacity import _utils as tenacity_utils from vmware_nsxlib._i18n import _ from vmware_nsxlib.v3 import constants from vmware_nsxlib.v3 import exceptions as nsxlib_exceptions from vmware_nsxlib.v3 import nsx_constants LOG = log.getLogger(__name__) TagLimits = collections.namedtuple('TagLimits', ['scope_length', 'tag_length', 'max_tags']) # These defaults reflect latest tag & scope limits on the backend. As of 2.5, # backend no longer returns tag limit via API. MAX_RESOURCE_TYPE_LEN = 128 MAX_TAG_LEN = 256 MAX_TAGS = 15 MAX_NSGROUPS_CRITERIA_TAGS = 10 DEFAULT_MAX_ATTEMPTS = 10 DEFAULT_CACHE_AGE_SEC = 600 INJECT_HEADERS_CALLBACK = None IS_ATTR_SET_CALLBACK = None def set_is_attr_callback(callback): global IS_ATTR_SET_CALLBACK IS_ATTR_SET_CALLBACK = callback def is_attr_set(attr): if IS_ATTR_SET_CALLBACK: return IS_ATTR_SET_CALLBACK(attr) return attr is not None def set_inject_headers_callback(callback): global INJECT_HEADERS_CALLBACK INJECT_HEADERS_CALLBACK = callback def censor_headers(headers): censored_headers = ['authorization'] result = {} for name, value in headers.items(): if name.lower() in censored_headers: result[name] = '--- CENSORED ---' else: result[name] = value return result def _update_resource_length(length): global MAX_RESOURCE_TYPE_LEN MAX_RESOURCE_TYPE_LEN = length def _update_tag_length(length): global MAX_TAG_LEN MAX_TAG_LEN = length def _update_max_tags(max_tags): global MAX_TAGS MAX_TAGS = max_tags def _update_max_nsgroups_criteria_tags(max_tags): global MAX_NSGROUPS_CRITERIA_TAGS MAX_NSGROUPS_CRITERIA_TAGS = max(10, max_tags - 5) def update_tag_limits(limits): _update_resource_length(limits.scope_length) _update_tag_length(limits.tag_length) _update_max_tags(limits.max_tags) _update_max_nsgroups_criteria_tags(limits.max_tags) def _validate_resource_type_length(resource_type): # Add in a validation to ensure that we catch this at build time if len(resource_type) > MAX_RESOURCE_TYPE_LEN: raise nsxlib_exceptions.NsxLibInvalidInput( error_message=(_('Resource type cannot exceed %(max_len)s ' 'characters: %(resource_type)s') % {'max_len': MAX_RESOURCE_TYPE_LEN, 'resource_type': resource_type})) def add_v3_tag(tags, resource_type, tag): _validate_resource_type_length(resource_type) tags.append({'scope': resource_type, 'tag': tag[:MAX_TAG_LEN]}) return tags def update_v3_tags(current_tags, tags_update): current_scopes = set([tag['scope'] for tag in current_tags]) updated_scopes = set([tag['scope'] for tag in tags_update]) # All tags 
scopes which are either completely new or already defined on the # resource are left in place, unless the tag value is empty, in that case # it is ignored. tags = [{'scope': tag['scope'], 'tag': tag['tag']} for tag in (current_tags + tags_update) if tag['tag'] and tag['scope'] in (current_scopes ^ updated_scopes)] modified_scopes = current_scopes & updated_scopes for tag in tags_update: if tag['scope'] in modified_scopes: # If the tag value is empty or None, then remove the tag completely if tag['tag']: tag['tag'] = tag['tag'][:MAX_TAG_LEN] tags.append(tag) return tags def _log_before_retry(retry_state): """Before call strategy that logs to some logger the attempt.""" if retry_state.attempt_number > 1: LOG.warning("Retrying call to '%(func)s' for the %(num)s time", {'func': tenacity_utils.get_callback_name( retry_state.fn), 'num': tenacity_utils.to_ordinal( retry_state.attempt_number)}) def _get_args_from_frame(frames, frame_num): if len(frames) > frame_num and frames[frame_num] and frames[frame_num][0]: argvalues = inspect.getargvalues(frames[frame_num][0]) formated_args = inspect.formatargvalues(*argvalues) # remove the first 'self' arg from the log as it adds no information formated_args = re.sub(r'\(self=.*?, ', "(", formated_args) return formated_args def _log_after_retry(retry_state): """After call strategy that logs to some logger the finished attempt.""" # Using inspect to get arguments of the relevant call frames = inspect.trace() # Look at frame #2 first because of the internal functions _do_X formated_args = _get_args_from_frame(frames, 2) if not formated_args: formated_args = _get_args_from_frame(frames, 1) if not formated_args: formated_args = "Unknown" LOG.warning("Finished retry of %(func)s for the %(num)s time after " "%(time)0.3f(s) with args: %(args)s", {'func': tenacity_utils.get_callback_name(retry_state.fn), 'num': tenacity_utils.to_ordinal(retry_state.attempt_number), 'time': retry_state.seconds_since_start, 'args': formated_args}) def retry_upon_exception(exc, delay=0.5, max_delay=2, max_attempts=DEFAULT_MAX_ATTEMPTS): return tenacity.retry(reraise=True, retry=tenacity.retry_if_exception_type(exc), wait=tenacity.wait_exponential( multiplier=delay, max=max_delay), stop=tenacity.stop_after_attempt(max_attempts), before=_log_before_retry, after=_log_after_retry) def retry_random_upon_exception(exc, delay=0.5, max_delay=5, max_attempts=DEFAULT_MAX_ATTEMPTS): return tenacity.retry(reraise=True, retry=tenacity.retry_if_exception_type(exc), wait=tenacity.wait_random_exponential( multiplier=delay, max=max_delay), stop=tenacity.stop_after_attempt(max_attempts), before=_log_before_retry, after=_log_after_retry) def retry_upon_none_result(max_attempts, delay=0.5, max_delay=10, random=False): if random: wait_func = tenacity.wait_random_exponential( multiplier=delay, max=max_delay) else: wait_func = tenacity.wait_exponential( multiplier=delay, max=max_delay) return tenacity.retry(reraise=True, retry=tenacity.retry_if_result(lambda x: x is None), wait=wait_func, stop=tenacity.stop_after_attempt(max_attempts), before=_log_before_retry, after=_log_after_retry) def list_match(list1, list2): # Check if list1 and list2 have identical elements, but relaxed on # dict elements where list1's dict element can be a subset of list2's # corresponding element. 
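    # Illustrative behavior (per the rules above):
    #   list_match([{'a': 1}], [{'a': 1, 'b': 2}]) -> True
    #   list_match([{'a': 1, 'b': 2}], [{'a': 1}]) -> False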
if (not isinstance(list1, list) or not isinstance(list2, list) or len(list1) != len(list2)): return False list1 = sorted(list1) list2 = sorted(list2) for (v1, v2) in zip(list1, list2): if isinstance(v1, dict): if not dict_match(v1, v2): return False elif isinstance(v1, list): if not list_match(v1, v2): return False elif v1 != v2: return False return True def dict_match(dict1, dict2): # Check if dict1 is a subset of dict2. if not isinstance(dict1, dict) or not isinstance(dict2, dict): return False for k1, v1 in dict1.items(): if k1 not in dict2: return False v2 = dict2[k1] if isinstance(v1, dict): if not dict_match(v1, v2): return False elif isinstance(v1, list): if not list_match(v1, v2): return False elif v1 != v2: return False return True def get_name_and_uuid(name, uuid, tag=None, maxlen=80): short_uuid = '_' + uuid[:5] + '...' + uuid[-5:] maxlen = maxlen - len(short_uuid) if tag: maxlen = maxlen - len(tag) - 1 return name[:maxlen] + '_' + tag + short_uuid else: return name[:maxlen] + short_uuid def build_extra_args(body, extra_args, **kwargs): for arg in extra_args: if arg in kwargs: body[arg] = kwargs[arg] return body def escape_tag_data(data): # ElasticSearch query_string requires slashes and dashes to # be escaped. We assume no other reserved characters will be # used in tag scopes or values return data.replace('/', '\\/').replace('-', '\\-').replace(':', '\\:') def escape_display_name(display_name): # Illegal characters for the display names are ;|=,~@ rx = re.compile('([;|=,~@])') return rx.sub('.', display_name) class NsxLibCache(object): def __init__(self, timeout): self.timeout = timeout self._cache = {} super(NsxLibCache, self).__init__() def expired(self, entry): return (time.time() - entry['time']) > self.timeout def get(self, key): if key in self._cache: # check that the value is still valid if self.expired(self._cache[key]): # this entry has expired self.remove(key) else: return self._cache[key]['value'] def update(self, key, value): self._cache[key] = {'time': time.time(), 'value': value} def remove(self, key): if key in self._cache: del self._cache[key] class NsxLibApiBase(object): """Base class for nsxlib api """ def __init__(self, client, nsxlib_config=None, nsxlib=None): self.client = client self.nsxlib_config = nsxlib_config self.nsxlib = nsxlib super(NsxLibApiBase, self).__init__() self.cache = NsxLibCache(self.cache_timeout) @abc.abstractproperty def uri_segment(self): pass @abc.abstractproperty def resource_type(self): pass @property def use_cache_for_get(self): """By default no caching is used""" return False @property def cache_timeout(self): """the default cache aging time in seconds""" return DEFAULT_CACHE_AGE_SEC def get_path(self, resource=None): if resource: return '%s/%s' % (self.uri_segment, resource) return self.uri_segment def list(self): return self.client.list(self.uri_segment) def get(self, uuid, silent=False): if self.use_cache_for_get: # try to get it from the cache result = self.cache.get(uuid) if result: if not silent: LOG.debug("Getting %s from cache.", self.get_path(uuid)) return result # call the client result = self.client.get(self.get_path(uuid), silent=silent) if result and self.use_cache_for_get: # add the result to the cache self.cache.update(uuid, result) return result def read(self, uuid, silent=False): """The same as get""" return self.get(uuid, silent=silent) def delete(self, uuid): if self.use_cache_for_get: self.cache.remove(uuid) return self.client.delete(self.get_path(uuid)) def find_by_display_name(self, display_name): found = [] 
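        # display_name is not guaranteed to be unique on the backend, so all
        # matching resources are collected and returned as a list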
for resource in self.list()['results']: if resource['display_name'] == display_name: found.append(resource) return found def _update_with_retry(self, uuid, payload): if self.use_cache_for_get: self.cache.remove(uuid) return self._update_resource(self.get_path(uuid), payload, retry=True) def _internal_update_resource(self, resource, payload, headers=None, create_action=False, get_params=None, action_params=None, update_payload_cbk=None): get_path = action_path = resource if get_params: get_path = get_path + get_params if action_params: action_path = action_path + action_params revised_payload = self.client.get(get_path) # custom resource callback for updating the payload if update_payload_cbk: update_payload_cbk(revised_payload, payload) # special treatment for tags (merge old and new) if 'tags_update' in payload.keys(): revised_payload['tags'] = update_v3_tags( revised_payload.get('tags', []), payload['tags_update']) del payload['tags_update'] # update all the rest of the parameters for key_name in payload.keys(): # handle 2 levels of dictionary: if isinstance(payload[key_name], dict): if key_name not in revised_payload: revised_payload[key_name] = payload[key_name] else: # copy each key revised_payload[key_name].update(payload[key_name]) else: revised_payload[key_name] = payload[key_name] if create_action: return self.client.create(action_path, revised_payload, headers=headers) else: return self.client.update(action_path, revised_payload, headers=headers) def _update_resource(self, resource, payload, headers=None, create_action=False, get_params=None, action_params=None, update_payload_cbk=None, retry=False): if retry: # If revision_id of the payload that we send is older than what # NSX has, we will get a 412: Precondition Failed. # In that case we need to re-fetch, patch the response and send # it again with the new revision_id @retry_upon_exception( nsxlib_exceptions.StaleRevision, max_attempts=self.client.max_attempts) def do_update(): return self._internal_update_resource( resource, payload, headers=headers, create_action=create_action, get_params=get_params, action_params=action_params, update_payload_cbk=update_payload_cbk) return do_update() else: return self._internal_update_resource( resource, payload, headers=headers, create_action=create_action, get_params=get_params, action_params=action_params, update_payload_cbk=update_payload_cbk) def _delete_with_retry(self, resource): # Using internal method so we can access max_attempts in the decorator @retry_upon_exception( nsxlib_exceptions.StaleRevision, max_attempts=self.client.max_attempts) def _do_delete(): self.client.delete(self.get_path(resource)) _do_delete() def _create_with_retry(self, resource, body=None, headers=None): # Using internal method so we can access max_attempts in the decorator @retry_upon_exception( nsxlib_exceptions.StaleRevision, max_attempts=self.client.max_attempts) def _do_create(): return self.client.create(resource, body, headers=headers) return _do_create() def _get_resource_by_name_or_id(self, name_or_id, resource): all_results = self.client.list(resource)['results'] matched_results = [] for rs in all_results: if rs.get('id') == name_or_id: # Matched by id - must be unique return name_or_id if rs.get('display_name') == name_or_id: # Matched by name - add to the list to verify it is unique matched_results.append(rs) if len(matched_results) == 0: err_msg = (_("Could not find %(resource)s %(name)s") % {'name': name_or_id, 'resource': resource}) raise nsxlib_exceptions.ManagerError(details=err_msg) elif 
len(matched_results) > 1: err_msg = (_("Found multiple %(resource)s named %(name)s") % {'name': name_or_id, 'resource': resource}) raise nsxlib_exceptions.ManagerError(details=err_msg) return matched_results[0].get('id') def get_id_by_name_or_id(self, name_or_id): """Get a resource by it's display name or uuid Return the resource data, or raise an exception if not found or not unique """ return self._get_resource_by_name_or_id(name_or_id, self.get_path()) def build_v3_api_version_tag(self): """Some resources are created on the manager that do not have a corresponding plugin resource. """ return [{'scope': self.nsxlib_config.plugin_scope, 'tag': self.nsxlib_config.plugin_tag}, {'scope': "os-api-version", 'tag': self.nsxlib_config.plugin_ver}] def build_v3_api_version_project_tag(self, project_name, project_id=None): if not project_name: project_name = self.nsxlib_config.plugin_tag tags = [{'scope': self.nsxlib_config.plugin_scope, 'tag': self.nsxlib_config.plugin_tag}, {'scope': "os-api-version", 'tag': self.nsxlib_config.plugin_ver}, {'scope': 'os-project-name', 'tag': project_name[:MAX_TAG_LEN]}] if project_id: tags.append({'scope': 'os-project-id', 'tag': project_id[:MAX_TAG_LEN]}) return tags def is_internal_resource(self, nsx_resource): """Indicates whether the passed nsx-resource is internal owned by the plugin for internal use. """ for tag in nsx_resource.get('tags', []): if tag['scope'] == self.nsxlib_config.plugin_scope: return tag['tag'] == self.nsxlib_config.plugin_tag return False def build_v3_tags_payload(self, resource, resource_type, project_name): """Construct the tags payload that will be pushed to NSX-v3 Add :, os-project-id:, os-project-name: os-api-version: """ _validate_resource_type_length(resource_type) # There may be cases when the plugin creates the port, for example DHCP if not project_name: project_name = self.nsxlib_config.plugin_tag project_id = (resource.get('project_id', '') or resource.get('tenant_id', '')) # If project_id is present in resource and set to None, explicitly set # the project_id in tags as ''. 
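        # The returned payload has the general shape (values illustrative):
        #   [{'scope': <resource_type>, 'tag': <resource id>},
        #    {'scope': 'os-project-id', 'tag': <project id>},
        #    {'scope': 'os-project-name', 'tag': <project name>},
        #    {'scope': 'os-api-version', 'tag': <plugin version>}]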
if project_id is None: project_id = '' return [{'scope': resource_type, 'tag': resource.get('id', '')[:MAX_TAG_LEN]}, {'scope': 'os-project-id', 'tag': project_id[:MAX_TAG_LEN]}, {'scope': 'os-project-name', 'tag': project_name[:MAX_TAG_LEN]}, {'scope': 'os-api-version', 'tag': self.nsxlib_config.plugin_ver}] # Some utilities for services translations & validations # both for the nsx manager & policy manager def validate_icmp_params(icmp_type, icmp_code, icmp_version=4, strict=False): if icmp_version != 4: # ICMPv6 is currently not supported return if icmp_type: if (strict and icmp_type not in constants.IPV4_ICMP_STRICT_TYPES): raise nsxlib_exceptions.InvalidInput( operation='create_rule', arg_val=icmp_type, arg_name='icmp_type') if icmp_type not in constants.IPV4_ICMP_TYPES: raise nsxlib_exceptions.InvalidInput( operation='create_rule', arg_val=icmp_type, arg_name='icmp_type') if (icmp_code and strict and icmp_code not in constants.IPV4_ICMP_STRICT_TYPES[icmp_type]): raise nsxlib_exceptions.InvalidInput( operation='create_rule', arg_val=icmp_code, arg_name='icmp_code for this icmp_type') if (icmp_code and icmp_code not in constants.IPV4_ICMP_TYPES[icmp_type]): raise nsxlib_exceptions.InvalidInput( operation='create_rule', arg_val=icmp_code, arg_name='icmp_code for this icmp_type') def get_l4_protocol_name(protocol_number): if protocol_number is None: return protocol_number = constants.IP_PROTOCOL_MAP.get(protocol_number, protocol_number) try: protocol_number = int(protocol_number) except ValueError: raise nsxlib_exceptions.InvalidInput( operation='create_rule', arg_val=protocol_number, arg_name='protocol') if protocol_number == 6: return nsx_constants.TCP elif protocol_number == 17: return nsx_constants.UDP elif protocol_number == 1: return nsx_constants.ICMPV4 else: return protocol_number def get_dhcp_opt_code(name): _supported_options = { 'subnet-mask': 1, 'time-offset': 2, 'router': 3, 'dns-name': 6, 'host-name': 12, 'boot-file-size': 13, 'domain-name': 15, 'ip-forwarding': 19, 'interface-mtu': 26, 'broadcast-address': 28, 'arp-cache-timeout': 35, 'nis-domain': 40, 'nis-servers': 41, 'ntp-servers': 42, 'netbios-name-servers': 44, 'netbios-dd-server': 45, 'netbios-node-type': 46, 'netbios-scope': 47, 'dhcp-renewal-time': 58, 'dhcp-rebinding-time': 59, 'class-id': 60, 'dhcp-client-identifier': 61, 'nisplus-domain': 64, 'nisplus-servers': 65, 'tftp-server': 66, 'tftp-server-name': 66, 'bootfile-name': 67, 'system-architecture': 93, 'interface-id': 94, 'machine-id': 97, 'name-search': 117, 'subnet-selection': 118, 'domain-search': 119, 'classless-static-route': 121, 'tftp-server-address': 150, 'server-ip-address': 150, 'etherboot': 175, 'config-file': 209, 'path-prefix': 210, 'reboot-time': 211, } return _supported_options.get(name) vmware-nsxlib-15.0.6/vmware_nsxlib/v3/policy/0000775000175000017500000000000013623151652021136 5ustar zuulzuul00000000000000vmware-nsxlib-15.0.6/vmware_nsxlib/v3/policy/ipsec_vpn_defs.py0000664000175000017500000002262213623151571024503 0ustar zuulzuul00000000000000# Copyright 2019 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # from vmware_nsxlib.v3.policy import constants from vmware_nsxlib.v3.policy import core_defs TENANTS_PATH_PATTERN = "%s/" IPSEC_VPN_IKE_PROFILES_PATH_PATTERN = (TENANTS_PATH_PATTERN + "ipsec-vpn-ike-profiles/") IPSEC_VPN_TUNNEL_PROFILES_PATH_PATTERN = (TENANTS_PATH_PATTERN + "ipsec-vpn-tunnel-profiles/") IPSEC_VPN_DPD_PROFILES_PATH_PATTERN = (TENANTS_PATH_PATTERN + "ipsec-vpn-dpd-profiles/") IPSEC_VPN_SERVICE_PATH_PATTERN = ( core_defs.TIER1_LOCALE_SERVICES_PATH_PATTERN + "%s/ipsec-vpn-services/") IPSEC_VPN_DPD_PROFILES_PATH_PATTERN = (TENANTS_PATH_PATTERN + "ipsec-vpn-dpd-profiles/") class IpsecVpnIkeProfileDef(core_defs.ResourceDef): @property def path_pattern(self): return IPSEC_VPN_IKE_PROFILES_PATH_PATTERN @property def path_ids(self): return ('tenant', 'profile_id') @staticmethod def resource_type(): return "IPSecVpnIkeProfile" def get_obj_dict(self): body = super(IpsecVpnIkeProfileDef, self).get_obj_dict() self._set_attrs_if_specified(body, ["ike_version", "encryption_algorithms", "digest_algorithms", "dh_groups", "sa_life_time"]) return body class IpsecVpnTunnelProfileDef(core_defs.ResourceDef): @property def path_pattern(self): return IPSEC_VPN_TUNNEL_PROFILES_PATH_PATTERN @property def path_ids(self): return ('tenant', 'profile_id') @staticmethod def resource_type(): return "IPSecVpnTunnelProfile" def get_obj_dict(self): body = super(IpsecVpnTunnelProfileDef, self).get_obj_dict() self._set_attrs_if_specified(body, ["enable_perfect_forward_secrecy", "encryption_algorithms", "digest_algorithms", "dh_groups", "sa_life_time"]) return body class IpsecVpnDpdProfileDef(core_defs.ResourceDef): @property def path_pattern(self): return IPSEC_VPN_DPD_PROFILES_PATH_PATTERN @property def path_ids(self): return ('tenant', 'profile_id') @staticmethod def resource_type(): return "IPSecVpnDpdProfile" def get_obj_dict(self): body = super(IpsecVpnDpdProfileDef, self).get_obj_dict() self._set_attrs_if_specified(body, ["dpd_probe_interval", "enabled"]) return body class Tier1IPSecVpnServiceDef(core_defs.ResourceDef): @staticmethod def resource_type(): return 'IPSecVpnService' @property def path_pattern(self): return IPSEC_VPN_SERVICE_PATH_PATTERN def get_obj_dict(self): body = super(Tier1IPSecVpnServiceDef, self).get_obj_dict() self._set_attrs_if_specified(body, ['enabled', 'ike_log_level']) return body @property def path_ids(self): return ('tenant', 'tier1_id', 'service_id', 'vpn_service_id') class IpsecVpnLocalEndpointDef(core_defs.ResourceDef): @property def path_pattern(self): return IPSEC_VPN_SERVICE_PATH_PATTERN + "%s/local-endpoints/" @property def path_ids(self): return ('tenant', 'tier1_id', 'service_id', 'vpn_service_id', 'endpoint_id') @staticmethod def resource_type(): return "IPSecVpnLocalEndpoint" def get_obj_dict(self): body = super(IpsecVpnLocalEndpointDef, self).get_obj_dict() self._set_attrs_if_specified(body, ["local_address", "local_id", "certificate_path", "trust_ca_ids", "trust_crl_ids"]) return body class IPSecVpnRule(object): def __init__(self, name, rule_id, action=constants.IPSEC_VPN_RULE_PROTECT, description=None, enabled=True, logged=False, destination_cidrs=None, source_cidrs=None, sequence_number=0, tags=None): self.name = name self.description = description self.action = action self.enabled = enabled self.id = rule_id self.logged = logged self.destination_cidrs = destination_cidrs self.source_cidrs = source_cidrs self.sequence_number = sequence_number self.tags = tags 
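# Illustrative example (editorial note; hypothetical values): an
# IPSecVpnRule('rule1', 'rule-1', source_cidrs=['10.0.0.0/24'],
# destination_cidrs=['192.168.1.0/24']) is serialized by get_obj_dict()
# below to roughly:
#   {'display_name': 'rule1', 'id': 'rule-1', 'action': 'PROTECT',
#    'enabled': True, 'logged': False, 'resource_type': 'IPSecVpnRule',
#    'sources': [{'subnet': '10.0.0.0/24'}],
#    'destinations': [{'subnet': '192.168.1.0/24'}]}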
def get_obj_dict(self): obj = {'display_name': self.name, 'id': self.id, 'action': self.action, 'enabled': self.enabled, 'logged': self.logged, 'resource_type': 'IPSecVpnRule'} if self.description: obj['description'] = self.description if self.destination_cidrs: obj['destinations'] = [ {'subnet': cidr} for cidr in self.destination_cidrs] if self.source_cidrs: obj['sources'] = [ {'subnet': cidr} for cidr in self.source_cidrs] if self.sequence_number: obj['sequence_number'] = self.sequence_number if self.tags: obj['tags'] = self.tags return obj class Tier1IPSecVpnSessionDef(core_defs.ResourceDef): @staticmethod def resource_type(): return 'PolicyBasedIPSecVpnSession' @property def path_pattern(self): return IPSEC_VPN_SERVICE_PATH_PATTERN + "%s/sessions/" def get_obj_dict(self): body = super(Tier1IPSecVpnSessionDef, self).get_obj_dict() self._set_attrs_if_specified(body, ['enabled', 'peer_address', 'peer_id', 'psk']) if self.has_attr('rules'): body['rules'] = [ a.get_obj_dict() if isinstance(a, IPSecVpnRule) else a for a in self.get_attr('rules')] if self.has_attr('dpd_profile_id'): path = "" if self.get_attr('dpd_profile_id'): profile = IpsecVpnDpdProfileDef( profile_id=self.get_attr('dpd_profile_id'), tenant=self.get_tenant()) path = profile.get_resource_full_path() self._set_attr_if_specified(body, 'dpd_profile_id', body_attr='dpd_profile_path', value=path) if self.has_attr('ike_profile_id'): path = "" if self.get_attr('ike_profile_id'): profile = IpsecVpnIkeProfileDef( profile_id=self.get_attr('ike_profile_id'), tenant=self.get_tenant()) path = profile.get_resource_full_path() self._set_attr_if_specified(body, 'ike_profile_id', body_attr='ike_profile_path', value=path) if self.has_attr('tunnel_profile_id'): path = "" if self.get_attr('tunnel_profile_id'): profile = IpsecVpnTunnelProfileDef( profile_id=self.get_attr('tunnel_profile_id'), tenant=self.get_tenant()) path = profile.get_resource_full_path() self._set_attr_if_specified(body, 'tunnel_profile_id', body_attr='tunnel_profile_path', value=path) if self.has_attr('local_endpoint_id'): path = "" if self.get_attr('local_endpoint_id'): endpoint = IpsecVpnLocalEndpointDef( tier1_id=self.get_attr('tier1_id'), service_id=self.get_attr('service_id'), vpn_service_id=self.get_attr('vpn_service_id'), endpoint_id=self.get_attr('local_endpoint_id'), tenant=self.get_tenant()) path = endpoint.get_resource_full_path() self._set_attr_if_specified(body, 'local_endpoint_id', body_attr='local_endpoint_path', value=path) return body @property def path_ids(self): return ('tenant', 'tier1_id', 'service_id', 'vpn_service_id', 'session_id') class Tier1IPSecVpnSessionStatusDef(core_defs.ResourceDef): @property def path_pattern(self): return (IPSEC_VPN_SERVICE_PATH_PATTERN + "%s/sessions/%s/detailed-status/") @property def path_ids(self): # TODO(asarfaty): Why do we need the last '' entry? Need to find a # better solution for this. return ('tenant', 'tier1_id', 'service_id', 'vpn_service_id', 'session_id', '') vmware-nsxlib-15.0.6/vmware_nsxlib/v3/policy/lb_resources.py0000664000175000017500000014746013623151571024213 0ustar zuulzuul00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import abc from oslo_log import log as logging import six from vmware_nsxlib._i18n import _ from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3.policy import constants from vmware_nsxlib.v3.policy.core_resources import IGNORE from vmware_nsxlib.v3.policy.core_resources import NsxPolicyResourceBase from vmware_nsxlib.v3.policy import lb_defs from vmware_nsxlib.v3 import utils LOG = logging.getLogger(__name__) # Sentitel object to indicate unspecified attribute value # None value in attribute would indicate "unset" functionality, # while "ignore" means that the value not be present in request # body class NsxPolicyLBAppProfileBase(NsxPolicyResourceBase): """NSX Policy LB app profile""" def create_or_overwrite(self, name, lb_app_profile_id=None, description=IGNORE, http_redirect_to_https=IGNORE, http_redirect_to=IGNORE, idle_timeout=IGNORE, ntlm=IGNORE, request_body_size=IGNORE, request_header_size=IGNORE, response_header_size=IGNORE, response_timeout=IGNORE, x_forwarded_for=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): lb_app_profile_id = self._init_obj_uuid(lb_app_profile_id) lb_app_profile_def = self._init_def( lb_app_profile_id=lb_app_profile_id, name=name, description=description, http_redirect_to_https=http_redirect_to_https, http_redirect_to=http_redirect_to, idle_timeout=idle_timeout, ntlm=ntlm, request_body_size=request_body_size, request_header_size=request_header_size, response_header_size=response_header_size, response_timeout=response_timeout, x_forwarded_for=x_forwarded_for, tags=tags, tenant=tenant) self._create_or_store(lb_app_profile_def) return lb_app_profile_id def delete(self, lb_app_profile_id, tenant=constants.POLICY_INFRA_TENANT): lb_app_profile_def = self.entry_def( lb_app_profile_id=lb_app_profile_id, tenant=tenant) self.policy_api.delete(lb_app_profile_def) def get(self, lb_app_profile_id, tenant=constants.POLICY_INFRA_TENANT): lb_app_profile_def = self.entry_def( lb_app_profile_id=lb_app_profile_id, tenant=tenant) return self.policy_api.get(lb_app_profile_def) def list(self, tenant=constants.POLICY_INFRA_TENANT): lb_app_profile_def = self.entry_def(tenant=tenant) return self._list(lb_app_profile_def) def update(self, lb_app_profile_id, name=IGNORE, description=IGNORE, http_redirect_to_https=IGNORE, http_redirect_to=IGNORE, idle_timeout=IGNORE, ntlm=IGNORE, request_body_size=IGNORE, request_header_size=IGNORE, response_header_size=IGNORE, response_timeout=IGNORE, x_forwarded_for=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): self._update( lb_app_profile_id=lb_app_profile_id, name=name, description=description, http_redirect_to_https=http_redirect_to_https, http_redirect_to=http_redirect_to, idle_timeout=idle_timeout, ntlm=ntlm, request_body_size=request_body_size, request_header_size=request_header_size, response_header_size=response_header_size, response_timeout=response_timeout, x_forwarded_for=x_forwarded_for, tags=tags, tenant=tenant) def get_path(self, lb_app_profile_id, tenant=constants.POLICY_INFRA_TENANT): profile_def = self.entry_def( lb_app_profile_id=lb_app_profile_id, tenant=tenant) return 
profile_def.get_resource_full_path() class NsxPolicyLBAppProfileHttpApi(NsxPolicyLBAppProfileBase): """NSX Policy LB app profile""" @property def entry_def(self): return lb_defs.LBHttpProfileDef class NsxPolicyLBAppProfileFastTcpApi( NsxPolicyLBAppProfileBase): """NSX Policy LB app profile""" @property def entry_def(self): return lb_defs.LBFastTcpProfile class NsxPolicyLBAppProfileFastUdpApi( NsxPolicyLBAppProfileBase): """NSX Policy LB app profile""" @property def entry_def(self): return lb_defs.LBFastUdpProfile class NsxPolicyLoadBalancerServerSSLProfileApi(NsxPolicyResourceBase): """NSX Policy LB server ssl profile""" @property def entry_def(self): return lb_defs.LBServerSslProfileDef def create_or_overwrite(self, name, server_ssl_profile_id=None, description=IGNORE, tags=IGNORE, cipher_group_label=IGNORE, ciphers=IGNORE, protocols=IGNORE, session_cache_enabled=IGNORE, tenant=constants.POLICY_INFRA_TENANT): server_ssl_profile_id = self._init_obj_uuid(server_ssl_profile_id) lb_server_ssl_profile_def = self._init_def( server_ssl_profile_id=server_ssl_profile_id, name=name, description=description, tags=tags, protocols=protocols, tenant=tenant) self._create_or_store(lb_server_ssl_profile_def) return server_ssl_profile_id def delete(self, server_ssl_profile_id, tenant=constants.POLICY_INFRA_TENANT): lb_server_ssl_profile_def = self.entry_def( server_ssl_profile_id=server_ssl_profile_id, tenant=tenant) self.policy_api.delete(lb_server_ssl_profile_def) def get(self, server_ssl_profile_id, tenant=constants.POLICY_INFRA_TENANT): lb_server_ssl_profile_def = self.entry_def( server_ssl_profile_id=server_ssl_profile_id, tenant=tenant) return self.policy_api.get(lb_server_ssl_profile_def) def list(self, tenant=constants.POLICY_INFRA_TENANT): lb_server_ssl_profile_def = self.entry_def(tenant=tenant) return self._list(lb_server_ssl_profile_def) def update(self, server_ssl_profile_id, name=IGNORE, description=IGNORE, tags=IGNORE, cipher_group_label=IGNORE, ciphers=IGNORE, protocols=IGNORE, session_cache_enabled=IGNORE, tenant=constants.POLICY_INFRA_TENANT): self._update( server_ssl_profile_id=server_ssl_profile_id, name=name, description=description, tags=tags, protocols=protocols, tenant=tenant) class NsxPolicyLoadBalancerClientSSLProfileApi(NsxPolicyResourceBase): """NSX Policy LB client ssl profile""" @property def entry_def(self): return lb_defs.LBClientSslProfileDef def create_or_overwrite(self, name, client_ssl_profile_id=None, description=IGNORE, tags=IGNORE, protocols=IGNORE, tenant=constants.POLICY_INFRA_TENANT): client_ssl_profile_id = self._init_obj_uuid( client_ssl_profile_id) lb_client_ssl_profile_def = self._init_def( client_ssl_profile_id=client_ssl_profile_id, name=name, description=description, tags=tags, protocols=protocols, tenant=tenant) self._create_or_store(lb_client_ssl_profile_def) return client_ssl_profile_id def delete(self, client_ssl_profile_id, tenant=constants.POLICY_INFRA_TENANT): lb_client_ssl_profile_def = self.entry_def( client_ssl_profile_id=client_ssl_profile_id, tenant=tenant) self.policy_api.delete(lb_client_ssl_profile_def) def get(self, client_ssl_profile_id, tenant=constants.POLICY_INFRA_TENANT): lb_client_ssl_profile_def = self.entry_def( client_ssl_profile_id=client_ssl_profile_id, tenant=tenant) return self.policy_api.get(lb_client_ssl_profile_def) def list(self, tenant=constants.POLICY_INFRA_TENANT): lb_client_ssl_profile_def = self.entry_def(tenant=tenant) return self._list(lb_client_ssl_profile_def) def update(self, client_ssl_profile_id, name=IGNORE, 
description=IGNORE, tags=IGNORE, protocols=IGNORE, tenant=constants.POLICY_INFRA_TENANT): self._update( client_ssl_profile_id=client_ssl_profile_id, name=name, description=description, tags=tags, protocols=protocols, tenant=tenant) def get_path(self, profile_id, tenant=constants.POLICY_INFRA_TENANT): profile_def = self.entry_def( client_ssl_profile_id=profile_id, tenant=tenant) return profile_def.get_resource_full_path() class NsxPolicyLoadBalancerPersistenceProfileApi( NsxPolicyResourceBase): """LB generic api for all types of session persistence profiles""" @property def entry_def(self): return lb_defs.LBPersistenceProfileBase def create_or_overwrite(self, name, persistence_profile_id=None, description=IGNORE, tags=IGNORE, ha_persistence_mirroring_enabled=IGNORE, persistence_shared=IGNORE, purge=IGNORE, timeout=IGNORE, tenant=constants.POLICY_INFRA_TENANT): raise nsxlib_exc.NotImplemented( "Creating generic persistence profile") def delete(self, persistence_profile_id, tenant=constants.POLICY_INFRA_TENANT): persistence_profile_def = self.entry_def( persistence_profile_id=persistence_profile_id, tenant=tenant) self.policy_api.delete(persistence_profile_def) def get(self, persistence_profile_id, tenant=constants.POLICY_INFRA_TENANT): persistence_profile_def = self.entry_def( persistence_profile_id=persistence_profile_id, tenant=tenant) return self.policy_api.get(persistence_profile_def) def list(self, tenant=constants.POLICY_INFRA_TENANT): persistence_profile_def = self.entry_def(tenant=tenant) return self._list(persistence_profile_def) def update(self, persistence_profile_id, name=IGNORE, description=IGNORE, tags=IGNORE, ha_persistence_mirroring_enabled=IGNORE, persistence_shared=IGNORE, purge=IGNORE, timeout=IGNORE, tenant=constants.POLICY_INFRA_TENANT): raise nsxlib_exc.NotImplemented( "Updating generic persistence profile") def get_path(self, profile_id, tenant=constants.POLICY_INFRA_TENANT): profile_def = self.entry_def( persistence_profile_id=profile_id, tenant=tenant) return profile_def.get_resource_full_path() def wait_until_realized(self, pers_id, entity_type='LbPersistenceProfileDto', tenant=constants.POLICY_INFRA_TENANT, sleep=None, max_attempts=None): pers_def = self.entry_def( persistence_profile_id=pers_id, tenant=tenant) return self._wait_until_realized( pers_def, entity_type=entity_type, sleep=sleep, max_attempts=max_attempts) class NsxPolicyLoadBalancerCookiePersistenceProfileApi( NsxPolicyLoadBalancerPersistenceProfileApi): """NSX Policy LB cookie persistence profile""" @property def entry_def(self): return lb_defs.LBCookiePersistenceProfileDef def create_or_overwrite(self, name, persistence_profile_id=None, description=IGNORE, tags=IGNORE, cookie_garble=IGNORE, cookie_name=IGNORE, cookie_mode=IGNORE, cookie_path=IGNORE, cookie_time=IGNORE, persistence_shared=IGNORE, tenant=constants.POLICY_INFRA_TENANT): persistence_profile_id = self._init_obj_uuid( persistence_profile_id) lb_cookie_persistence_profile_def = self._init_def( persistence_profile_id=persistence_profile_id, name=name, description=description, tags=tags, cookie_name=cookie_name, cookie_garble=cookie_garble, cookie_mode=cookie_mode, cookie_path=cookie_path, cookie_time=cookie_time, persistence_shared=persistence_shared, tenant=tenant) self._create_or_store(lb_cookie_persistence_profile_def) return persistence_profile_id def list(self, tenant=constants.POLICY_INFRA_TENANT): lb_cookie_persistence_profile_def = self.entry_def(tenant=tenant) results = self._list(lb_cookie_persistence_profile_def) # filter the 
results by resource type return [res for res in results if res.get('resource_type') == self.entry_def.resource_type] def update(self, persistence_profile_id, name=IGNORE, description=IGNORE, tags=IGNORE, cookie_garble=IGNORE, cookie_name=IGNORE, cookie_mode=IGNORE, cookie_path=IGNORE, cookie_time=IGNORE, persistence_shared=IGNORE, tenant=constants.POLICY_INFRA_TENANT): self._update( persistence_profile_id=persistence_profile_id, name=name, description=description, tags=tags, cookie_garble=cookie_garble, cookie_mode=cookie_mode, cookie_name=cookie_name, cookie_path=cookie_path, cookie_time=cookie_time, persistence_shared=persistence_shared, tenant=tenant) class NsxPolicyLoadBalancerSourceIpPersistenceProfileApi( NsxPolicyLoadBalancerPersistenceProfileApi): """NSX Policy LB source ip persistence profile""" @property def entry_def(self): return lb_defs.LBSourceIpPersistenceProfileDef def create_or_overwrite(self, name, persistence_profile_id=None, description=IGNORE, tags=IGNORE, ha_persistence_mirroring_enabled=IGNORE, persistence_shared=IGNORE, purge=IGNORE, timeout=IGNORE, tenant=constants.POLICY_INFRA_TENANT): persistence_profile_id = self._init_obj_uuid( persistence_profile_id) lb_source_ip_persistence_profile_def = self._init_def( persistence_profile_id=persistence_profile_id, name=name, description=description, tags=tags, ha_persistence_mirroring_enabled=ha_persistence_mirroring_enabled, persistence_shared=persistence_shared, purge=purge, timeout=timeout, tenant=tenant) self._create_or_store(lb_source_ip_persistence_profile_def) return persistence_profile_id def list(self, tenant=constants.POLICY_INFRA_TENANT): lb_source_ip_persistence_profile_def = self.entry_def(tenant=tenant) results = self._list(lb_source_ip_persistence_profile_def) # filter the results by resource type return [res for res in results if res.get('resource_type') == self.entry_def.resource_type] def update(self, persistence_profile_id, name=IGNORE, description=IGNORE, tags=IGNORE, ha_persistence_mirroring_enabled=IGNORE, persistence_shared=IGNORE, purge=IGNORE, timeout=IGNORE, tenant=constants.POLICY_INFRA_TENANT): self._update( persistence_profile_id=persistence_profile_id, name=name, description=description, tags=tags, ha_persistence_mirroring_enabled=ha_persistence_mirroring_enabled, persistence_shared=persistence_shared, purge=purge, timeout=timeout, tenant=tenant) class NsxPolicyLoadBalancerPoolApi(NsxPolicyResourceBase): """NSX Policy LBService.""" @property def entry_def(self): return lb_defs.LBPoolDef def create_or_overwrite(self, name, lb_pool_id=None, description=IGNORE, tags=IGNORE, members=IGNORE, algorithm=IGNORE, active_monitor_paths=IGNORE, member_group=IGNORE, snat_translation=IGNORE, tenant=constants.POLICY_INFRA_TENANT): lb_pool_id = self._init_obj_uuid(lb_pool_id) lb_pool_def = self._init_def( lb_pool_id=lb_pool_id, name=name, description=description, tags=tags, members=members, active_monitor_paths=active_monitor_paths, algorithm=algorithm, member_group=member_group, snat_translation=snat_translation, tenant=tenant) self._create_or_store(lb_pool_def) return lb_pool_id def delete(self, lb_pool_id, tenant=constants.POLICY_INFRA_TENANT): lb_pool_def = self.entry_def( lb_pool_id=lb_pool_id, tenant=tenant) self.policy_api.delete(lb_pool_def) def get(self, lb_pool_id, tenant=constants.POLICY_INFRA_TENANT, silent=False): lb_pool_def = self.entry_def( lb_pool_id=lb_pool_id, tenant=tenant) return self.policy_api.get(lb_pool_def) def list(self, tenant=constants.POLICY_INFRA_TENANT): lb_pool_def = 
self.entry_def(tenant=tenant) return self.policy_api.list(lb_pool_def)['results'] def update(self, lb_pool_id, name=IGNORE, description=IGNORE, tags=IGNORE, members=IGNORE, algorithm=IGNORE, active_monitor_paths=IGNORE, member_group=IGNORE, snat_translation=IGNORE, tenant=constants.POLICY_INFRA_TENANT): self._update( lb_pool_id=lb_pool_id, name=name, description=description, tags=tags, members=members, active_monitor_paths=active_monitor_paths, algorithm=algorithm, member_group=member_group, snat_translation=snat_translation, tenant=tenant) def add_monitor_to_pool(self, lb_pool_id, active_monitor_paths, tenant=constants.POLICY_INFRA_TENANT): lb_pool_def = self.entry_def( lb_pool_id=lb_pool_id, tenant=tenant) lb_pool = self.policy_api.get(lb_pool_def) monitor_paths = lb_pool.get('active_monitor_paths', []) monitor_paths.extend(active_monitor_paths) self._update( lb_pool_id=lb_pool_id, active_monitor_paths=monitor_paths, tenant=tenant) def remove_monitor_from_pool(self, lb_pool_id, monitor_path, tenant=constants.POLICY_INFRA_TENANT): lb_pool_def = self.entry_def( lb_pool_id=lb_pool_id, tenant=tenant) lb_pool = self.policy_api.get(lb_pool_def) monitor_paths = lb_pool.get('active_monitor_paths', []) if monitor_path in monitor_paths: monitor_paths.remove(monitor_path) self._update(lb_pool_id=lb_pool_id, active_monitor_paths=monitor_paths, tenant=tenant) def create_pool_member_and_add_to_pool( self, lb_pool_id, ip_address, port=None, display_name=None, weight=None, admin_state=None, backup_member=None, tenant=constants.POLICY_INFRA_TENANT): lb_pool_member = lb_defs.LBPoolMemberDef( ip_address, port=port, name=display_name, weight=weight, admin_state=admin_state, backup_member=backup_member) lb_pool_def = lb_defs.LBPoolDef( lb_pool_id=lb_pool_id, tenant=tenant) lb_pool = self.policy_api.get(lb_pool_def) lb_pool_members = lb_pool.get('members', []) lb_pool_members.append(lb_pool_member) self._update(lb_pool_id=lb_pool_id, members=lb_pool_members, tenant=tenant) return lb_pool_member def update_pool_member( self, lb_pool_id, ip_address, port=None, display_name=None, weight=None, admin_state=None, backup_member=None, tenant=constants.POLICY_INFRA_TENANT): lb_pool_def = lb_defs.LBPoolDef( lb_pool_id=lb_pool_id, tenant=tenant) lb_pool = self.policy_api.get(lb_pool_def) lb_pool_members = lb_pool.get('members', []) member_to_update = [x for x in lb_pool_members if ( x.get('ip_address') == ip_address and x.get('port') == str(port))] if member_to_update: if display_name: member_to_update[0]['display_name'] = display_name if weight: member_to_update[0]['weight'] = weight if admin_state: member_to_update[0]['admin_state'] = admin_state if backup_member: member_to_update[0]['backup_member'] = backup_member self._update(lb_pool_id=lb_pool_id, members=lb_pool_members, tenant=tenant) else: ops = ('Updating member %(address)s:%(port)d failed, not found in ' 'pool %(pool)s', {'address': ip_address, 'port': port, 'pool': lb_pool_id}) raise nsxlib_exc.ResourceNotFound(manager=lb_pool_def, operation=ops) def remove_pool_member(self, lb_pool_id, ip_address, port=None, tenant=constants.POLICY_INFRA_TENANT): lb_pool_def = lb_defs.LBPoolDef( lb_pool_id=lb_pool_id, tenant=tenant) lb_pool = self.policy_api.get(lb_pool_def) lb_pool_members = lb_pool.get('members', []) lb_pool_members = [x for x in lb_pool_members if ( x.get('ip_address') != ip_address or x.get('port') != str(port))] self._update(lb_pool_id=lb_pool_id, members=lb_pool_members, tenant=tenant) def get_path(self, lb_pool_id, 
tenant=constants.POLICY_INFRA_TENANT): profile_def = self.entry_def( lb_pool_id=lb_pool_id, tenant=tenant) return profile_def.get_resource_full_path() def wait_until_realized(self, lb_pool_id, entity_type='LbPoolDto', tenant=constants.POLICY_INFRA_TENANT, sleep=None, max_attempts=None): lb_pool_def = self.entry_def( lb_pool_id=lb_pool_id, tenant=tenant) return self._wait_until_realized( lb_pool_def, entity_type=entity_type, sleep=sleep, max_attempts=max_attempts) class NsxPolicyLoadBalancerServiceApi(NsxPolicyResourceBase): """NSX Policy LBService.""" @property def entry_def(self): return lb_defs.LBServiceDef def create_or_overwrite(self, name, lb_service_id=None, description=IGNORE, tags=IGNORE, size=IGNORE, connectivity_path=IGNORE, relax_scale_validation=IGNORE, tenant=constants.POLICY_INFRA_TENANT): lb_service_id = self._init_obj_uuid(lb_service_id) lb_service_def = self._init_def( lb_service_id=lb_service_id, name=name, description=description, tags=tags, size=size, connectivity_path=connectivity_path, relax_scale_validation=relax_scale_validation, tenant=tenant) self._create_or_store(lb_service_def) return lb_service_id def delete(self, lb_service_id, tenant=constants.POLICY_INFRA_TENANT): lb_service_def = self.entry_def( lb_service_id=lb_service_id, tenant=tenant) self.policy_api.delete(lb_service_def) def get(self, lb_service_id, tenant=constants.POLICY_INFRA_TENANT): lb_service_def = self.entry_def( lb_service_id=lb_service_id, tenant=tenant) return self.policy_api.get(lb_service_def) def list(self, tenant=constants.POLICY_INFRA_TENANT): lb_service_def = lb_defs.LBServiceDef(tenant=tenant) return self.policy_api.list(lb_service_def)['results'] def update(self, lb_service_id, name=IGNORE, description=IGNORE, tags=IGNORE, size=IGNORE, connectivity_path=IGNORE, relax_scale_validation=IGNORE, tenant=constants.POLICY_INFRA_TENANT): self._update( lb_service_id=lb_service_id, name=name, description=description, tags=tags, size=size, connectivity_path=connectivity_path, relax_scale_validation=relax_scale_validation, tenant=tenant) def get_statistics(self, lb_service_id, tenant=constants.POLICY_INFRA_TENANT): lb_service_stats_def = ( lb_defs.LBServiceStatisticsDef( lb_service_id=lb_service_id, tenant=tenant)) return self.policy_api.get(lb_service_stats_def) def get_status(self, lb_service_id, tenant=constants.POLICY_INFRA_TENANT): lb_service_status_def = ( lb_defs.LBServiceStatusDef( lb_service_id=lb_service_id, tenant=tenant)) return self.policy_api.get(lb_service_status_def) def get_virtual_server_status(self, lb_service_id, lb_virtual_server_id, tenant=constants.POLICY_INFRA_TENANT): lb_vs_status_def = ( lb_defs.LBVirtualServerStatusDef( lb_service_id=lb_service_id, lb_virtual_server_id=lb_virtual_server_id, tenant=tenant)) return self.policy_api.get(lb_vs_status_def) def get_usage(self, lb_service_id, realtime=False, tenant=constants.POLICY_INFRA_TENANT): lb_service_status_def = lb_defs.LBServiceUsageDef( lb_service_id=lb_service_id, realtime=realtime, tenant=tenant) return self.policy_api.get(lb_service_status_def) def get_path(self, lb_service_id, tenant=constants.POLICY_INFRA_TENANT): profile_def = self.entry_def( lb_service_id=lb_service_id, tenant=tenant) return profile_def.get_resource_full_path() def wait_until_realized(self, lb_service_id, entity_type='LbServiceDto', tenant=constants.POLICY_INFRA_TENANT, sleep=None, max_attempts=None): lb_service_def = self.entry_def( lb_service_id=lb_service_id, tenant=tenant) return self._wait_until_realized( lb_service_def, 
entity_type=entity_type, sleep=sleep, max_attempts=max_attempts) class NsxPolicyLoadBalancerVirtualServerAPI(NsxPolicyResourceBase): """NSX Policy LoadBalancerVirtualServers""" @property def entry_def(self): return lb_defs.LBVirtualServerDef def create_or_overwrite(self, name, virtual_server_id=None, description=IGNORE, rules=IGNORE, application_profile_id=IGNORE, ip_address=IGNORE, lb_service_id=IGNORE, client_ssl_profile_binding=IGNORE, pool_id=IGNORE, lb_persistence_profile_id=IGNORE, ports=IGNORE, server_ssl_profile_binding=IGNORE, waf_profile_binding=IGNORE, max_concurrent_connections=IGNORE, access_list_control=IGNORE, tenant=constants.POLICY_INFRA_TENANT, tags=IGNORE): virtual_server_id = self._init_obj_uuid(virtual_server_id) lbvs_def = self._init_def( virtual_server_id=virtual_server_id, name=name, description=description, tenant=tenant, rules=rules, application_profile_id=application_profile_id, ip_address=ip_address, lb_service_id=lb_service_id, client_ssl_profile_binding=client_ssl_profile_binding, pool_id=pool_id, lb_persistence_profile_id=lb_persistence_profile_id, ports=ports, server_ssl_profile_binding=server_ssl_profile_binding, waf_profile_binding=waf_profile_binding, max_concurrent_connections=max_concurrent_connections, access_list_control=access_list_control, tags=tags ) self._create_or_store(lbvs_def) return virtual_server_id def delete(self, virtual_server_id, tenant=constants.POLICY_INFRA_TENANT): lbvs_def = self.entry_def( virtual_server_id=virtual_server_id, tenant=tenant) self.policy_api.delete(lbvs_def) def get(self, virtual_server_id, tenant=constants.POLICY_INFRA_TENANT): lbvs_def = self.entry_def( virtual_server_id=virtual_server_id, tenant=tenant) return self.policy_api.get(lbvs_def) def list(self, tenant=constants.POLICY_INFRA_TENANT): lbvs_def = self.entry_def(tenant=tenant) return self.policy_api.list(lbvs_def)['results'] def update(self, virtual_server_id, name=IGNORE, description=IGNORE, rules=IGNORE, application_profile_id=IGNORE, ip_address=IGNORE, lb_service_id=IGNORE, client_ssl_profile_binding=IGNORE, pool_id=IGNORE, lb_persistence_profile_id=IGNORE, ports=IGNORE, server_ssl_profile_binding=IGNORE, waf_profile_binding=IGNORE, max_concurrent_connections=IGNORE, access_list_control=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT, allow_partial_updates=True): @utils.retry_upon_exception( nsxlib_exc.StaleRevision, max_attempts=self.policy_api.client.max_attempts) def _update(): self._update( virtual_server_id=virtual_server_id, name=name, description=description, tenant=tenant, rules=rules, application_profile_id=application_profile_id, ip_address=ip_address, lb_service_id=lb_service_id, client_ssl_profile_binding=client_ssl_profile_binding, pool_id=pool_id, lb_persistence_profile_id=lb_persistence_profile_id, ports=ports, server_ssl_profile_binding=server_ssl_profile_binding, waf_profile_binding=waf_profile_binding, max_concurrent_connections=max_concurrent_connections, access_list_control=access_list_control, tags=tags, allow_partial_updates=allow_partial_updates) _update() def update_virtual_server_with_pool( self, virtual_server_id, pool_id=IGNORE, tenant=constants.POLICY_INFRA_TENANT): return self.update( virtual_server_id, pool_id=pool_id, tenant=tenant) def update_virtual_server_application_profile( self, virtual_server_id, application_profile_id=IGNORE, tenant=constants.POLICY_INFRA_TENANT): return self.update( virtual_server_id, application_profile_id=application_profile_id, tenant=tenant) def 
update_virtual_server_persistence_profile( self, virtual_server_id, lb_persistence_profile_id=IGNORE, tenant=constants.POLICY_INFRA_TENANT): return self.update( virtual_server_id, lb_persistence_profile_id=lb_persistence_profile_id, tenant=tenant) def update_virtual_server_client_ssl_profile_binding( self, virtual_server_id, client_ssl_profile_binding=IGNORE, tenant=constants.POLICY_INFRA_TENANT): return self.update( virtual_server_id, client_ssl_profile_binding=client_ssl_profile_binding, tenant=tenant) def remove_virtual_server_client_ssl_profile_binding( self, virtual_server_id, tenant=constants.POLICY_INFRA_TENANT): lbvs_def = self._get_and_update_def( virtual_server_id=virtual_server_id, tenant=tenant) body = lbvs_def.body if lbvs_def.body else {} body.pop('client_ssl_profile_binding', None) # Server ssl profile binding can not exist without client ssl profile # binding body.pop('server_ssl_profile_binding', None) if body: lbvs_def.set_obj_dict(body) self.policy_api.create_or_update( lbvs_def, partial_updates=False) def update_virtual_server_with_vip(self, virtual_server_id, vip, tenant=constants.POLICY_INFRA_TENANT): return self.update( virtual_server_id, ip_address=vip, tenant=tenant) def build_client_ssl_profile_binding(self, default_certificate_path, sni_certificate_paths=None, ssl_profile_path=None, client_auth_ca_paths=None, client_auth=None): return lb_defs.ClientSSLProfileBindingDef( default_certificate_path, sni_certificate_paths=sni_certificate_paths, ssl_profile_path=ssl_profile_path, client_auth_ca_paths=client_auth_ca_paths, client_auth=client_auth) def update_client_ssl_profile_binding( self, virtual_server_id, default_certificate_path, sni_certificate_paths=None, ssl_profile_path=None, client_auth_ca_paths=None, client_auth=None, tenant=constants.POLICY_INFRA_TENANT): client_ssl_def = lb_defs.ClientSSLProfileBindingDef( default_certificate_path, sni_certificate_paths=sni_certificate_paths, ssl_profile_path=ssl_profile_path, client_auth_ca_paths=client_auth_ca_paths, client_auth=client_auth) return self.update( virtual_server_id, client_ssl_profile_binding=client_ssl_def, tenant=tenant) def build_lb_rule(self, actions=None, display_name=None, match_conditions=None, match_strategy=None, phase=None): return lb_defs.LBRuleDef( actions, match_conditions, display_name, match_strategy, phase) def _add_rule_in_position(self, body, lb_rule, position): lb_rules = body.get('rules', []) if position < 0 or position > len(lb_rules): # Add as the last one lb_rules.append(lb_rule) elif position <= len(lb_rules): lb_rules.insert(position, lb_rule) return lb_rules def add_lb_rule(self, virtual_server_id, actions=None, name=None, match_conditions=None, match_strategy=None, phase=None, position=-1, tenant=constants.POLICY_INFRA_TENANT): lb_rule = lb_defs.LBRuleDef( actions, match_conditions, name, match_strategy, phase) lbvs_def = self.entry_def( virtual_server_id=virtual_server_id, tenant=tenant) body = self.policy_api.get(lbvs_def) lb_rules = self._add_rule_in_position(body, lb_rule, position) return self._update(virtual_server_id=virtual_server_id, vs_data=body, rules=lb_rules, tenant=tenant) def update_lb_rule(self, virtual_server_id, lb_rule_name, actions=None, match_conditions=None, match_strategy=None, phase=None, position=-1, tenant=constants.POLICY_INFRA_TENANT): lb_rule = lb_defs.LBRuleDef( actions, match_conditions, lb_rule_name, match_strategy, phase) lbvs_def = self.entry_def( virtual_server_id=virtual_server_id, tenant=tenant) body = self.policy_api.get(lbvs_def) lb_rules = 
body.get('rules', []) # Remove existing rule try: rule_index = next(lb_rules.index(r) for r in lb_rules if r.get('display_name') == lb_rule_name) except Exception: err_msg = (_("No resource in rules matched for values: " "%(values)s") % {'values': lb_rule_name}) raise nsxlib_exc.ResourceNotFound( manager=self, operation=err_msg) if position < 0: position = rule_index del(lb_rules[rule_index]) # Insert new rule lb_rules = self._add_rule_in_position(body, lb_rule, position) return self._update( virtual_server_id=virtual_server_id, rules=lb_rules, vs_data=body, tenant=tenant) def remove_lb_rule(self, virtual_server_id, lb_rule_name, tenant=constants.POLICY_INFRA_TENANT): lbvs_def = self.entry_def(virtual_server_id=virtual_server_id, tenant=tenant) body = self.policy_api.get(lbvs_def) lb_rules = body.get('rules', []) lb_rules = [r for r in lb_rules if (r.get('display_name') != lb_rule_name)] return self._update( virtual_server_id=virtual_server_id, vs_data=body, rules=lb_rules, tenant=tenant) def build_access_list_control(self, action, group_path, enabled=None): return lb_defs.LBAccessListControlDef(action, group_path, enabled) def get_path(self, virtual_server_id, tenant=constants.POLICY_INFRA_TENANT): profile_def = self.entry_def( virtual_server_id=virtual_server_id, tenant=tenant) return profile_def.get_resource_full_path() def wait_until_realized(self, virtual_server_id, entity_type=None, tenant=constants.POLICY_INFRA_TENANT, sleep=None, max_attempts=None): lbvs_def = self.entry_def( virtual_server_id=virtual_server_id, tenant=tenant) return self._wait_until_realized( lbvs_def, entity_type=entity_type, sleep=sleep, max_attempts=max_attempts) @six.add_metaclass(abc.ABCMeta) class NsxPolicyLBMonitorProfileBase(NsxPolicyResourceBase): """NSX Policy LB monitor profile""" def create_or_overwrite(self, lb_monitor_profile_id=None, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT, **kwargs): lb_monitor_profile_id = self._init_obj_uuid(lb_monitor_profile_id) lb_monitor_profile_def = self._init_def( lb_monitor_profile_id=lb_monitor_profile_id, tags=tags, tenant=tenant, **kwargs) self._create_or_store(lb_monitor_profile_def) return lb_monitor_profile_id def delete(self, lb_monitor_profile_id, tenant=constants.POLICY_INFRA_TENANT): lb_monitor_profile_def = self.entry_def( lb_monitor_profile_id=lb_monitor_profile_id, tenant=tenant) self.policy_api.delete(lb_monitor_profile_def) def get(self, lb_monitor_profile_id, tenant=constants.POLICY_INFRA_TENANT): lb_monitor_profile_def = self.entry_def( lb_monitor_profile_id=lb_monitor_profile_id, tenant=tenant) return self.policy_api.get(lb_monitor_profile_def) def list(self, tenant=constants.POLICY_INFRA_TENANT): lb_monitor_profile_def = self.entry_def(tenant=tenant) return self._list(lb_monitor_profile_def) def update(self, lb_monitor_profile_id, name=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT, **kwargs): self._update( lb_monitor_profile_id=lb_monitor_profile_id, name=name, tags=tags, tenant=tenant, **kwargs) def get_path(self, lb_monitor_profile_id, tenant=constants.POLICY_INFRA_TENANT): mon_def = self.entry_def(lb_monitor_profile_id=lb_monitor_profile_id, tenant=tenant) return mon_def.get_resource_full_path() class NsxPolicyLBMonitorProfileHttpApi(NsxPolicyLBMonitorProfileBase): """NSX Policy LB HTTP monitor profile""" def create_or_overwrite(self, lb_monitor_profile_id=None, tags=IGNORE, name=IGNORE, interval=IGNORE, timeout=IGNORE, fall_count=IGNORE, rise_count=IGNORE, monitor_port=IGNORE, request_url=IGNORE, request_method=IGNORE, 
request_version=IGNORE, request_headers=IGNORE, request_body=IGNORE, response_status_codes=IGNORE, tenant=constants.POLICY_INFRA_TENANT): return super(NsxPolicyLBMonitorProfileHttpApi, self).create_or_overwrite( lb_monitor_profile_id=lb_monitor_profile_id, tags=tags, name=name, interval=interval, timeout=timeout, fall_count=fall_count, rise_count=rise_count, monitor_port=monitor_port, request_url=request_url, request_method=request_method, request_version=request_version, request_headers=request_headers, request_body=request_body, response_status_codes=response_status_codes, tenant=tenant) def update(self, lb_monitor_profile_id, tags=IGNORE, name=IGNORE, interval=IGNORE, timeout=IGNORE, fall_count=IGNORE, rise_count=IGNORE, monitor_port=IGNORE, request_url=IGNORE, request_method=IGNORE, request_version=IGNORE, request_headers=IGNORE, request_body=IGNORE, response_status_codes=IGNORE, tenant=constants.POLICY_INFRA_TENANT): return super(NsxPolicyLBMonitorProfileHttpApi, self).update( lb_monitor_profile_id=lb_monitor_profile_id, tags=tags, name=name, interval=interval, timeout=timeout, fall_count=fall_count, rise_count=rise_count, monitor_port=monitor_port, request_url=request_url, request_method=request_method, request_version=request_version, request_headers=request_headers, request_body=request_body, response_status_codes=response_status_codes, tenant=tenant) @property def entry_def(self): return lb_defs.LBHttpMonitorProfileDef class NsxPolicyLBMonitorProfileHttpsApi(NsxPolicyLBMonitorProfileHttpApi): """NSX Policy LB HTTPS monitor profile""" @property def entry_def(self): return lb_defs.LBHttpsMonitorProfileDef class NsxPolicyLBMonitorProfileUdpApi(NsxPolicyLBMonitorProfileBase): """NSX Policy LB UDP monitor profile""" def create_or_overwrite(self, lb_monitor_profile_id=None, tags=IGNORE, name=IGNORE, interval=IGNORE, timeout=IGNORE, fall_count=IGNORE, rise_count=IGNORE, monitor_port=IGNORE, receive=IGNORE, send=IGNORE, tenant=constants.POLICY_INFRA_TENANT): return super(NsxPolicyLBMonitorProfileUdpApi, self).create_or_overwrite( lb_monitor_profile_id=lb_monitor_profile_id, tags=tags, name=name, interval=interval, timeout=timeout, fall_count=fall_count, rise_count=rise_count, monitor_port=monitor_port, receive=receive, send=send, tenant=tenant) def update(self, lb_monitor_profile_id, tags=IGNORE, name=IGNORE, interval=IGNORE, timeout=IGNORE, fall_count=IGNORE, rise_count=IGNORE, monitor_port=IGNORE, receive=IGNORE, send=IGNORE, tenant=constants.POLICY_INFRA_TENANT): return super(NsxPolicyLBMonitorProfileUdpApi, self).update( lb_monitor_profile_id=lb_monitor_profile_id, tags=tags, name=name, interval=interval, timeout=timeout, fall_count=fall_count, rise_count=rise_count, monitor_port=monitor_port, receive=receive, send=send, tenant=tenant) @property def entry_def(self): return lb_defs.LBUdpMonitorProfileDef class NsxPolicyLBMonitorProfileIcmpApi(NsxPolicyLBMonitorProfileBase): """NSX Policy LB ICMP monitor profile""" def create_or_overwrite(self, lb_monitor_profile_id=None, tags=IGNORE, name=IGNORE, interval=IGNORE, timeout=IGNORE, fall_count=IGNORE, rise_count=IGNORE, tenant=constants.POLICY_INFRA_TENANT): return super(NsxPolicyLBMonitorProfileIcmpApi, self).create_or_overwrite( lb_monitor_profile_id=lb_monitor_profile_id, tags=tags, name=name, interval=interval, timeout=timeout, fall_count=fall_count, rise_count=rise_count, tenant=tenant) def update(self, lb_monitor_profile_id, tags=IGNORE, name=IGNORE, interval=IGNORE, timeout=IGNORE, fall_count=IGNORE, rise_count=IGNORE, 
tenant=constants.POLICY_INFRA_TENANT): return super(NsxPolicyLBMonitorProfileIcmpApi, self).update( lb_monitor_profile_id=lb_monitor_profile_id, tags=tags, name=name, interval=interval, timeout=timeout, fall_count=fall_count, rise_count=rise_count, tenant=tenant) @property def entry_def(self): return lb_defs.LBIcmpMonitorProfileDef class NsxPolicyLBMonitorProfileTcpApi(NsxPolicyLBMonitorProfileBase): """NSX Policy LB TCP monitor profile""" def create_or_overwrite(self, lb_monitor_profile_id=None, tags=IGNORE, name=IGNORE, interval=IGNORE, timeout=IGNORE, fall_count=IGNORE, rise_count=IGNORE, monitor_port=IGNORE, tenant=constants.POLICY_INFRA_TENANT): return super(NsxPolicyLBMonitorProfileTcpApi, self).create_or_overwrite( lb_monitor_profile_id=lb_monitor_profile_id, tags=tags, name=name, interval=interval, timeout=timeout, fall_count=fall_count, rise_count=rise_count, monitor_port=monitor_port, tenant=tenant) def update(self, lb_monitor_profile_id, tags=IGNORE, name=IGNORE, interval=IGNORE, timeout=IGNORE, fall_count=IGNORE, rise_count=IGNORE, monitor_port=IGNORE, tenant=constants.POLICY_INFRA_TENANT): return super(NsxPolicyLBMonitorProfileTcpApi, self).update( lb_monitor_profile_id=lb_monitor_profile_id, tags=tags, name=name, interval=interval, timeout=timeout, fall_count=fall_count, rise_count=rise_count, monitor_port=monitor_port, tenant=tenant) @property def entry_def(self): return lb_defs.LBTcpMonitorProfileDef class NsxPolicyLoadBalancerApi(object): """This is the class that have all load balancer policy apis""" def __init__(self, *args): self.lb_http_profile = NsxPolicyLBAppProfileHttpApi(*args) self.lb_fast_tcp_profile = NsxPolicyLBAppProfileFastTcpApi(*args) self.lb_fast_udp_profile = NsxPolicyLBAppProfileFastUdpApi(*args) self.client_ssl_profile = ( NsxPolicyLoadBalancerClientSSLProfileApi(*args)) self.server_ssl_profile = ( NsxPolicyLoadBalancerServerSSLProfileApi(*args)) self.lb_persistence_profile = ( NsxPolicyLoadBalancerPersistenceProfileApi(*args)) self.lb_cookie_persistence_profile = ( NsxPolicyLoadBalancerCookiePersistenceProfileApi(*args)) self.lb_source_ip_persistence_profile = ( NsxPolicyLoadBalancerSourceIpPersistenceProfileApi(*args)) self.lb_service = NsxPolicyLoadBalancerServiceApi(*args) self.virtual_server = NsxPolicyLoadBalancerVirtualServerAPI(*args) self.lb_pool = NsxPolicyLoadBalancerPoolApi(*args) self.lb_monitor_profile_http = NsxPolicyLBMonitorProfileHttpApi(*args) self.lb_monitor_profile_https = ( NsxPolicyLBMonitorProfileHttpsApi(*args)) self.lb_monitor_profile_udp = NsxPolicyLBMonitorProfileUdpApi(*args) self.lb_monitor_profile_icmp = NsxPolicyLBMonitorProfileIcmpApi(*args) self.lb_monitor_profile_tcp = NsxPolicyLBMonitorProfileTcpApi(*args) vmware-nsxlib-15.0.6/vmware_nsxlib/v3/policy/constants.py0000664000175000017500000000634213623151571023531 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
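# Usage sketch for the load balancer APIs defined in lb_resources.py above
# (editorial note; illustrative only). `nsxpolicy` is assumed to be an
# initialized NsxPolicyLib instance, and the names, size and paths are
# hypothetical placeholders:
#   lb = nsxpolicy.load_balancer
#   pool_id = lb.lb_pool.create_or_overwrite('my-pool')
#   lb.lb_pool.create_pool_member_and_add_to_pool(pool_id, '10.0.0.10',
#                                                 port=80)
#   service_id = lb.lb_service.create_or_overwrite(
#       'my-lb-service', size='SMALL',
#       connectivity_path='/infra/tier-1s/my-tier1')
#   lb.virtual_server.create_or_overwrite(
#       'my-vs', ip_address='172.24.4.10', ports=['80'],
#       pool_id=pool_id, lb_service_id=service_id)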
TCP = 'TCP' UDP = 'UDP' POLICY_INFRA_TENANT = 'infra' ACTION_ALLOW = 'ALLOW' ACTION_DENY = 'DROP' ANY_GROUP = 'ANY' ANY_SERVICE = 'ANY' CONDITION_KEY_TAG = 'Tag' CONDITION_KEY_NAME = 'Name' CONDITION_MEMBER_IPSET = 'IPSet' CONDITION_MEMBER_VM = 'VirtualMachine' CONDITION_MEMBER_PORT = 'LogicalPort' CONDITION_MEMBER_SWITCH = 'LogicalSwitch' CONDITION_OP_EQUALS = 'EQUALS' CONDITION_OP_CONTAINS = 'CONTAINS' CONDITION_OP_STARTS_WITH = 'STARTSWITH' CONDITION_OP_AND = 'AND' CONDITION_OP_OR = 'OR' DEFAULT_THUMBPRINT = 'abc' DEFAULT_DOMAIN = 'default' DEFAULT_ENFORCEMENT_POINT = 'default' STATE_REALIZED = 'REALIZED' STATE_UNREALIZED = 'UNREALIZED' STATE_ERROR = 'ERROR' CATEGORY_EMERGENCY = 'Emergency' CATEGORY_INFRASTRUCTURE = 'Infrastructure' CATEGORY_ENVIRONMENT = 'Environment' CATEGORY_APPLICATION = 'Application' CATEGORY_LOCAL_GW = 'LocalGatewayRules' ACTIVE_STANDBY = 'ACTIVE_STANDBY' ACTIVE_ACTIVE = 'ACTIVE_ACTIVE' PREEMPTIVE = 'PREEMPTIVE' NON_PREEMPTIVE = 'NON_PREEMPTIVE' NAT_ACTION_SNAT = 'SNAT' NAT_ACTION_DNAT = 'DNAT' NAT_ACTION_NO_SNAT = 'NO_SNAT' NAT_ACTION_NO_DNAT = 'NO_DNAT' NAT_ACTION_REFLEXIVE = 'REFLEXIVE' NAT_FIREWALL_MATCH_BYPASS = 'BYPASS' NAT_FIREWALL_MATCH_EXTERNAL = 'MATCH_EXTERNAL_ADDRESS' NAT_FIREWALL_MATCH_INTERNAL = 'MATCH_INTERNAL_ADDRESS' # Segment ports attachment types ATTACHMENT_PARENT = "PARENT" ATTACHMENT_CHILD = "CHILD" ATTACHMENT_INDEPENDENT = "INDEPENDENT" IPV6_RA_MODE_DISABLED = "DISABLED" IPV6_RA_MODE_SLAAC_RA = "SLAAC_DNS_THROUGH_RA" IPV6_RA_MODE_SLAAC_DHCP = "SLAAC_DNS_THROUGH_DHCP" IPV6_RA_MODE_DHCP = "DHCP_ADDRESS_AND_DNS_THROUGH_DHCP" # WAF operational mode types WAF_OPERATIONAL_MODE_DETECTION = 'DETECTION' WAF_OPERATIONAL_MODE_PROTECTION = 'PROTECTION' WAF_OPERATIONAL_MODE_DISABLED = 'DISABLED' # WAF debug log level types WAF_LOG_LEVEL_NO_LOG = 'NO_LOG' WAF_LOG_LEVEL_ERROR = 'ERROR' WAF_LOG_LEVEL_WARNING = 'WARNING' WAF_LOG_LEVEL_NOTICE = 'NOTICE' WAF_LOG_LEVEL_INFO = 'INFO' WAF_LOG_LEVEL_DETAIL = 'DETAIL' WAF_LOG_LEVEL_EVERYTHING = 'EVERYTHING' # IpPool subnet type IPPOOL_BLOCK_SUBNET = "IpAddressPoolBlockSubnet" IPPOOL_STATIC_SUBNET = "IpAddressPoolStaticSubnet" ADV_RULE_PERMIT = 'PERMIT' ADV_RULE_DENY = 'DENY' ADV_RULE_OPERATOR_GE = 'GE' ADV_RULE_OPERATOR_EQ = 'EQ' ADV_RULE_TYPE_TIER1_STATIC_ROUTES = 'TIER1_STATIC_ROUTES' ADV_RULE_TIER1_CONNECTED = 'TIER1_CONNECTED' ADV_RULE_TIER1_NAT = 'TIER1_NAT' ADV_RULE_TIER1_LB_VIP = 'TIER1_LB_VIP' ADV_RULE_TIER1_LB_SNAT = 'TIER1_LB_SNAT' ADV_RULE_TIER1_DNS_FORWARDER_IP = 'TIER1_DNS_FORWARDER_IP' ADV_RULE_TIER1_IPSEC_LOCAL_ENDPOINT = 'TIER1_IPSEC_LOCAL_ENDPOINT' IPSEC_VPN_RULE_PROTECT = 'PROTECT' IPSEC_VPN_RULE_BYPASS = 'BYPASS' vmware-nsxlib-15.0.6/vmware_nsxlib/v3/policy/__init__.py0000664000175000017500000002400413623151571023247 0ustar zuulzuul00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
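# Usage sketch (editorial note; illustrative only, configuration object and
# identifiers are hypothetical): the NsxPolicyLib class defined in this
# module is normally constructed with an NsxLibConfig and exposes one API
# object per policy resource type, for example:
#   nsxpolicy = NsxPolicyLib(nsxlib_config)
#   if nsxpolicy.feature_supported(nsx_constants.FEATURE_PARTIAL_UPDATES):
#       # partial (PATCH-style) updates may be used by the resource APIs
#       pass
#   paths = nsxpolicy.search_resource_by_realized_id(
#       'some-realized-uuid', 'RealizedLogicalPort')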
import copy from distutils import version from oslo_log import log from vmware_nsxlib import v3 from vmware_nsxlib.v3 import client from vmware_nsxlib.v3 import exceptions from vmware_nsxlib.v3 import lib from vmware_nsxlib.v3 import nsx_constants from vmware_nsxlib.v3 import utils as lib_utils from vmware_nsxlib.v3.policy import core_defs from vmware_nsxlib.v3.policy import core_resources from vmware_nsxlib.v3.policy import ipsec_vpn_resources from vmware_nsxlib.v3.policy import lb_resources LOG = log.getLogger(__name__) class NsxPolicyLib(lib.NsxLibBase): def init_api(self): # Initialize the policy client self.policy_api = core_defs.NsxPolicyApi(self.client) # NSX manager api will be used as a pass-through for apis which are # not implemented by the policy manager yet if self.nsxlib_config.allow_passthrough: config = copy.deepcopy(self.nsxlib_config) # X-Allow-Overwrite must be set for passthrough apis config.allow_overwrite_header = True self.nsx_api = v3.NsxLib(config) else: self.nsx_api = None self.nsx_version = self.get_version() if not self.feature_supported(nsx_constants.FEATURE_PARTIAL_UPDATES): self.policy_api.disable_partial_updates() args = (self.policy_api, self.nsx_api, self.nsx_version, self.nsxlib_config) # Initialize all the different resources self.domain = core_resources.NsxPolicyDomainApi(*args) self.group = core_resources.NsxPolicyGroupApi(*args) self.service = core_resources.NsxPolicyL4ServiceApi(*args) self.icmp_service = core_resources.NsxPolicyIcmpServiceApi( *args) self.ip_protocol_service = ( core_resources.NsxPolicyIPProtocolServiceApi(*args)) self.mixed_service = core_resources.NsxPolicyMixedServiceApi(*args) self.tier0 = core_resources.NsxPolicyTier0Api(*args) self.tier0_nat_rule = core_resources.NsxPolicyTier0NatRuleApi( *args) self.tier0_route_map = core_resources.NsxPolicyTier0RouteMapApi(*args) self.tier0_prefix_list = core_resources.NsxPolicyTier0PrefixListApi( *args) self.tier1 = core_resources.NsxPolicyTier1Api(*args) self.tier1_segment = core_resources.NsxPolicyTier1SegmentApi(*args) self.tier1_nat_rule = core_resources.NsxPolicyTier1NatRuleApi( *args) self.tier1_static_route = ( core_resources.NsxPolicyTier1StaticRouteApi(*args)) self.segment = core_resources.NsxPolicySegmentApi(*args) self.segment_port = core_resources.NsxPolicySegmentPortApi( *args) self.tier1_segment_port = ( core_resources.NsxPolicyTier1SegmentPortApi(*args)) self.comm_map = core_resources.NsxPolicyCommunicationMapApi(*args) self.gateway_policy = core_resources.NsxPolicyGatewayPolicyApi(*args) self.enforcement_point = core_resources.NsxPolicyEnforcementPointApi( *args) self.transport_zone = core_resources.NsxPolicyTransportZoneApi( *args) self.edge_cluster = core_resources.NsxPolicyEdgeClusterApi( *args) self.deployment_map = core_resources.NsxPolicyDeploymentMapApi( *args) self.ip_block = core_resources.NsxPolicyIpBlockApi(*args) self.ip_pool = core_resources.NsxPolicyIpPoolApi(*args) self.segment_security_profile = ( core_resources.NsxSegmentSecurityProfileApi(*args)) self.qos_profile = ( core_resources.NsxQosProfileApi(*args)) self.spoofguard_profile = ( core_resources.NsxSpoofguardProfileApi(*args)) self.ip_discovery_profile = ( core_resources.NsxIpDiscoveryProfileApi(*args)) self.mac_discovery_profile = ( core_resources.NsxMacDiscoveryProfileApi(*args)) self.waf_profile = ( core_resources.NsxWAFProfileApi(*args)) self.segment_security_profile_maps = ( core_resources.SegmentSecurityProfilesBindingMapApi( *args)) self.segment_port_security_profiles = ( 
core_resources.SegmentPortSecurityProfilesBindingMapApi( *args)) self.segment_port_discovery_profiles = ( core_resources.SegmentPortDiscoveryProfilesBindingMapApi( *args)) self.segment_port_qos_profiles = ( core_resources.SegmentPortQosProfilesBindingMapApi( *args)) self.segment_dhcp_static_bindings = ( core_resources.SegmentDhcpStaticBindingConfigApi(*args)) self.ipv6_ndra_profile = ( core_resources.NsxIpv6NdraProfileApi(*args)) self.dhcp_relay_config = core_resources.NsxDhcpRelayConfigApi(*args) self.dhcp_server_config = core_resources.NsxDhcpServerConfigApi(*args) self.md_proxy = core_resources.NsxPolicyMetadataProxyApi(*args) self.certificate = core_resources.NsxPolicyCertApi(*args) self.exclude_list = core_resources.NsxPolicyExcludeListApi(*args) self.load_balancer = lb_resources.NsxPolicyLoadBalancerApi(*args) self.ipsec_vpn = ipsec_vpn_resources.NsxPolicyIpsecVpnApi(*args) self.global_config = core_resources.NsxPolicyGlobalConfig(*args) @property def keepalive_section(self): return 'infra' @property def validate_connection_method(self): # TODO(asarfaty): Find an equivalent api to check policy status pass def get_version(self): """Get the NSX Policy manager version Currently the backend does not support it, so the nsx-manager api will be used temporarily as a passthrough. """ if self.nsx_version: return self.nsx_version if self.nsx_api: self.nsx_version = self.nsx_api.get_version() else: # return the initial supported version self.nsx_version = nsx_constants.NSX_VERSION_2_4_0 return self.nsx_version def feature_supported(self, feature): if (version.LooseVersion(self.get_version()) >= version.LooseVersion(nsx_constants.NSX_VERSION_2_4_0)): # Features available since 2.4 if (feature == nsx_constants.FEATURE_NSX_POLICY_NETWORKING): return True if (version.LooseVersion(self.get_version()) >= version.LooseVersion(nsx_constants.NSX_VERSION_2_5_0)): # Features available since 2.5 if (feature == nsx_constants.FEATURE_ENS_WITH_QOS): return True if (version.LooseVersion(self.get_version()) >= version.LooseVersion(nsx_constants.NSX_VERSION_3_0_0)): # features available since 3.0.0 if feature == nsx_constants.FEATURE_PARTIAL_UPDATES: return True if feature == nsx_constants.FEATURE_SWITCH_HYPERBUS_MODE: return True if feature == nsx_constants.FEATURE_NSX_POLICY_MDPROXY: return True if feature == nsx_constants.FEATURE_NSX_POLICY_DHCP: return True if (feature == nsx_constants.FEATURE_RELAX_SCALE_VALIDATION): return True if (feature == nsx_constants.FEATURE_NSX_POLICY_GLOBAL_CONFIG): return True if feature == nsx_constants.FEATURE_ROUTE_REDISTRIBUTION_CONFIG: return True if feature == nsx_constants.FEATURE_NSX_POLICY_ADMIN_STATE: return True return (feature == nsx_constants.FEATURE_NSX_POLICY) def reinitialize_cluster(self, resource, event, trigger, payload=None): super(NsxPolicyLib, self).reinitialize_cluster( resource, event, trigger, payload=payload) if self.nsx_api: self.nsx_api.reinitialize_cluster(resource, event, trigger, payload) @property def client_url_prefix(self): return client.NSX3Client.NSX_POLICY_V1_API_PREFIX def set_realization_interval(self, interval_min): # Sets intent realization and purge cycles interval (in minutes) realization_config = {"key": "populate_realized_state_cron_expression", "value": "0 */%d * * * *" % interval_min} body = {"keyValuePairs": [realization_config]} self.client.patch("system-config", body) def search_resource_by_realized_id(self, realized_id, realized_type): """Search resources by a realized id & type :returns: a list of resource pathes matching the 
realized id and type. """ if not realized_type or not realized_id: raise exceptions.NsxSearchInvalidQuery( reason=_("Resource type or id was not specified")) query = ('resource_type:GenericPolicyRealizedResource AND ' 'realization_specific_identifier:%s AND ' 'entity_type:%s' % (realized_id, realized_type)) url = self._get_search_url() % query # Retry the search in case of error @lib_utils.retry_upon_exception(exceptions.NsxSearchError, max_attempts=self.client.max_attempts) def do_search(url): return self.client.url_get(url) results = do_search(url) paths = [] for resource in results['results']: paths.extend(resource.get('intent_paths', [])) return paths vmware-nsxlib-15.0.6/vmware_nsxlib/v3/policy/core_resources.py0000664000175000017500000057041613623151571024547 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import abc from distutils import version import sys import decorator import eventlet from oslo_log import log as logging from oslo_utils import uuidutils import six from vmware_nsxlib._i18n import _ from vmware_nsxlib.v3 import exceptions from vmware_nsxlib.v3 import nsx_constants from vmware_nsxlib.v3 import utils from vmware_nsxlib.v3.policy import constants from vmware_nsxlib.v3.policy import core_defs from vmware_nsxlib.v3.policy import transaction as trans from vmware_nsxlib.v3.policy import utils as p_utils LOG = logging.getLogger(__name__) # Sentinel object to indicate unspecified attribute value # None value in attribute would indicate "unset" functionality, # while "ignore" means that the value will not be present in the request # body IGNORE = object() DEFAULT_MAP_ID = 'DEFAULT' @decorator.decorator def check_allowed_passthrough(f, *args, **kwargs): resource_api = args[0] if not resource_api.nsx_api: caller = sys._getframe(1).f_code.co_name LOG.error("%s failed: Passthrough api is disabled", caller) return return f(*args, **kwargs) @six.add_metaclass(abc.ABCMeta) class NsxPolicyResourceBase(object): """Abstract class for NSX policy resources declaring the basic apis each policy resource should support, and implementing some common apis and utilities """ SINGLE_ENTRY_ID = 'entry' def __init__(self, policy_api, nsx_api, version, nsxlib_config): self.policy_api = policy_api self.nsx_api = nsx_api self.version = version self.nsxlib_config = nsxlib_config @property def entry_def(self): pass @abc.abstractmethod def list(self, *args, **kwargs): pass @abc.abstractmethod def get(self, uuid, *args, **kwargs): pass @abc.abstractmethod def delete(self, uuid, *args, **kwargs): pass @abc.abstractmethod def create_or_overwrite(self, *args, **kwargs): """Create new or overwrite existing resource Create would list keys and attributes, set defaults and perform necessary validations. If an object with the same IDs exists on the backend, it will be overridden. """ pass @abc.abstractmethod def update(self, *args, **kwargs): """Update existing resource Update is different from create since it specifies only attributes that need changing.
Non-updatable attributes should not be listed as update arguments. Create_or_overwrite is not sufficient since it sets defaults, and thus would reset non-default values back to their defaults if they are not specified in kwargs. """ pass def _any_arg_set(self, *args): """Helper to identify if user specified any of args""" for arg in args: if arg != IGNORE: return True return False def _get_user_args(self, **kwargs): return {key: value for key, value in kwargs.items() if value != IGNORE} def _init_def(self, **kwargs): """Helper for update function - ignore attrs without explicit value""" args = self._get_user_args(**kwargs) return self.entry_def(nsx_version=self.version, **args) def _init_parent_def(self, **kwargs): """Helper for update function - ignore attrs without explicit value""" args = self._get_user_args(**kwargs) return self.parent_entry_def(**args) def _get_and_update_def(self, **kwargs): """Helper for update function - ignore attrs without explicit value""" args = self._get_user_args(**kwargs) resource_def = self.entry_def(nsx_version=self.version, **args) body = self.policy_api.get(resource_def) if body: resource_def.set_obj_dict(body) return resource_def def _update(self, allow_partial_updates=True, **kwargs): """Helper for update function - ignore attrs without explicit value""" if (allow_partial_updates and self.policy_api.partial_updates_supported()): policy_def = self._init_def(**kwargs) partial_updates = True else: policy_def = self._get_and_update_def(**kwargs) partial_updates = False if policy_def.bodyless(): # Nothing to update - only keys provided in kwargs return self.policy_api.create_or_update( policy_def, partial_updates=partial_updates) @staticmethod def _init_obj_uuid(obj_uuid): if not obj_uuid: # generate a random id obj_uuid = str(uuidutils.generate_uuid()) return obj_uuid def _canonize_name(self, name): # remove spaces and slashes from object names return name.replace(' ', '_').replace('/', '_') def get_by_name(self, name, *args, **kwargs): # Return first match by name resources_list = self.list(*args, **kwargs) for obj in resources_list: if obj.get('display_name') == name: return obj def _get_realization_info(self, resource_def, entity_type=None, silent=False): entities = [] try: path = resource_def.get_resource_full_path() entities = self.policy_api.get_realized_entities( path, silent=silent) if entities: if entity_type: # look for the entry with the right entity_type for entity in entities: if entity.get('entity_type') == entity_type: return entity else: # return the first realization entry # (Useful for resources with single realization entity) return entities[0] except exceptions.ResourceNotFound: pass # If we got here the resource was not deployed yet if silent: LOG.debug("No realization info found for %(path)s type %(type)s: " "%(entities)s", {"path": path, "type": entity_type, "entities": entities}) else: LOG.warning("No realization info found for %(path)s type %(type)s", {"path": path, "type": entity_type}) def _get_realized_state(self, resource_def, entity_type=None, realization_info=None): if not realization_info: realization_info = self._get_realization_info( resource_def, entity_type=entity_type) if realization_info and realization_info.get('state'): return realization_info['state'] def _get_realized_id(self, resource_def, entity_type=None, realization_info=None): if not realization_info: realization_info = self._get_realization_info( resource_def, entity_type=entity_type) if (realization_info and realization_info.get('realization_specific_identifier')): return
realization_info['realization_specific_identifier'] def _get_realization_error_message_and_code(self, info): error_msg = 'unknown' error_code = None related_error_codes = [] if info.get('alarms'): alarm = info['alarms'][0] error_msg = alarm.get('message') if alarm.get('error_details'): error_code = alarm['error_details'].get('error_code') if alarm['error_details'].get('related_errors'): related = alarm['error_details']['related_errors'] for err_obj in related: error_msg = '%s: %s' % (error_msg, err_obj.get('error_message')) if err_obj.get('error_code'): related_error_codes.append(err_obj['error_code']) return error_msg, error_code, related_error_codes def _wait_until_realized(self, resource_def, entity_type=None, sleep=None, max_attempts=None): """Wait until the resource has been realized Return the realization info, or raise an error """ if sleep is None: sleep = self.nsxlib_config.realization_wait_sec if max_attempts is None: max_attempts = self.nsxlib_config.realization_max_attempts @utils.retry_upon_none_result(max_attempts, delay=sleep, random=True) def get_info(): info = self._get_realization_info( resource_def, entity_type=entity_type, silent=True) if info: if info['state'] == constants.STATE_REALIZED: return info if info['state'] == constants.STATE_ERROR: error_msg, error_code, related_error_codes = \ self._get_realization_error_message_and_code(info) raise exceptions.RealizationErrorStateError( resource_type=resource_def.resource_type(), resource_id=resource_def.get_id(), error=error_msg, error_code=error_code, related_error_codes=related_error_codes) try: return get_info() except exceptions.RealizationError as e: raise e except Exception: # max retries reached raise exceptions.RealizationTimeoutError( resource_type=resource_def.resource_type(), resource_id=resource_def.get_id(), attempts=max_attempts, sleep=sleep) @check_allowed_passthrough def _get_realized_id_using_search(self, policy_resource_path, mp_resource_type, resource_def=None, sleep=None, max_attempts=None): """Wait until the policy path will be found using search api And return the NSX ID of the MP resource that was found """ if sleep is None: sleep = self.nsxlib_config.realization_wait_sec if max_attempts is None: max_attempts = self.nsxlib_config.realization_max_attempts check_status = 3 tag = [{'scope': 'policyPath', 'tag': utils.escape_tag_data(policy_resource_path)}] test_num = 0 while test_num < max_attempts: # Use the search api to find the realization id of this entity. resources = self.nsx_api.search_by_tags( tags=tag, resource_type=mp_resource_type)['results'] if resources: return resources[0]['id'] # From time to time also check the Policy realization state, # as if it is in ERROR waiting should be avoided. 
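# Note: with check_status hard-coded to 3 above, the state check below runs
# only on every third iteration (test_num 2, 5, 8, ...), keeping most
# attempts as cheap search-api calls.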
if resource_def and test_num % check_status == (check_status - 1): info = self._get_realization_info(resource_def) if info and info['state'] == constants.STATE_ERROR: error_msg, error_code, related_error_codes = \ self._get_realization_error_message_and_code(info) raise exceptions.RealizationErrorStateError( resource_type=resource_def.resource_type(), resource_id=resource_def.get_id(), error=error_msg, error_code=error_code, related_error_codes=related_error_codes) if (info and info['state'] == constants.STATE_REALIZED and info.get('realization_specific_identifier')): LOG.warning("Realization ID for %s was not found via " "search api although it was realized", policy_resource_path) return info['realization_specific_identifier'] eventlet.sleep(sleep) test_num += 1 # max retries reached raise exceptions.RealizationTimeoutError( resource_type=mp_resource_type, resource_id=policy_resource_path, attempts=max_attempts, sleep=sleep) def _get_extended_attr_from_realized_info(self, realization_info, requested_attr): # Returns a list. In case a single value is expected, # caller must extract the first index to retrieve the value if realization_info: try: for attr in realization_info.get('extended_attributes', []): if attr.get('key') == requested_attr: return attr.get('values') except IndexError: return def _list(self, obj_def): return self.policy_api.list(obj_def).get('results', []) def _create_or_store(self, policy_def, child_def=None): transaction = trans.NsxPolicyTransaction.get_current() if transaction: # Store this def for batch apply for this transaction transaction.store_def(policy_def, self.policy_api.client) if child_def and not policy_def.mandatory_child_def: transaction.store_def(child_def, self.policy_api.client) else: # No transaction - apply now # in case the same object was just deleted, create may need to # be retried @utils.retry_upon_exception( exceptions.NsxPendingDelete, max_attempts=self.policy_api.client.max_attempts) def _do_create_with_retry(): if child_def: self.policy_api.create_with_parent(policy_def, child_def) else: self.policy_api.create_or_update(policy_def) _do_create_with_retry() def _delete_or_store(self, policy_def): transaction = trans.NsxPolicyTransaction.get_current() if transaction: # Mark this resource is about to be deleted policy_def.set_delete() # Set some mandatory default values to avoid failure # TODO(asarfaty): This can be removed once platform bug is fixed policy_def.set_default_mandatory_vals() # Store this def for batch apply for this transaction transaction.store_def(policy_def, self.policy_api.client) else: # No transaction - apply now self.policy_api.delete(policy_def) class NsxPolicyDomainApi(NsxPolicyResourceBase): """NSX Policy Domain.""" @property def entry_def(self): return core_defs.DomainDef def create_or_overwrite(self, name, domain_id=None, description=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): domain_id = self._init_obj_uuid(domain_id) domain_def = self._init_def(domain_id=domain_id, name=name, description=description, tags=tags, tenant=tenant) self._create_or_store(domain_def) return domain_id def delete(self, domain_id, tenant=constants.POLICY_INFRA_TENANT): domain_def = core_defs.DomainDef(domain_id=domain_id, tenant=tenant) self.policy_api.delete(domain_def) def get(self, domain_id, tenant=constants.POLICY_INFRA_TENANT, silent=False): domain_def = core_defs.DomainDef(domain_id=domain_id, tenant=tenant) return self.policy_api.get(domain_def, silent=silent) def list(self, tenant=constants.POLICY_INFRA_TENANT): 
domain_def = core_defs.DomainDef(tenant=tenant) return self._list(domain_def) def update(self, domain_id, name=IGNORE, description=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): self._update(domain_id=domain_id, name=name, description=description, tags=tags, tenant=tenant) class NsxPolicyGroupApi(NsxPolicyResourceBase): """NSX Policy Group (under a Domain) with condition/s""" @property def entry_def(self): return core_defs.GroupDef def create_or_overwrite( self, name, domain_id, group_id=None, description=IGNORE, cond_val=None, cond_key=constants.CONDITION_KEY_TAG, cond_op=constants.CONDITION_OP_EQUALS, cond_member_type=constants.CONDITION_MEMBER_PORT, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): """Create a group with/without a condition. Empty condition value will result a group with no condition. """ group_id = self._init_obj_uuid(group_id) # Prepare the condition if cond_val is not None: condition = core_defs.Condition(value=cond_val, key=cond_key, operator=cond_op, member_type=cond_member_type) conditions = [condition] else: conditions = [] group_def = self._init_def(domain_id=domain_id, group_id=group_id, name=name, description=description, conditions=conditions, tags=tags, tenant=tenant) self._create_or_store(group_def) return group_id def build_condition( self, cond_val=None, cond_key=constants.CONDITION_KEY_TAG, cond_op=constants.CONDITION_OP_EQUALS, cond_member_type=constants.CONDITION_MEMBER_PORT): return core_defs.Condition(value=cond_val, key=cond_key, operator=cond_op, member_type=cond_member_type) def build_ip_address_expression(self, ip_addresses): return core_defs.IPAddressExpression(ip_addresses) def build_path_expression(self, paths): return core_defs.PathExpression(paths) def build_union_condition(self, operator=constants.CONDITION_OP_OR, conditions=None): expressions = [] for cond in conditions: if len(expressions): expressions.append(core_defs.ConjunctionOperator( operator=operator)) expressions.append(cond) return expressions def build_nested_condition( self, operator=constants.CONDITION_OP_AND, conditions=None): expressions = self.build_union_condition( operator=operator, conditions=conditions) return core_defs.NestedExpression(expressions=expressions) def create_or_overwrite_with_conditions( self, name, domain_id, group_id=None, description=IGNORE, conditions=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): """Create a group with a list of conditions. 
To build the conditions in the list, build_condition or build_nested_condition can be used """ group_id = self._init_obj_uuid(group_id) if not conditions: conditions = [] group_def = self._init_def(domain_id=domain_id, group_id=group_id, name=name, description=description, conditions=conditions, tags=tags, tenant=tenant) self._create_or_store(group_def) return group_id def delete(self, domain_id, group_id, tenant=constants.POLICY_INFRA_TENANT): group_def = core_defs.GroupDef(domain_id=domain_id, group_id=group_id, tenant=tenant) self.policy_api.delete(group_def) def get(self, domain_id, group_id, tenant=constants.POLICY_INFRA_TENANT, silent=False): group_def = core_defs.GroupDef(domain_id=domain_id, group_id=group_id, tenant=tenant) return self.policy_api.get(group_def, silent=silent) def list(self, domain_id, tenant=constants.POLICY_INFRA_TENANT): """List all the groups of a specific domain.""" group_def = core_defs.GroupDef(domain_id=domain_id, tenant=tenant) return self._list(group_def) def get_by_name(self, domain_id, name, tenant=constants.POLICY_INFRA_TENANT): """Return first group matched by name of this domain""" return super(NsxPolicyGroupApi, self).get_by_name(name, domain_id, tenant=tenant) def update(self, domain_id, group_id, name=IGNORE, description=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): self._update(domain_id=domain_id, group_id=group_id, name=name, description=description, tags=tags, tenant=tenant) def update_with_conditions( self, domain_id, group_id, name=IGNORE, description=IGNORE, conditions=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT, update_payload_cbk=None): group_def = self._init_def(domain_id=domain_id, group_id=group_id, name=name, description=description, conditions=conditions, tags=tags, tenant=tenant) group_path = group_def.get_resource_path() @utils.retry_upon_exception( exceptions.StaleRevision, max_attempts=self.policy_api.client.max_attempts) def _update(): # Get the current data of group group = self.policy_api.get(group_def) if update_payload_cbk: # The update_payload_cbk function takes two arguments. # The first one is the result from the internal GET request. # The second one is a dict of user-provided attributes, # which can be changed inside the callback function and # used as the new payload for the following PUT request. # For example, users want to combine the new conditions # passed to update_with_conditions() with the original # conditions retrieved from the internal GET request # instead of overriding the original conditions. 
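# A minimal illustrative callback (hypothetical, not part of this module;
# it assumes the backend group body keeps its conditions under 'expression'
# and that the user attributes use the 'conditions' key):
#
#     def merge_conditions(backend_group, user_attrs):
#         existing = backend_group.get('expression') or []
#         user_attrs['conditions'] = existing + list(
#             user_attrs.get('conditions') or [])
#
# which would then be passed as:
#     update_with_conditions(domain_id, group_id, conditions=new_conditions,
#                            update_payload_cbk=merge_conditions)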
update_payload_cbk(group, group_def.attrs) group_def.set_obj_dict(group) body = group_def.get_obj_dict() # Update the entire group at the NSX self.policy_api.client.update(group_path, body) _update() def get_realized_state(self, domain_id, group_id, entity_type=None, tenant=constants.POLICY_INFRA_TENANT, realization_info=None): group_def = core_defs.GroupDef(domain_id=domain_id, group_id=group_id, tenant=tenant) return self._get_realized_state(group_def, entity_type=entity_type, realization_info=realization_info) def get_realized_id(self, domain_id, group_id, entity_type=None, tenant=constants.POLICY_INFRA_TENANT, realization_info=None): group_def = core_defs.GroupDef(domain_id=domain_id, group_id=group_id, tenant=tenant) return self._get_realized_id(group_def, entity_type=entity_type, realization_info=realization_info) def get_realization_info(self, domain_id, group_id, entity_type=None, silent=False, tenant=constants.POLICY_INFRA_TENANT): group_def = core_defs.GroupDef(domain_id=domain_id, group_id=group_id, tenant=tenant) return self._get_realization_info(group_def, entity_type=entity_type, silent=silent) def get_path(self, domain_id, group_id, tenant=constants.POLICY_INFRA_TENANT): group_def = self.entry_def(domain_id=domain_id, group_id=group_id, tenant=tenant) return group_def.get_resource_full_path() def wait_until_realized(self, domain_id, group_id, entity_type=None, tenant=constants.POLICY_INFRA_TENANT, sleep=None, max_attempts=None): group_def = self.entry_def(domain_id=domain_id, group_id=group_id, tenant=tenant) return self._wait_until_realized(group_def, entity_type=entity_type, sleep=sleep, max_attempts=max_attempts) class NsxPolicyServiceBase(NsxPolicyResourceBase): """Base class for NSX Policy Service with a single entry. Note the nsx-policy backend supports multiple service entries per service. At this point this is not supported here. """ @property def parent_entry_def(self): return core_defs.ServiceDef def delete(self, service_id, tenant=constants.POLICY_INFRA_TENANT): """Delete the service with all its entries""" service_def = core_defs.ServiceDef(service_id=service_id, tenant=tenant) self.policy_api.delete(service_def) def get(self, service_id, tenant=constants.POLICY_INFRA_TENANT, silent=False): service_def = core_defs.ServiceDef(service_id=service_id, tenant=tenant) return self.policy_api.get(service_def, silent=silent) def list(self, tenant=constants.POLICY_INFRA_TENANT): service_def = core_defs.ServiceDef(tenant=tenant) return self._list(service_def) def get_realized_state(self, service_id, entity_type=None, tenant=constants.POLICY_INFRA_TENANT, realization_info=None): service_def = core_defs.ServiceDef(service_id=service_id, tenant=tenant) return self._get_realized_state(service_def, entity_type=entity_type, realization_info=realization_info) def get_realized_id(self, service_id, entity_type=None, tenant=constants.POLICY_INFRA_TENANT, realization_info=None): service_def = core_defs.ServiceDef(service_id=service_id, tenant=tenant) return self._get_realized_id(service_def, entity_type=entity_type, realization_info=realization_info) def get_realization_info(self, service_id, entity_type=None, silent=False, tenant=constants.POLICY_INFRA_TENANT): service_def = core_defs.ServiceDef(service_id=service_id, tenant=tenant) return self._get_realization_info(service_def, entity_type=entity_type, silent=silent) class NsxPolicyL4ServiceApi(NsxPolicyServiceBase): """NSX Policy Service with a single L4 service entry. 
Note the nsx-policy backend supports multiple service entries per service. At this point this is not supported here. """ @property def entry_def(self): return core_defs.L4ServiceEntryDef def create_or_overwrite(self, name, service_id=None, description=IGNORE, protocol=constants.TCP, dest_ports=IGNORE, source_ports=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): service_id = self._init_obj_uuid(service_id) service_def = self._init_parent_def(service_id=service_id, name=name, description=description, tags=tags, tenant=tenant) entry_def = self._init_def(service_id=service_id, entry_id=self.SINGLE_ENTRY_ID, name=self.SINGLE_ENTRY_ID, protocol=protocol, dest_ports=dest_ports, source_ports=source_ports, tenant=tenant) service_def.mandatory_child_def = entry_def self._create_or_store(service_def, entry_def) return service_id def update(self, service_id, name=IGNORE, description=IGNORE, protocol=IGNORE, dest_ports=IGNORE, source_ports=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): parent_def = self._init_parent_def( service_id=service_id, name=name, description=description, tags=tags, tenant=tenant) entry_def = self._get_and_update_def( service_id=service_id, entry_id=self.SINGLE_ENTRY_ID, protocol=protocol, dest_ports=dest_ports, source_ports=source_ports, tenant=tenant) self.policy_api.create_with_parent(parent_def, entry_def) def build_entry(self, name, service_id, entry_id, description=None, protocol=None, dest_ports=None, source_ports=None, tags=None, tenant=constants.POLICY_INFRA_TENANT): return self._init_def(service_id=service_id, entry_id=entry_id, name=name, description=description, protocol=protocol, dest_ports=dest_ports, source_ports=source_ports, tags=tags, tenant=tenant) class NsxPolicyIcmpServiceApi(NsxPolicyServiceBase): """NSX Policy Service with a single ICMP service entry. Note the nsx-policy backend supports multiple service entries per service. At this point this is not supported here. 
""" @property def entry_def(self): return core_defs.IcmpServiceEntryDef def create_or_overwrite(self, name, service_id=None, description=IGNORE, version=4, icmp_type=IGNORE, icmp_code=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): service_id = self._init_obj_uuid(service_id) service_def = self._init_parent_def(service_id=service_id, name=name, description=description, tags=tags, tenant=tenant) entry_def = self._init_def( service_id=service_id, entry_id=self.SINGLE_ENTRY_ID, name=self.SINGLE_ENTRY_ID, version=version, icmp_type=icmp_type, icmp_code=icmp_code, tenant=tenant) service_def.mandatory_child_def = entry_def self._create_or_store(service_def, entry_def) return service_id def update(self, service_id, name=IGNORE, description=IGNORE, version=IGNORE, icmp_type=IGNORE, icmp_code=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): parent_def = self._init_parent_def( service_id=service_id, name=name, description=description, tags=tags, tenant=tenant) entry_def = self._get_and_update_def( service_id=service_id, entry_id=self.SINGLE_ENTRY_ID, version=version, icmp_type=icmp_type, icmp_code=icmp_code, tenant=tenant) return self.policy_api.create_with_parent(parent_def, entry_def) def build_entry(self, name, service_id, entry_id, description=None, version=4, icmp_type=None, icmp_code=None, tags=None, tenant=constants.POLICY_INFRA_TENANT): return self._init_def(service_id=service_id, entry_id=entry_id, name=name, description=description, version=version, icmp_type=icmp_type, icmp_code=icmp_code, tags=tags, tenant=tenant) class NsxPolicyIPProtocolServiceApi(NsxPolicyServiceBase): """NSX Policy Service with a single IPProtocol service entry. Note the nsx-policy backend supports multiple service entries per service. At this point this is not supported here. 
""" @property def entry_def(self): return core_defs.IPProtocolServiceEntryDef def create_or_overwrite(self, name, service_id=None, description=IGNORE, protocol_number=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): service_id = self._init_obj_uuid(service_id) service_def = self._init_parent_def(service_id=service_id, name=name, description=description, tags=tags, tenant=tenant) entry_def = self._init_def( service_id=service_id, entry_id=self.SINGLE_ENTRY_ID, name=self.SINGLE_ENTRY_ID, protocol_number=protocol_number, tenant=tenant) service_def.mandatory_child_def = entry_def self._create_or_store(service_def, entry_def) return service_id def update(self, service_id, name=IGNORE, description=IGNORE, protocol_number=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): parent_def = self._init_parent_def( service_id=service_id, name=name, description=description, tags=tags, tenant=tenant) entry_def = self._get_and_update_def( service_id=service_id, entry_id=self.SINGLE_ENTRY_ID, protocol_number=protocol_number, tenant=tenant) return self.policy_api.create_with_parent(parent_def, entry_def) def build_entry(self, name, service_id, entry_id, description=None, protocol_number=None, tags=None, tenant=constants.POLICY_INFRA_TENANT): return self._init_def(service_id=service_id, entry_id=entry_id, name=name, protocol_number=protocol_number, tags=tags, tenant=tenant) class NsxPolicyMixedServiceApi(NsxPolicyServiceBase): """NSX Policy Service with mixed service entries.""" def create_or_overwrite(self, name, service_id, description=IGNORE, entries=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): service_def = self._init_parent_def(service_id=service_id, name=name, description=description, entries=entries, tags=tags, tenant=tenant) if entries != IGNORE: self._create_or_store(service_def, entries) else: self._create_or_store(service_def) return service_id def update(self, service_id, name=IGNORE, description=IGNORE, entries=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): parent_def = self._init_parent_def( service_id=service_id, name=name, description=description, tags=tags, tenant=tenant) if entries != IGNORE: self.policy_api.create_with_parent(parent_def, entries) else: self.policy_api.create_or_update(parent_def) class NsxPolicyTier1Api(NsxPolicyResourceBase): """NSX Tier1 API """ LOCALE_SERVICE_SUFF = '-0' @property def entry_def(self): return core_defs.Tier1Def def build_route_advertisement(self, static_routes=False, subnets=False, nat=False, lb_vip=False, lb_snat=False, ipsec_endpoints=False): return core_defs.RouteAdvertisement(static_routes=static_routes, subnets=subnets, nat=nat, lb_vip=lb_vip, lb_snat=lb_snat, ipsec_endpoints=ipsec_endpoints) def create_or_overwrite(self, name, tier1_id=None, description=IGNORE, tier0=IGNORE, force_whitelisting=IGNORE, failover_mode=constants.NON_PREEMPTIVE, route_advertisement=IGNORE, dhcp_config=IGNORE, disable_firewall=IGNORE, ipv6_ndra_profile_id=IGNORE, pool_allocation=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): tier1_id = self._init_obj_uuid(tier1_id) tier1_def = self._init_def(tier1_id=tier1_id, name=name, description=description, tier0=tier0, force_whitelisting=force_whitelisting, tags=tags, failover_mode=failover_mode, route_advertisement=route_advertisement, dhcp_config=dhcp_config, disable_firewall=disable_firewall, ipv6_ndra_profile_id=ipv6_ndra_profile_id, pool_allocation=pool_allocation, tenant=tenant) self._create_or_store(tier1_def) return tier1_id def delete(self, tier1_id, 
tenant=constants.POLICY_INFRA_TENANT): tier1_def = self.entry_def(tier1_id=tier1_id, tenant=tenant) self.policy_api.delete(tier1_def) def get(self, tier1_id, tenant=constants.POLICY_INFRA_TENANT, silent=False): tier1_def = self.entry_def(tier1_id=tier1_id, tenant=tenant) return self.policy_api.get(tier1_def, silent=silent) def get_path(self, tier1_id, tenant=constants.POLICY_INFRA_TENANT): tier1_def = self.entry_def(tier1_id=tier1_id, tenant=tenant) return tier1_def.get_resource_full_path() def list(self, tenant=constants.POLICY_INFRA_TENANT): tier1_def = self.entry_def(tenant=tenant) return self._list(tier1_def) def update(self, tier1_id, name=IGNORE, description=IGNORE, force_whitelisting=IGNORE, failover_mode=IGNORE, tier0=IGNORE, dhcp_config=IGNORE, tags=IGNORE, enable_standby_relocation=IGNORE, disable_firewall=IGNORE, ipv6_ndra_profile_id=IGNORE, route_advertisement=IGNORE, route_advertisement_rules=IGNORE, pool_allocation=IGNORE, tenant=constants.POLICY_INFRA_TENANT, current_body=None): self._update(tier1_id=tier1_id, name=name, description=description, force_whitelisting=force_whitelisting, failover_mode=failover_mode, dhcp_config=dhcp_config, tier0=tier0, enable_standby_relocation=enable_standby_relocation, disable_firewall=disable_firewall, ipv6_ndra_profile_id=ipv6_ndra_profile_id, route_advertisement=route_advertisement, route_advertisement_rules=route_advertisement_rules, pool_allocation=pool_allocation, tags=tags, tenant=tenant) def update_route_advertisement( self, tier1_id, static_routes=None, subnets=None, nat=None, lb_vip=None, lb_snat=None, ipsec_endpoints=None, tier0=IGNORE, tenant=constants.POLICY_INFRA_TENANT): tier1_dict = self.get(tier1_id, tenant) route_adv = self.entry_def.get_route_adv(tier1_dict) route_adv.update(static_routes=static_routes, subnets=subnets, nat=nat, lb_vip=lb_vip, lb_snat=lb_snat, ipsec_endpoints=ipsec_endpoints) self.update(tier1_id, route_advertisement=route_adv, tier0=tier0, tenant=tenant) def add_advertisement_rule( self, tier1_id, name, action=None, prefix_operator=None, route_advertisement_types=None, subnets=None, tenant=constants.POLICY_INFRA_TENANT): tier1_dict = self.get(tier1_id, tenant) adv_rules = tier1_dict.get('route_advertisement_rules', []) adv_rules = [r for r in adv_rules if r.get('name') != name] adv_rule = core_defs.RouteAdvertisementRule( name=name, action=action, prefix_operator=prefix_operator, route_advertisement_types=route_advertisement_types, subnets=subnets) adv_rules.append(adv_rule) self.update(tier1_id, route_advertisement_rules=adv_rules, tenant=tenant, current_body=tier1_dict) def remove_advertisement_rule(self, tier1_id, name, tenant=constants.POLICY_INFRA_TENANT): tier1_dict = self.get(tier1_id, tenant) adv_rules = tier1_dict.get('route_advertisement_rules', []) updated_adv_rules = [r for r in adv_rules if r.get('name') != name] if updated_adv_rules != adv_rules: self.update(tier1_id, route_advertisement_rules=updated_adv_rules, tenant=tenant, current_body=tier1_dict) def build_advertisement_rule(self, name, action=None, prefix_operator=None, route_advertisement_types=None, subnets=None): return core_defs.RouteAdvertisementRule( name=name, action=action, prefix_operator=prefix_operator, route_advertisement_types=route_advertisement_types, subnets=subnets) def update_advertisement_rules(self, tier1_id, rules, name_prefix=None, tenant=constants.POLICY_INFRA_TENANT): """Update the router advertisement rules If name_prefix is None, replace the entire list of NSX rules with the new given 'rules'. 
Else - delete the NSX rules with this name prefix, and add 'rules' to the rest. """ tier1_dict = self.get(tier1_id, tenant) current_rules = tier1_dict.get('route_advertisement_rules', []) if name_prefix: # delete rules with this prefix: new_rules = [] for rule in current_rules: if (not rule.get('name') or not rule['name'].startswith(name_prefix)): new_rules.append(rule) # add new rules new_rules.extend(rules) else: new_rules = rules self.update(tier1_id, route_advertisement_rules=new_rules, tenant=tenant, current_body=tier1_dict) @staticmethod def _locale_service_id(tier1_id): # Supporting only a single locale-service per router for now # with the same id as the router id with a constant suffix return tier1_id + NsxPolicyTier1Api.LOCALE_SERVICE_SUFF def create_locale_service(self, tier1_id, tenant=constants.POLICY_INFRA_TENANT): t1service_def = core_defs.Tier1LocaleServiceDef( tier1_id=tier1_id, service_id=self._locale_service_id(tier1_id), tenant=tenant) self._create_or_store(t1service_def) def delete_locale_service(self, tier1_id, tenant=constants.POLICY_INFRA_TENANT): t1service_def = core_defs.Tier1LocaleServiceDef( tier1_id=tier1_id, service_id=self._locale_service_id(tier1_id), tenant=tenant) self.policy_api.delete(t1service_def) def set_edge_cluster_path(self, tier1_id, edge_cluster_path, tenant=constants.POLICY_INFRA_TENANT): t1service_def = core_defs.Tier1LocaleServiceDef( tier1_id=tier1_id, service_id=self._locale_service_id(tier1_id), edge_cluster_path=edge_cluster_path, tenant=tenant) self._create_or_store(t1service_def) def remove_edge_cluster(self, tier1_id, tenant=constants.POLICY_INFRA_TENANT): """Reset the path in the locale-service (deleting it is not allowed)""" t1service_def = core_defs.Tier1LocaleServiceDef( tier1_id=tier1_id, service_id=self._locale_service_id(tier1_id), edge_cluster_path="", tenant=tenant) self.policy_api.create_or_update(t1service_def) def get_edge_cluster_path(self, tier1_id, tenant=constants.POLICY_INFRA_TENANT): t1service_def = core_defs.Tier1LocaleServiceDef( tier1_id=tier1_id, service_id=self._locale_service_id(tier1_id), tenant=tenant) try: t1service = self.policy_api.get(t1service_def) return t1service.get('edge_cluster_path') except exceptions.ResourceNotFound: return def get_edge_cluster_path_by_searching( self, tier1_id, tenant=constants.POLICY_INFRA_TENANT): """Get the edge_cluster path of a Tier1 router""" services = self.get_locale_tier1_services(tier1_id, tenant=tenant) for srv in services: if 'edge_cluster_path' in srv: return srv['edge_cluster_path'] def get_locale_tier1_services(self, tier1_id, tenant=constants.POLICY_INFRA_TENANT): t1service_def = core_defs.Tier1LocaleServiceDef( tier1_id=tier1_id, tenant=constants.POLICY_INFRA_TENANT) return self.policy_api.list(t1service_def)['results'] def add_segment_interface(self, tier1_id, interface_id, segment_id, subnets, ipv6_ndra_profile_id=IGNORE, tenant=constants.POLICY_INFRA_TENANT): args = {'tier1_id': tier1_id, 'service_id': self._locale_service_id(tier1_id), 'interface_id': interface_id, 'segment_id': segment_id, 'subnets': subnets, 'tenant': tenant} if ipv6_ndra_profile_id != IGNORE: args['ipv6_ndra_profile_id'] = ipv6_ndra_profile_id t1interface_def = core_defs.Tier1InterfaceDef(**args) self.policy_api.create_or_update(t1interface_def) def remove_segment_interface(self, tier1_id, interface_id, tenant=constants.POLICY_INFRA_TENANT): t1interface_def = core_defs.Tier1InterfaceDef( tier1_id=tier1_id, service_id=self._locale_service_id(tier1_id), interface_id=interface_id, 
tenant=tenant) self.policy_api.delete(t1interface_def) def list_segment_interface(self, tier1_id, tenant=constants.POLICY_INFRA_TENANT): t1interface_def = core_defs.Tier1InterfaceDef( tier1_id=tier1_id, service_id=self._locale_service_id(tier1_id), tenant=tenant) return self._list(t1interface_def) def get_realized_state(self, tier1_id, entity_type=None, tenant=constants.POLICY_INFRA_TENANT, realization_info=None): tier1_def = self.entry_def(tier1_id=tier1_id, tenant=tenant) return self._get_realized_state(tier1_def, entity_type=entity_type, realization_info=realization_info) def get_realized_id(self, tier1_id, entity_type=None, tenant=constants.POLICY_INFRA_TENANT, realization_info=None): tier1_def = self.entry_def(tier1_id=tier1_id, tenant=tenant) if self.nsx_api: # Use MP search api to find the LR ID as it is faster return self._get_realized_id_using_search( self.get_path(tier1_id, tenant=tenant), self.nsx_api.logical_router.resource_type, resource_def=tier1_def) return self._get_realized_id(tier1_def, entity_type=entity_type, realization_info=realization_info) def get_realization_info(self, tier1_id, entity_type=None, silent=False, tenant=constants.POLICY_INFRA_TENANT): tier1_def = self.entry_def(tier1_id=tier1_id, tenant=tenant) return self._get_realization_info(tier1_def, silent=silent) def wait_until_realized(self, tier1_id, entity_type=None, tenant=constants.POLICY_INFRA_TENANT, sleep=None, max_attempts=None): tier1_def = self.entry_def(tier1_id=tier1_id, tenant=tenant) return self._wait_until_realized(tier1_def, entity_type=entity_type, sleep=sleep, max_attempts=max_attempts) @check_allowed_passthrough def update_transport_zone(self, tier1_id, transport_zone_id, tenant=constants.POLICY_INFRA_TENANT): """Use the pass-through api to update the TZ zone on the NSX router""" realization_info = self.wait_until_realized( tier1_id, entity_type='RealizedLogicalRouter', tenant=tenant) nsx_router_uuid = self.get_realized_id( tier1_id, tenant=tenant, realization_info=realization_info) self.nsx_api.logical_router.update( nsx_router_uuid, transport_zone_id=transport_zone_id) @check_allowed_passthrough def _get_realized_downlink_port( self, tier1_id, segment_id, tenant=constants.POLICY_INFRA_TENANT, sleep=None, max_attempts=None): """Return the realized ID of a tier1 downlink port of a segment If not found, wait until it has been realized """ if sleep is None: sleep = self.nsxlib_config.realization_wait_sec if max_attempts is None: max_attempts = self.nsxlib_config.realization_max_attempts tier1_def = self.entry_def(tier1_id=tier1_id, tenant=tenant) path = tier1_def.get_resource_full_path() test_num = 0 while test_num < max_attempts: # get all the realized resources of the tier1 entities = self.policy_api.get_realized_entities(path) for e in entities: # Look for router ports if (e['entity_type'] == 'RealizedLogicalRouterPort' and e['state'] == constants.STATE_REALIZED): # Get the NSX port to check if its the downlink port port = self.nsx_api.logical_router_port.get( e['realization_specific_identifier']) # compare the segment ID to the port display name as this # is the way policy sets it port_type = port.get('resource_type') if (port_type == nsx_constants.LROUTERPORT_DOWNLINK and segment_id in port.get('display_name', '')): return port['id'] eventlet.sleep(sleep) test_num += 1 raise exceptions.DetailedRealizationTimeoutError( resource_type='Tier1', resource_id=tier1_id, realized_type="downlink port", related_type="segment", related_id=segment_id, attempts=max_attempts, sleep=sleep) 
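# set_dhcp_relay() below builds on _get_realized_downlink_port(): it waits
# for the segment's downlink port to be realized on the NSX manager, then
# patches the relay service onto that logical router port through the
# passthrough api. A hedged usage sketch (all ids are placeholders):
#
#     nsxpolicy.tier1.set_dhcp_relay('<tier1-id>', '<segment-id>',
#                                    '<dhcp-relay-service-id>')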
@check_allowed_passthrough def set_dhcp_relay(self, tier1_id, segment_id, relay_service_uuid, tenant=constants.POLICY_INFRA_TENANT): """Set relay service on the nsx logical router port Using passthrough api, as the policy api does not support this yet """ downlink_port_id = self._get_realized_downlink_port( tier1_id, segment_id, tenant=tenant) self.nsx_api.logical_router_port.update( downlink_port_id, relay_service_uuid=relay_service_uuid) def set_standby_relocation(self, tier1_id, enable_standby_relocation=True, tenant=constants.POLICY_INFRA_TENANT): """Set the flag for standby relocation on the Tier1 router """ return self.update(tier1_id, enable_standby_relocation=enable_standby_relocation, tenant=tenant) class NsxPolicyTier0Api(NsxPolicyResourceBase): """NSX Tier0 API """ @property def entry_def(self): return core_defs.Tier0Def def create_or_overwrite(self, name, tier0_id=None, description=IGNORE, ha_mode=constants.ACTIVE_ACTIVE, failover_mode=constants.NON_PREEMPTIVE, dhcp_config=IGNORE, force_whitelisting=IGNORE, default_rule_logging=IGNORE, transit_subnets=IGNORE, disable_firewall=IGNORE, ipv6_ndra_profile_id=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): tier0_id = self._init_obj_uuid(tier0_id) tier0_def = self._init_def(tier0_id=tier0_id, name=name, description=description, ha_mode=ha_mode, failover_mode=failover_mode, dhcp_config=dhcp_config, force_whitelisting=force_whitelisting, default_rule_logging=default_rule_logging, transit_subnets=transit_subnets, disable_firewall=disable_firewall, ipv6_ndra_profile_id=ipv6_ndra_profile_id, tags=tags, tenant=tenant) self.policy_api.create_or_update(tier0_def) return tier0_id def delete(self, tier0_id, tenant=constants.POLICY_INFRA_TENANT): tier0_def = self.entry_def(tier0_id=tier0_id, tenant=tenant) self.policy_api.delete(tier0_def) def get(self, tier0_id, tenant=constants.POLICY_INFRA_TENANT, silent=False): tier0_def = self.entry_def(tier0_id=tier0_id, tenant=tenant) return self.policy_api.get(tier0_def, silent=silent) def get_path(self, tier0_id, tenant=constants.POLICY_INFRA_TENANT): tier0_def = self.entry_def(tier0_id=tier0_id, tenant=tenant) return tier0_def.get_resource_full_path() def list(self, tenant=constants.POLICY_INFRA_TENANT): tier0_def = self.entry_def(tenant=tenant) return self._list(tier0_def) def update(self, tier0_id, name=IGNORE, description=IGNORE, failover_mode=IGNORE, dhcp_config=IGNORE, force_whitelisting=IGNORE, default_rule_logging=IGNORE, transit_subnets=IGNORE, disable_firewall=IGNORE, ipv6_ndra_profile_id=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): self._update(tier0_id=tier0_id, name=name, description=description, failover_mode=failover_mode, dhcp_config=dhcp_config, force_whitelisting=force_whitelisting, default_rule_logging=default_rule_logging, transit_subnets=transit_subnets, disable_firewall=disable_firewall, ipv6_ndra_profile_id=ipv6_ndra_profile_id, tags=tags, tenant=tenant) def get_locale_services(self, tier0_id, tenant=constants.POLICY_INFRA_TENANT): t0service_def = core_defs.Tier0LocaleServiceDef( tier0_id=tier0_id, tenant=constants.POLICY_INFRA_TENANT) return self.policy_api.list(t0service_def)['results'] def get_edge_cluster_path(self, tier0_id, tenant=constants.POLICY_INFRA_TENANT): """Get the edge_cluster path of a Tier0 router""" services = self.get_locale_services(tier0_id, tenant=tenant) for srv in services: if 'edge_cluster_path' in srv: return srv['edge_cluster_path'] @check_allowed_passthrough def get_overlay_transport_zone( self, tier0_id, 
tenant=constants.POLICY_INFRA_TENANT): """Use the pass-through api to get the TZ zone of the NSX tier0""" realization_info = self.wait_until_realized( tier0_id, entity_type='RealizedLogicalRouter', tenant=tenant) nsx_router_uuid = self.get_realized_id( tier0_id, tenant=tenant, realization_info=realization_info) return self.nsx_api.router.get_tier0_router_overlay_tz( nsx_router_uuid) def get_realized_state(self, tier0_id, entity_type=None, tenant=constants.POLICY_INFRA_TENANT, realization_info=None): tier0_def = self.entry_def(tier0_id=tier0_id, tenant=tenant) return self._get_realized_state(tier0_def, entity_type=entity_type, realization_info=realization_info) def get_realized_id(self, tier0_id, entity_type=None, tenant=constants.POLICY_INFRA_TENANT, realization_info=None): tier0_def = self.entry_def(tier0_id=tier0_id, tenant=tenant) return self._get_realized_id(tier0_def, entity_type=entity_type, realization_info=realization_info) def get_realization_info(self, tier0_id, entity_type=None, silent=False, tenant=constants.POLICY_INFRA_TENANT): tier0_def = self.entry_def(tier0_id=tier0_id, tenant=tenant) return self._get_realization_info(tier0_def, entity_type=entity_type, silent=silent) def wait_until_realized(self, tier0_id, entity_type=None, tenant=constants.POLICY_INFRA_TENANT, sleep=None, max_attempts=None): tier0_def = self.entry_def(tier0_id=tier0_id, tenant=tenant) return self._wait_until_realized(tier0_def, entity_type=entity_type, sleep=sleep, max_attempts=max_attempts) @check_allowed_passthrough def get_transport_zones(self, tier0_id, tenant=constants.POLICY_INFRA_TENANT): """Return a list of the transport zones IDs connected to the tier0 Currently this is supported only with the passthrough api """ realization_info = self.wait_until_realized( tier0_id, entity_type='RealizedLogicalRouter', tenant=tenant) nsx_router_uuid = self.get_realized_id( tier0_id, tenant=tenant, realization_info=realization_info) return self.nsx_api.router.get_tier0_router_tz( nsx_router_uuid) def _get_uplink_subnets(self, tier0_id, tenant=constants.POLICY_INFRA_TENANT): subnets = [] services = self.get_locale_services(tier0_id, tenant=tenant) for srv in services: # get the interfaces of this service t0interface_def = core_defs.Tier0InterfaceDef( tier0_id=tier0_id, service_id=srv['id'], tenant=constants.POLICY_INFRA_TENANT) interfaces = self.policy_api.list( t0interface_def).get('results', []) for interface in interfaces: if interface.get('type') == 'EXTERNAL': subnets.extend(interface.get('subnets', [])) return subnets def get_uplink_ips(self, tier0_id, tenant=constants.POLICY_INFRA_TENANT): """Return a link of all uplink ips of this tier0 router""" subnets = self._get_uplink_subnets(tier0_id, tenant=tenant) uplink_ips = [] for subnet in subnets: uplink_ips.extend(subnet.get('ip_addresses', [])) return uplink_ips def get_uplink_cidrs(self, tier0_id, tenant=constants.POLICY_INFRA_TENANT): """Return a link of all uplink cidrs of this tier0 router""" subnets = self._get_uplink_subnets(tier0_id, tenant=tenant) cidrs = [] for subnet in subnets: for ip_address in subnet.get('ip_addresses'): cidrs.append('%s/%s' % (ip_address, subnet.get('prefix_len'))) return cidrs def get_bgp_config(self, tier0_id, tenant=constants.POLICY_INFRA_TENANT): services = self.get_locale_services(tier0_id, tenant=tenant) for srv in services: bgpconfig_def = core_defs.BgpRoutingConfigDef( tier0_id=tier0_id, service_id=srv['id'], tenant=constants.POLICY_INFRA_TENANT) try: return self.policy_api.get(bgpconfig_def) except 
exceptions.ResourceNotFound: continue def build_route_redistribution_rule(self, name=None, types=None, route_map_path=None): return core_defs.Tier0RouteRedistributionRule( name, types, route_map_path) def build_route_redistribution_config(self, enabled=None, rules=None): return core_defs.Tier0RouteRedistributionConfig(enabled, rules) def get_route_redistribution_config(self, tier0_id, tenant=constants.POLICY_INFRA_TENANT): services = self.get_locale_services(tier0_id, tenant=tenant) for srv in services: if srv.get('route_redistribution_config'): return srv['route_redistribution_config'] def update_route_redistribution_config( self, tier0_id, redistribution_config, service_id=None, tenant=constants.POLICY_INFRA_TENANT): if not service_id: # Update on the first locale service services = self.get_locale_services(tier0_id, tenant=tenant) if len(services) > 0: service_id = services[0]['id'] if not service_id: err_msg = (_("Cannot update route redistribution config without " "locale service on Tier0 router")) raise exceptions.ManagerError(details=err_msg) service_def = core_defs.Tier0LocaleServiceDef( nsx_version=self.version, tier0_id=tier0_id, service_id=service_id, route_redistribution_config=redistribution_config, tenant=tenant) self.policy_api.create_or_update(service_def) class NsxPolicyTier0NatRuleApi(NsxPolicyResourceBase): DEFAULT_NAT_ID = 'USER' @property def entry_def(self): return core_defs.Tier0NatRule def create_or_overwrite(self, name, tier0_id, nat_id=DEFAULT_NAT_ID, nat_rule_id=None, description=IGNORE, source_network=IGNORE, destination_network=IGNORE, translated_network=IGNORE, firewall_match=IGNORE, action=IGNORE, sequence_number=IGNORE, log=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT, enabled=IGNORE): nat_rule_id = self._init_obj_uuid(nat_rule_id) nat_rule_def = self._init_def(tier0_id=tier0_id, nat_id=nat_id, nat_rule_id=nat_rule_id, name=name, description=description, source_network=source_network, destination_network=destination_network, translated_network=translated_network, firewall_match=firewall_match, action=action, sequence_number=sequence_number, log=log, tags=tags, tenant=tenant, enabled=enabled) self._create_or_store(nat_rule_def) return nat_rule_id def delete(self, tier0_id, nat_rule_id, nat_id=DEFAULT_NAT_ID, tenant=constants.POLICY_INFRA_TENANT): nat_rule_def = self.entry_def(tier0_id=tier0_id, nat_id=nat_id, nat_rule_id=nat_rule_id, tenant=tenant) self.policy_api.delete(nat_rule_def) def get(self, tier0_id, nat_rule_id, nat_id=DEFAULT_NAT_ID, tenant=constants.POLICY_INFRA_TENANT): nat_rule_def = self.entry_def(tier0_id=tier0_id, nat_id=nat_id, nat_rule_id=nat_rule_id, tenant=tenant) return self.policy_api.get(nat_rule_def) def list(self, tier0_id, nat_id=DEFAULT_NAT_ID, tenant=constants.POLICY_INFRA_TENANT): nat_rule_def = self.entry_def(tier0_id=tier0_id, nat_id=nat_id, tenant=tenant) return self._list(nat_rule_def) def update(self, tier0_id, nat_rule_id, nat_id=DEFAULT_NAT_ID, name=IGNORE, description=IGNORE, source_network=IGNORE, destination_network=IGNORE, translated_network=IGNORE, firewall_match=IGNORE, action=IGNORE, sequence_number=IGNORE, log=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT, enabled=IGNORE): self._update(tier0_id=tier0_id, nat_id=nat_id, nat_rule_id=nat_rule_id, name=name, description=description, source_network=source_network, destination_network=destination_network, translated_network=translated_network, firewall_match=firewall_match, action=action, sequence_number=sequence_number, log=log, tags=tags, 
tenant=tenant, enabled=enabled) class NsxPolicyTier1NatRuleApi(NsxPolicyResourceBase): DEFAULT_NAT_ID = 'USER' @property def entry_def(self): return core_defs.Tier1NatRule def create_or_overwrite(self, name, tier1_id, nat_id=DEFAULT_NAT_ID, nat_rule_id=None, description=IGNORE, source_network=IGNORE, destination_network=IGNORE, translated_network=IGNORE, firewall_match=IGNORE, action=IGNORE, sequence_number=IGNORE, log=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT, enabled=IGNORE): nat_rule_id = self._init_obj_uuid(nat_rule_id) nat_rule_def = self._init_def(tier1_id=tier1_id, nat_id=nat_id, nat_rule_id=nat_rule_id, name=name, description=description, source_network=source_network, destination_network=destination_network, translated_network=translated_network, firewall_match=firewall_match, action=action, sequence_number=sequence_number, log=log, tags=tags, tenant=tenant, enabled=enabled) self._create_or_store(nat_rule_def) return nat_rule_id def delete(self, tier1_id, nat_rule_id, nat_id=DEFAULT_NAT_ID, tenant=constants.POLICY_INFRA_TENANT): nat_rule_def = self.entry_def(tier1_id=tier1_id, nat_id=nat_id, nat_rule_id=nat_rule_id, tenant=tenant) self._delete_or_store(nat_rule_def) def get(self, tier1_id, nat_rule_id, nat_id=DEFAULT_NAT_ID, tenant=constants.POLICY_INFRA_TENANT): nat_rule_def = self.entry_def(tier1_id=tier1_id, nat_id=nat_id, nat_rule_id=nat_rule_id, tenant=tenant) return self.policy_api.get(nat_rule_def) def list(self, tier1_id, nat_id=DEFAULT_NAT_ID, tenant=constants.POLICY_INFRA_TENANT): nat_rule_def = self.entry_def(tier1_id=tier1_id, nat_id=nat_id, tenant=tenant) return self._list(nat_rule_def) def update(self, tier1_id, nat_rule_id, nat_id=DEFAULT_NAT_ID, name=IGNORE, description=IGNORE, source_network=IGNORE, destination_network=IGNORE, translated_network=IGNORE, firewall_match=IGNORE, action=IGNORE, sequence_number=IGNORE, log=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT, enabled=IGNORE): self._update(tier1_id=tier1_id, nat_id=nat_id, nat_rule_id=nat_rule_id, name=name, description=description, source_network=source_network, destination_network=destination_network, translated_network=translated_network, firewall_match=firewall_match, action=action, sequence_number=sequence_number, log=log, tags=tags, tenant=tenant, enabled=enabled) class NsxPolicyTier1StaticRouteApi(NsxPolicyResourceBase): @property def entry_def(self): return core_defs.Tier1StaticRoute def create_or_overwrite(self, name, tier1_id, static_route_id=None, description=IGNORE, network=IGNORE, next_hop=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): static_route_id = self._init_obj_uuid(static_route_id) static_route_def = self._init_def(tier1_id=tier1_id, static_route_id=static_route_id, name=name, description=description, network=network, next_hop=next_hop, tags=tags, tenant=tenant) self._create_or_store(static_route_def) return static_route_id def delete(self, tier1_id, static_route_id, tenant=constants.POLICY_INFRA_TENANT): static_route_def = self.entry_def(tier1_id=tier1_id, static_route_id=static_route_id, tenant=tenant) self.policy_api.delete(static_route_def) def get(self, tier1_id, static_route_id, tenant=constants.POLICY_INFRA_TENANT): static_route_def = self.entry_def(tier1_id=tier1_id, static_route_id=static_route_id, tenant=tenant) return self.policy_api.get(static_route_def) def list(self, tier1_id, tenant=constants.POLICY_INFRA_TENANT): static_route_def = self.entry_def(tier1_id=tier1_id, tenant=tenant) return self._list(static_route_def) def update(self, 
tier1_id, static_route_id, name=IGNORE, description=IGNORE, network=IGNORE, next_hop=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): self._update(tier1_id=tier1_id, static_route_id=static_route_id, name=name, description=description, network=network, next_hop=next_hop, tags=tags, tenant=tenant) class NsxPolicyTier1SegmentApi(NsxPolicyResourceBase): """NSX Tier1 Segment API """ @property def entry_def(self): return core_defs.Tier1SegmentDef def build_subnet(self, gateway_address, dhcp_ranges=None): return core_defs.Subnet(gateway_address, dhcp_ranges) def create_or_overwrite(self, name, tier1_id, segment_id=None, description=IGNORE, subnets=IGNORE, dhcp_config=IGNORE, dns_domain_name=IGNORE, vlan_ids=IGNORE, default_rule_logging=IGNORE, ip_pool_id=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): segment_id = self._init_obj_uuid(segment_id) segment_def = self._init_def(tier1_id=tier1_id, segment_id=segment_id, name=name, description=description, subnets=subnets, dhcp_config=dhcp_config, dns_domain_name=dns_domain_name, vlan_ids=vlan_ids, default_rule_logging=default_rule_logging, ip_pool_id=ip_pool_id, tags=tags, tenant=tenant) self._create_or_store(segment_def) return segment_id def delete(self, tier1_id, segment_id, tenant=constants.POLICY_INFRA_TENANT): segment_def = self.entry_def(tier1_id=tier1_id, segment_id=segment_id, tenant=tenant) self.policy_api.delete(segment_def) def get(self, tier1_id, segment_id, tenant=constants.POLICY_INFRA_TENANT, silent=False): segment_def = self.entry_def(tier1_id=tier1_id, segment_id=segment_id, tenant=tenant) return self.policy_api.get(segment_def, silent=silent) def list(self, tier1_id, tenant=constants.POLICY_INFRA_TENANT): segment_def = self.entry_def(tier1_id=tier1_id, tenant=tenant) return self._list(segment_def) def update(self, tier1_id, segment_id, name=IGNORE, description=IGNORE, subnets=IGNORE, dhcp_config=IGNORE, dns_domain_name=IGNORE, vlan_ids=IGNORE, default_rule_logging=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): self._update(tier1_id=tier1_id, segment_id=segment_id, name=name, description=description, subnets=subnets, dhcp_config=dhcp_config, dns_domain_name=dns_domain_name, vlan_ids=vlan_ids, default_rule_logging=default_rule_logging, tags=tags, tenant=tenant) class NsxPolicySegmentApi(NsxPolicyResourceBase): """NSX Infra Segment API """ @property def entry_def(self): return core_defs.SegmentDef def build_subnet(self, gateway_address, dhcp_ranges=None): return core_defs.Subnet(gateway_address, dhcp_ranges) def create_or_overwrite(self, name, segment_id=None, tier1_id=IGNORE, tier0_id=IGNORE, description=IGNORE, subnets=IGNORE, dns_domain_name=IGNORE, vlan_ids=IGNORE, transport_zone_id=IGNORE, ip_pool_id=IGNORE, metadata_proxy_id=IGNORE, dhcp_server_config_id=IGNORE, admin_state=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): if tier0_id != IGNORE and tier1_id != IGNORE: err_msg = (_("Cannot connect Segment to a Tier-0 and Tier-1 " "Gateway simultaneously")) raise exceptions.InvalidInput(details=err_msg) segment_id = self._init_obj_uuid(segment_id) segment_def = self._init_def( segment_id=segment_id, name=name, description=description, tier1_id=tier1_id, tier0_id=tier0_id, subnets=subnets, dns_domain_name=dns_domain_name, vlan_ids=vlan_ids, transport_zone_id=transport_zone_id, ip_pool_id=ip_pool_id, metadata_proxy_id=metadata_proxy_id, dhcp_server_config_id=dhcp_server_config_id, admin_state=admin_state, tags=tags, tenant=tenant) self._create_or_store(segment_def) return segment_id def 
delete(self, segment_id, tenant=constants.POLICY_INFRA_TENANT): segment_def = self.entry_def(segment_id=segment_id, tenant=tenant) @utils.retry_upon_exception( exceptions.NsxSegemntWithVM, delay=self.nsxlib_config.realization_wait_sec, max_attempts=self.nsxlib_config.realization_max_attempts) def do_delete(): self.policy_api.delete(segment_def) do_delete() def get(self, segment_id, tenant=constants.POLICY_INFRA_TENANT, silent=False): segment_def = self.entry_def(segment_id=segment_id, tenant=tenant) return self.policy_api.get(segment_def, silent=silent) def list(self, tenant=constants.POLICY_INFRA_TENANT): segment_def = self.entry_def(tenant=tenant) return self._list(segment_def) def update(self, segment_id, name=IGNORE, description=IGNORE, tier1_id=IGNORE, tier0_id=IGNORE, subnets=IGNORE, dns_domain_name=IGNORE, vlan_ids=IGNORE, metadata_proxy_id=IGNORE, dhcp_server_config_id=IGNORE, admin_state=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): self._update(segment_id=segment_id, name=name, description=description, tier1_id=tier1_id, tier0_id=tier0_id, subnets=subnets, dns_domain_name=dns_domain_name, vlan_ids=vlan_ids, metadata_proxy_id=metadata_proxy_id, dhcp_server_config_id=dhcp_server_config_id, admin_state=admin_state, tags=tags, tenant=tenant) def remove_connectivity_and_subnets( self, segment_id, tenant=constants.POLICY_INFRA_TENANT): """Disconnect a segment from a router and remove its subnets. PATCH does not support this action so PUT is used for this """ # Get the current segment and update it segment = self.get(segment_id) segment['subnets'] = None segment['connectivity_path'] = None segment_def = self.entry_def(segment_id=segment_id, tenant=tenant) path = segment_def.get_resource_path() self.policy_api.client.update(path, segment) def get_realized_state(self, segment_id, entity_type=None, tenant=constants.POLICY_INFRA_TENANT, realization_info=None): segment_def = self.entry_def(segment_id=segment_id, tenant=tenant) return self._get_realized_state(segment_def, entity_type=entity_type, realization_info=realization_info) def get_realized_id(self, segment_id, entity_type=None, tenant=constants.POLICY_INFRA_TENANT, realization_info=None): segment_def = self.entry_def(segment_id=segment_id, tenant=tenant) return self._get_realized_id(segment_def, entity_type=entity_type, realization_info=realization_info) def get_path(self, segment_id, tenant=constants.POLICY_INFRA_TENANT): segment_def = self.entry_def(segment_id=segment_id, tenant=tenant) return segment_def.get_resource_full_path() def get_realized_logical_switch_id(self, segment_id, tenant=constants.POLICY_INFRA_TENANT): segment_def = self.entry_def(segment_id=segment_id, tenant=tenant) if self.nsx_api: # Use MP search api to find the LS ID as it is faster return self._get_realized_id_using_search( self.get_path(segment_id, tenant=tenant), self.nsx_api.logical_switch.resource_type, resource_def=segment_def) realization_info = self._wait_until_realized( segment_def, entity_type='RealizedLogicalSwitch') return self._get_realized_id(segment_def, realization_info=realization_info) def get_realization_info(self, segment_id, entity_type=None, silent=False, tenant=constants.POLICY_INFRA_TENANT): segment_def = self.entry_def(segment_id=segment_id, tenant=tenant) return self._get_realization_info(segment_def, entity_type=entity_type, silent=silent) def wait_until_realized(self, segment_id, entity_type=None, tenant=constants.POLICY_INFRA_TENANT, sleep=None, max_attempts=None): segment_def = self.entry_def(segment_id=segment_id, 
tenant=tenant) return self._wait_until_realized(segment_def, entity_type=entity_type, sleep=sleep, max_attempts=max_attempts) @check_allowed_passthrough def set_admin_state(self, segment_id, admin_state, tenant=constants.POLICY_INFRA_TENANT): """Set the segment admin state using the passthrough/policy api""" if (version.LooseVersion(self.version) >= version.LooseVersion(nsx_constants.NSX_VERSION_3_0_0)): return self.update(segment_id, admin_state=admin_state, tenant=tenant) realization_info = self.wait_until_realized( segment_id, entity_type='RealizedLogicalSwitch', tenant=tenant) nsx_ls_uuid = self.get_realized_id( segment_id, tenant=tenant, realization_info=realization_info) self.nsx_api.logical_switch.update( nsx_ls_uuid, admin_state=admin_state) def get_transport_zone_id(self, segment_id, tenant=constants.POLICY_INFRA_TENANT): segment = self.get(segment_id, tenant=tenant) tz_path = segment.get('transport_zone_path') if tz_path: return p_utils.path_to_id(tz_path) class NsxPolicySegmentPortApi(NsxPolicyResourceBase): """NSX Segment Port API """ @property def entry_def(self): return core_defs.SegmentPortDef def build_address_binding(self, ip_address, mac_address, vlan_id=None): return core_defs.PortAddressBinding(ip_address, mac_address, vlan_id) def create_or_overwrite(self, name, segment_id, port_id=None, description=IGNORE, address_bindings=IGNORE, attachment_type=IGNORE, vif_id=IGNORE, app_id=IGNORE, context_id=IGNORE, traffic_tag=IGNORE, allocate_addresses=IGNORE, hyperbus_mode=IGNORE, admin_state=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): port_id = self._init_obj_uuid(port_id) port_def = self._init_def(segment_id=segment_id, port_id=port_id, name=name, description=description, address_bindings=address_bindings, attachment_type=attachment_type, vif_id=vif_id, app_id=app_id, context_id=context_id, traffic_tag=traffic_tag, allocate_addresses=allocate_addresses, hyperbus_mode=hyperbus_mode, admin_state=admin_state, tags=tags, tenant=tenant) self._create_or_store(port_def) return port_id def delete(self, segment_id, port_id, tenant=constants.POLICY_INFRA_TENANT): port_def = self.entry_def(segment_id=segment_id, port_id=port_id, tenant=tenant) self.policy_api.delete(port_def) def get(self, segment_id, port_id, tenant=constants.POLICY_INFRA_TENANT, silent=False): port_def = self.entry_def(segment_id=segment_id, port_id=port_id, tenant=tenant) return self.policy_api.get(port_def, silent=silent) def list(self, segment_id, tenant=constants.POLICY_INFRA_TENANT): port_def = self.entry_def(segment_id=segment_id, tenant=tenant) return self._list(port_def) def update(self, segment_id, port_id, name=IGNORE, description=IGNORE, address_bindings=IGNORE, hyperbus_mode=IGNORE, admin_state=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): self._update(segment_id=segment_id, port_id=port_id, name=name, description=description, address_bindings=address_bindings, hyperbus_mode=hyperbus_mode, admin_state=admin_state, tags=tags, tenant=tenant) def detach(self, segment_id, port_id, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): port_def = self.entry_def(segment_id=segment_id, port_id=port_id, vif_id=None, attachment_type=None, tags=tags, tenant=tenant) self.policy_api.create_or_update(port_def) def attach(self, segment_id, port_id, attachment_type, vif_id, allocate_addresses=None, app_id=None, context_id=None, traffic_tag=None, hyperbus_mode=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): port_def = self._init_def(segment_id=segment_id, port_id=port_id, 
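# --- Illustrative usage sketch (assumed instance name; placeholder values) ---
# Assuming `segment_api` is an initialized NsxPolicySegmentApi instance, an
# overlay segment attached to a Tier-1 gateway could be created with:
#     subnet = segment_api.build_subnet(
#         gateway_address='10.0.0.1/24',
#         dhcp_ranges=['10.0.0.10-10.0.0.100'])
#     segment_api.create_or_overwrite(
#         'demo-net', segment_id='demo-net', tier1_id='my-tier1',
#         transport_zone_id='overlay-tz-uuid', subnets=[subnet])
# Passing both tier0_id and tier1_id raises InvalidInput (see
# NsxPolicySegmentApi.create_or_overwrite above); IDs here are placeholders.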
attachment_type=attachment_type, allocate_addresses=allocate_addresses, vif_id=vif_id, app_id=app_id, context_id=context_id, traffic_tag=traffic_tag, hyperbus_mode=hyperbus_mode, tags=tags, tenant=tenant) self.policy_api.create_or_update(port_def) def get_realized_state(self, segment_id, port_id, entity_type=None, tenant=constants.POLICY_INFRA_TENANT, realization_info=None): port_def = self.entry_def(segment_id=segment_id, port_id=port_id, tenant=tenant) return self._get_realized_state(port_def, entity_type=entity_type, realization_info=realization_info) def get_realized_id(self, segment_id, port_id, entity_type=None, tenant=constants.POLICY_INFRA_TENANT, realization_info=None): port_def = self.entry_def(segment_id=segment_id, port_id=port_id, tenant=tenant) return self._get_realized_id(port_def, entity_type=entity_type, realization_info=realization_info) def get_realization_info(self, segment_id, port_id, entity_type=None, silent=False, tenant=constants.POLICY_INFRA_TENANT): port_def = self.entry_def(segment_id=segment_id, port_id=port_id, tenant=tenant) return self._get_realization_info(port_def, entity_type=entity_type, silent=silent) def wait_until_realized(self, segment_id, port_id, entity_type=None, tenant=constants.POLICY_INFRA_TENANT, sleep=None, max_attempts=None): port_def = self.entry_def(segment_id=segment_id, port_id=port_id, tenant=tenant) return self._wait_until_realized(port_def, entity_type=entity_type, sleep=sleep, max_attempts=max_attempts) @check_allowed_passthrough def set_admin_state(self, segment_id, port_id, admin_state, tenant=constants.POLICY_INFRA_TENANT): """Set the segment port admin state using the passthrough/policy api""" if (version.LooseVersion(self.version) >= version.LooseVersion(nsx_constants.NSX_VERSION_3_0_0)): return self.update(segment_id, port_id, admin_state=admin_state, tenant=tenant) realization_info = self.wait_until_realized( segment_id, port_id, entity_type='RealizedLogicalPort', tenant=tenant) nsx_lp_uuid = self.get_realized_id( segment_id, port_id, tenant=tenant, realization_info=realization_info) self.nsx_api.logical_port.update( nsx_lp_uuid, False, admin_state=admin_state) class SegmentProfilesBindingMapBaseApi(NsxPolicyResourceBase): def delete(self, segment_id, map_id=DEFAULT_MAP_ID, tenant=constants.POLICY_INFRA_TENANT): map_def = self.entry_def(segment_id=segment_id, map_id=map_id, tenant=tenant) self.policy_api.delete(map_def) def get(self, segment_id, map_id=DEFAULT_MAP_ID, tenant=constants.POLICY_INFRA_TENANT): map_def = self.entry_def(segment_id=segment_id, map_id=map_id, tenant=tenant) return self.policy_api.get(map_def) def list(self, segment_id, tenant=constants.POLICY_INFRA_TENANT): map_def = self.entry_def(segment_id=segment_id, tenant=tenant) return self._list(map_def) class SegmentSecurityProfilesBindingMapApi(SegmentProfilesBindingMapBaseApi): @property def entry_def(self): return core_defs.SegmentSecProfilesBindingMapDef def create_or_overwrite(self, name, segment_id, map_id=DEFAULT_MAP_ID, description=IGNORE, segment_security_profile_id=IGNORE, spoofguard_profile_id=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): map_id = self._init_obj_uuid(map_id) map_def = self._init_def( segment_id=segment_id, map_id=map_id, name=name, description=description, segment_security_profile_id=segment_security_profile_id, spoofguard_profile_id=spoofguard_profile_id, tags=tags, tenant=tenant) self._create_or_store(map_def) return map_id def update(self, segment_id, map_id=DEFAULT_MAP_ID, name=IGNORE, description=IGNORE, 
segment_security_profile_id=IGNORE, spoofguard_profile_id=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): self._update( segment_id=segment_id, map_id=map_id, name=name, description=description, segment_security_profile_id=segment_security_profile_id, spoofguard_profile_id=spoofguard_profile_id, tags=tags, tenant=tenant) class SegmentPortProfilesBindingMapBaseApi(NsxPolicyResourceBase): def delete(self, segment_id, port_id, map_id=DEFAULT_MAP_ID, tenant=constants.POLICY_INFRA_TENANT): map_def = self.entry_def(segment_id=segment_id, port_id=port_id, map_id=map_id, tenant=tenant) self.policy_api.delete(map_def) def get(self, segment_id, port_id, map_id=DEFAULT_MAP_ID, tenant=constants.POLICY_INFRA_TENANT): map_def = self.entry_def(segment_id=segment_id, port_id=port_id, map_id=map_id, tenant=tenant) return self.policy_api.get(map_def) def list(self, segment_id, port_id, tenant=constants.POLICY_INFRA_TENANT): map_def = self.entry_def(segment_id=segment_id, port_id=port_id, tenant=tenant) return self._list(map_def) class SegmentPortSecurityProfilesBindingMapApi( SegmentPortProfilesBindingMapBaseApi): @property def entry_def(self): return core_defs.SegmentPortSecProfilesBindingMapDef def create_or_overwrite(self, name, segment_id, port_id, map_id=DEFAULT_MAP_ID, description=IGNORE, segment_security_profile_id=IGNORE, spoofguard_profile_id=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): map_id = self._init_obj_uuid(map_id) map_def = self._init_def( segment_id=segment_id, port_id=port_id, map_id=map_id, name=name, description=description, segment_security_profile_id=segment_security_profile_id, spoofguard_profile_id=spoofguard_profile_id, tags=tags, tenant=tenant) self._create_or_store(map_def) return map_id def update(self, segment_id, port_id, map_id=DEFAULT_MAP_ID, name=IGNORE, description=IGNORE, segment_security_profile_id=IGNORE, spoofguard_profile_id=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): self._update( segment_id=segment_id, port_id=port_id, map_id=map_id, name=name, description=description, segment_security_profile_id=segment_security_profile_id, spoofguard_profile_id=spoofguard_profile_id, tags=tags, tenant=tenant) class SegmentPortDiscoveryProfilesBindingMapApi( SegmentPortProfilesBindingMapBaseApi): @property def entry_def(self): return core_defs.SegmentPortDiscoveryProfilesBindingMapDef def create_or_overwrite(self, name, segment_id, port_id, map_id=DEFAULT_MAP_ID, description=IGNORE, mac_discovery_profile_id=IGNORE, ip_discovery_profile_id=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): map_id = self._init_obj_uuid(map_id) map_def = self._init_def( segment_id=segment_id, port_id=port_id, map_id=map_id, name=name, description=description, mac_discovery_profile_id=mac_discovery_profile_id, ip_discovery_profile_id=ip_discovery_profile_id, tags=tags, tenant=tenant) self._create_or_store(map_def) return map_id def update(self, segment_id, port_id, map_id=DEFAULT_MAP_ID, name=IGNORE, description=IGNORE, mac_discovery_profile_id=IGNORE, ip_discovery_profile_id=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): self._update( segment_id=segment_id, port_id=port_id, map_id=map_id, name=name, description=description, mac_discovery_profile_id=mac_discovery_profile_id, ip_discovery_profile_id=ip_discovery_profile_id, tags=tags, tenant=tenant) class SegmentPortQosProfilesBindingMapApi( SegmentPortProfilesBindingMapBaseApi): @property def entry_def(self): return core_defs.SegmentPortQoSProfilesBindingMapDef def 
create_or_overwrite(self, name, segment_id, port_id, map_id=DEFAULT_MAP_ID, description=IGNORE, qos_profile_id=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): map_id = self._init_obj_uuid(map_id) map_def = self._init_def( segment_id=segment_id, port_id=port_id, map_id=map_id, name=name, description=description, qos_profile_id=qos_profile_id, tags=tags, tenant=tenant) self._create_or_store(map_def) return map_id def update(self, segment_id, port_id, map_id=DEFAULT_MAP_ID, name=IGNORE, description=IGNORE, qos_profile_id=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): self._update( segment_id=segment_id, port_id=port_id, map_id=map_id, name=name, description=description, qos_profile_id=qos_profile_id, tags=tags, tenant=tenant) class NsxPolicyTier1SegmentPortApi(NsxPolicyResourceBase): """NSX Tier1 Segment Port API """ @property def entry_def(self): return core_defs.Tier1SegmentPortDef def build_address_binding(self, ip_address, mac_address, vlan_id=None): return core_defs.PortAddressBinding(ip_address, mac_address, vlan_id) def create_or_overwrite(self, name, tier1_id, segment_id, port_id=None, description=IGNORE, address_bindings=IGNORE, attachment_type=IGNORE, vif_id=IGNORE, app_id=IGNORE, context_id=IGNORE, traffic_tag=IGNORE, allocate_addresses=IGNORE, hyperbus_mode=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): port_id = self._init_obj_uuid(port_id) port_def = self._init_def(segment_id=segment_id, tier1_id=tier1_id, port_id=port_id, name=name, description=description, address_bindings=address_bindings, attachment_type=attachment_type, vif_id=vif_id, app_id=app_id, context_id=context_id, traffic_tag=traffic_tag, allocate_addresses=allocate_addresses, hyperbus_mode=hyperbus_mode, tags=tags, tenant=tenant) self._create_or_store(port_def) return port_id def delete(self, tier1_id, segment_id, port_id, tenant=constants.POLICY_INFRA_TENANT): port_def = self.entry_def(segment_id=segment_id, tier1_id=tier1_id, port_id=port_id, tenant=tenant) self.policy_api.delete(port_def) def get(self, tier1_id, segment_id, port_id, tenant=constants.POLICY_INFRA_TENANT, silent=False): port_def = self.entry_def(segment_id=segment_id, tier1_id=tier1_id, port_id=port_id, tenant=tenant) return self.policy_api.get(port_def, silent=silent) def list(self, tier1_id, segment_id, tenant=constants.POLICY_INFRA_TENANT): port_def = self.entry_def(segment_id=segment_id, tier1_id=tier1_id, tenant=tenant) return self._list(port_def) def update(self, tier1_id, segment_id, port_id, name=IGNORE, description=IGNORE, address_bindings=IGNORE, hyperbus_mode=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): self._update(segment_id=segment_id, tier1_id=tier1_id, port_id=port_id, name=name, description=description, address_bindings=address_bindings, hyperbus_mode=hyperbus_mode, tags=tags, tenant=tenant) def detach(self, tier1_id, segment_id, port_id, tenant=constants.POLICY_INFRA_TENANT): port_def = self.entry_def(segment_id=segment_id, tier1_id=tier1_id, port_id=port_id, attachment_type=None, tenant=tenant) self.policy_api.create_or_update(port_def) def attach(self, tier1_id, segment_id, port_id, attachment_type, vif_id, allocate_addresses, app_id=None, context_id=None, hyperbus_mode=IGNORE, tenant=constants.POLICY_INFRA_TENANT): port_def = self.entry_def(segment_id=segment_id, tier1_id=tier1_id, port_id=port_id, attachment_type=attachment_type, allocate_addresses=allocate_addresses, vif_id=vif_id, app_id=app_id, context_id=context_id, hyperbus_mode=hyperbus_mode, tenant=tenant) 
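# --- Illustrative usage sketch (assumed instance name; placeholder values) ---
# A caller holding an initialized NsxPolicyTier1SegmentPortApi instance
# `port_api` would typically attach a VIF to an existing port like:
#     port_api.attach('my-tier1', 'demo-net', 'port-1',
#                     attachment_type='PARENT', vif_id='vif-uuid',
#                     allocate_addresses='BOTH')
# 'PARENT' and 'BOTH' mirror NSX attachment/address-allocation values; the
# IDs above are placeholders.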
self.policy_api.create_or_update(port_def) def get_realized_state(self, tier1_id, segment_id, port_id, entity_type=None, tenant=constants.POLICY_INFRA_TENANT, realization_info=None): port_def = self.entry_def(segment_id=segment_id, tier1_id=tier1_id, port_id=port_id, tenant=tenant) return self._get_realized_state(port_def, entity_type=entity_type, realization_info=realization_info) def get_realized_id(self, tier1_id, segment_id, port_id, entity_type=None, tenant=constants.POLICY_INFRA_TENANT, realization_info=None): port_def = self.entry_def(segment_id=segment_id, tier1_id=tier1_id, port_id=port_id, tenant=tenant) return self._get_realized_id(port_def, entity_type=entity_type, realization_info=realization_info) def get_realization_info(self, tier1_id, segment_id, port_id, entity_type=None, silent=False, tenant=constants.POLICY_INFRA_TENANT): port_def = self.entry_def(segment_id=segment_id, tier1_id=tier1_id, port_id=port_id, tenant=tenant) return self._get_realization_info(port_def, entity_type=entity_type, silent=silent) def wait_until_realized(self, tier1_id, segment_id, port_id, entity_type=None, tenant=constants.POLICY_INFRA_TENANT, sleep=None, max_attempts=None): port_def = self.entry_def(segment_id=segment_id, port_id=port_id, tier1_id=tier1_id, tenant=tenant) return self._wait_until_realized(port_def, entity_type=entity_type, sleep=sleep, max_attempts=max_attempts) # This resource is both for DhcpV4StaticBindingConfig and # DhcpV6StaticBindingConfig class SegmentDhcpStaticBindingConfigApi(NsxPolicyResourceBase): @property def entry_def(self): return core_defs.DhcpV4StaticBindingConfig def create_or_overwrite(self, name, segment_id, binding_id=None, **kwargs): err_msg = (_("This action is not supported. Please call " "create_or_overwrite_v4 or create_or_overwrite_v6")) raise exceptions.ManagerError(details=err_msg) def create_or_overwrite_v4(self, name, segment_id, binding_id=None, description=IGNORE, gateway_address=IGNORE, host_name=IGNORE, ip_address=IGNORE, lease_time=IGNORE, mac_address=IGNORE, options=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): binding_id = self._init_obj_uuid(binding_id) binding_def = self._init_def(segment_id=segment_id, binding_id=binding_id, name=name, description=description, gateway_address=gateway_address, host_name=host_name, ip_address=ip_address, lease_time=lease_time, mac_address=mac_address, options=options, tags=tags, tenant=tenant) self._create_or_store(binding_def) return binding_id def create_or_overwrite_v6(self, name, segment_id, binding_id=None, description=IGNORE, domain_names=IGNORE, dns_nameservers=IGNORE, ip_addresses=IGNORE, sntp_servers=IGNORE, preferred_time=IGNORE, lease_time=IGNORE, mac_address=IGNORE, options=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): binding_id = self._init_obj_uuid(binding_id) args = self._get_user_args(segment_id=segment_id, binding_id=binding_id, name=name, description=description, domain_names=domain_names, dns_nameservers=dns_nameservers, ip_addresses=ip_addresses, sntp_servers=sntp_servers, preferred_time=preferred_time, lease_time=lease_time, mac_address=mac_address, options=options, tags=tags, tenant=tenant) binding_def = core_defs.DhcpV6StaticBindingConfig(**args) self._create_or_store(binding_def) return binding_id def delete(self, segment_id, binding_id, tenant=constants.POLICY_INFRA_TENANT): binding_def = self.entry_def(segment_id=segment_id, binding_id=binding_id, tenant=tenant) self.policy_api.delete(binding_def) def get(self, segment_id, binding_id, 
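# --- Illustrative usage sketch (assumed instance name; placeholder values) ---
# With `binding_api` an initialized SegmentDhcpStaticBindingConfigApi
# instance, an IPv4 static binding could be added to a segment like:
#     binding_api.create_or_overwrite_v4(
#         'vm1-binding', 'demo-net', ip_address='10.0.0.15',
#         mac_address='fa:16:3e:aa:bb:cc', host_name='vm1')
# The generic create_or_overwrite/update entry points of this class are
# intentionally unsupported, as shown above; all values are placeholders.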
tenant=constants.POLICY_INFRA_TENANT, silent=False): binding_def = self.entry_def(segment_id=segment_id, binding_id=binding_id, tenant=tenant) return self.policy_api.get(binding_def, silent=silent) def list(self, segment_id, tenant=constants.POLICY_INFRA_TENANT): binding_def = self.entry_def(segment_id=segment_id, tenant=tenant) return self._list(binding_def) def update(self, segment_id, binding_id, **kwargs): err_msg = (_("This action is currently not supported")) raise exceptions.ManagerError(details=err_msg) class NsxPolicyIpBlockApi(NsxPolicyResourceBase): """NSX Policy IP Block API""" @property def entry_def(self): return core_defs.IpBlockDef def create_or_overwrite(self, name, ip_block_id=None, description=IGNORE, cidr=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): ip_block_id = self._init_obj_uuid(ip_block_id) ip_block_def = self._init_def(ip_block_id=ip_block_id, name=name, description=description, cidr=cidr, tags=tags, tenant=tenant) self._create_or_store(ip_block_def) return ip_block_id def delete(self, ip_block_id, tenant=constants.POLICY_INFRA_TENANT): ip_block_def = self.entry_def(ip_block_id=ip_block_id, tenant=tenant) self.policy_api.delete(ip_block_def) def get(self, ip_block_id, tenant=constants.POLICY_INFRA_TENANT, silent=False): ip_block_def = self.entry_def(ip_block_id=ip_block_id, tenant=tenant) return self.policy_api.get(ip_block_def, silent=silent) def list(self, tenant=constants.POLICY_INFRA_TENANT): ip_block_def = self.entry_def(tenant=tenant) return self._list(ip_block_def) def update(self, ip_block_id, name=IGNORE, description=IGNORE, cidr=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): self._update(ip_block_id=ip_block_id, name=name, description=description, cidr=cidr, tags=tags, tenant=tenant) class NsxPolicyIpPoolApi(NsxPolicyResourceBase): """NSX Policy IP Pool API""" @property def entry_def(self): return core_defs.IpPoolDef def create_or_overwrite(self, name, ip_pool_id=None, description=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): ip_pool_id = self._init_obj_uuid(ip_pool_id) ip_pool_def = self._init_def(ip_pool_id=ip_pool_id, name=name, description=description, tags=tags, tenant=tenant) self._create_or_store(ip_pool_def) return ip_pool_id def delete(self, ip_pool_id, tenant=constants.POLICY_INFRA_TENANT): ip_pool_def = self.entry_def(ip_pool_id=ip_pool_id, tenant=tenant) self.policy_api.delete(ip_pool_def) def get(self, ip_pool_id, tenant=constants.POLICY_INFRA_TENANT): ip_pool_def = self.entry_def(ip_pool_id=ip_pool_id, tenant=tenant) return self.policy_api.get(ip_pool_def) def list(self, tenant=constants.POLICY_INFRA_TENANT): ip_pool_def = self.entry_def(tenant=tenant) return self._list(ip_pool_def) def update(self, ip_pool_id, name=IGNORE, description=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): self._update(ip_pool_id=ip_pool_id, name=name, description=description, tags=tags, tenant=tenant) def allocate_ip(self, ip_pool_id, ip_allocation_id=None, ip_address=IGNORE, name=IGNORE, description=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): # If ip_address is not set, a random IP will be allocated # from the pool. 
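# For example (illustrative sketch): with `ip_pool_api` being an
# initialized NsxPolicyIpPoolApi instance,
#     ip_pool_api.allocate_ip('my-pool', ip_allocation_id='alloc-1')
# allocates an arbitrary free IP, while adding ip_address='10.0.0.5'
# requests that specific address. The pool/allocation IDs are placeholders.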
ip_allocation_id = self._init_obj_uuid(ip_allocation_id) args = self._get_user_args( ip_pool_id=ip_pool_id, ip_allocation_id=ip_allocation_id, allocation_ip=ip_address, name=name, description=description, tags=tags, tenant=tenant) ip_allocation_def = core_defs.IpPoolAllocationDef(**args) self._create_or_store(ip_allocation_def) def release_ip(self, ip_pool_id, ip_allocation_id, tenant=constants.POLICY_INFRA_TENANT): ip_allocation_def = core_defs.IpPoolAllocationDef( ip_allocation_id=ip_allocation_id, ip_pool_id=ip_pool_id, tenant=tenant) self.policy_api.delete(ip_allocation_def) def list_allocations(self, ip_pool_id, tenant=constants.POLICY_INFRA_TENANT): ip_allocation_def = core_defs.IpPoolAllocationDef( ip_pool_id=ip_pool_id, tenant=tenant) return self._list(ip_allocation_def) def get_allocation(self, ip_pool_id, ip_allocation_id, tenant=constants.POLICY_INFRA_TENANT): ip_allocation_def = core_defs.IpPoolAllocationDef( ip_pool_id=ip_pool_id, ip_allocation_id=ip_allocation_id, tenant=tenant) return self.policy_api.get(ip_allocation_def) def allocate_block_subnet(self, ip_pool_id, ip_block_id, size, ip_subnet_id=None, auto_assign_gateway=IGNORE, name=IGNORE, description=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT, start_ip=IGNORE): ip_subnet_id = self._init_obj_uuid(ip_subnet_id) args = self._get_user_args( ip_pool_id=ip_pool_id, ip_block_id=ip_block_id, ip_subnet_id=ip_subnet_id, size=size, auto_assign_gateway=auto_assign_gateway, name=name, description=description, tags=tags, tenant=tenant, start_ip=start_ip) ip_subnet_def = core_defs.IpPoolBlockSubnetDef( nsx_version=self.version, **args) self._create_or_store(ip_subnet_def) def release_block_subnet(self, ip_pool_id, ip_subnet_id, tenant=constants.POLICY_INFRA_TENANT): ip_subnet_def = core_defs.IpPoolBlockSubnetDef( ip_subnet_id=ip_subnet_id, ip_pool_id=ip_pool_id, tenant=tenant) self.policy_api.delete(ip_subnet_def) def list_block_subnets(self, ip_pool_id, tenant=constants.POLICY_INFRA_TENANT): ip_subnet_def = core_defs.IpPoolBlockSubnetDef( ip_pool_id=ip_pool_id, tenant=tenant) subnets = self._list(ip_subnet_def) block_subnets = [] for subnet in subnets: if subnet['resource_type'] == ip_subnet_def.resource_type(): block_subnets.append(subnet) return block_subnets def get_ip_block_subnet(self, ip_pool_id, ip_subnet_id, tenant=constants.POLICY_INFRA_TENANT): ip_subnet_def = core_defs.IpPoolBlockSubnetDef( ip_pool_id=ip_pool_id, ip_subnet_id=ip_subnet_id, tenant=tenant) return self.policy_api.get(ip_subnet_def) def get_ip_block_subnet_cidr(self, ip_pool_id, ip_subnet_id, entity_type=None, tenant=constants.POLICY_INFRA_TENANT, wait=False, sleep=None, max_attempts=None): # Retrieve the allocated Subnet CIDR for Subnet ID # Return None in case the CIDR is not yet allocated realized_info = self.get_ip_subnet_realization_info( ip_pool_id, ip_subnet_id, entity_type, tenant, wait, sleep, max_attempts) # Returns a list of CIDRs. 
In case a single value is expected, # caller must extract the first index to retrieve the CIDR value return self._get_extended_attr_from_realized_info( realized_info, requested_attr='cidr') def create_or_update_static_subnet(self, ip_pool_id, cidr, allocation_ranges, ip_subnet_id=None, name=IGNORE, description=IGNORE, gateway_ip=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): ip_subnet_id = self._init_obj_uuid(ip_subnet_id) args = self._get_user_args( ip_pool_id=ip_pool_id, ip_subnet_id=ip_subnet_id, cidr=cidr, allocation_ranges=allocation_ranges, name=name, description=description, tags=tags, tenant=tenant) ip_subnet_def = core_defs.IpPoolStaticSubnetDef(**args) self._create_or_store(ip_subnet_def) def release_static_subnet(self, ip_pool_id, ip_subnet_id, tenant=constants.POLICY_INFRA_TENANT): ip_subnet_def = core_defs.IpPoolStaticSubnetDef( ip_subnet_id=ip_subnet_id, ip_pool_id=ip_pool_id, tenant=tenant) self.policy_api.delete(ip_subnet_def) def list_static_subnets(self, ip_pool_id, tenant=constants.POLICY_INFRA_TENANT): ip_subnet_def = core_defs.IpPoolStaticSubnetDef( ip_pool_id=ip_pool_id, tenant=tenant) subnets = self._list(ip_subnet_def) static_subnets = [] for subnet in subnets: if subnet['resource_type'] == ip_subnet_def.resource_type(): static_subnets.append(subnet) return static_subnets def get_static_subnet(self, ip_pool_id, ip_subnet_id, tenant=constants.POLICY_INFRA_TENANT): ip_subnet_def = core_defs.IpPoolStaticSubnetDef( ip_pool_id=ip_pool_id, ip_subnet_id=ip_subnet_id, tenant=tenant) return self.policy_api.get(ip_subnet_def) def get_realization_info(self, ip_pool_id, entity_type=None, silent=False, tenant=constants.POLICY_INFRA_TENANT): ip_pool_def = self.entry_def(ip_pool_id=ip_pool_id, tenant=tenant) return self._get_realization_info(ip_pool_def, entity_type=entity_type, silent=silent) def get_ip_subnet_realization_info( self, ip_pool_id, ip_subnet_id, entity_type=None, tenant=constants.POLICY_INFRA_TENANT, wait=False, sleep=None, max_attempts=None, subnet_type=constants.IPPOOL_BLOCK_SUBNET): if subnet_type == constants.IPPOOL_BLOCK_SUBNET: ip_subnet_def = core_defs.IpPoolBlockSubnetDef( ip_pool_id=ip_pool_id, ip_subnet_id=ip_subnet_id, tenant=tenant) else: ip_subnet_def = core_defs.IpPoolStaticSubnetDef( ip_pool_id=ip_pool_id, ip_subnet_id=ip_subnet_id, tenant=tenant) if wait: return self._wait_until_realized( ip_subnet_def, entity_type=entity_type, sleep=sleep, max_attempts=max_attempts) return self._get_realization_info(ip_subnet_def, entity_type=entity_type) def get_ip_alloc_realization_info(self, ip_pool_id, ip_allocation_id, entity_type=None, tenant=constants.POLICY_INFRA_TENANT, wait=False, sleep=None, max_attempts=None): ip_allocation_def = core_defs.IpPoolAllocationDef( ip_pool_id=ip_pool_id, ip_allocation_id=ip_allocation_id, tenant=tenant) if wait: return self._wait_until_realized( ip_allocation_def, entity_type=entity_type, sleep=sleep, max_attempts=max_attempts) return self._get_realization_info(ip_allocation_def, entity_type=entity_type) def get_realized_allocated_ip(self, ip_pool_id, ip_allocation_id, entity_type=None, tenant=constants.POLICY_INFRA_TENANT, wait=False, sleep=None, max_attempts=None): # Retrieve the allocated IpAddress for allocation ID # Return None in case the IP is not yet allocated realized_info = self.get_ip_alloc_realization_info( ip_pool_id, ip_allocation_id, entity_type, tenant, wait, sleep, max_attempts) if realized_info: try: return realized_info['extended_attributes'][0].get( 'values')[0] except IndexError: return def 
wait_until_realized(self, ip_pool_id, entity_type=None, tenant=constants.POLICY_INFRA_TENANT, sleep=None, max_attempts=None): ip_pool_def = self.entry_def(ip_pool_id=ip_pool_id, tenant=tenant) return self._wait_until_realized(ip_pool_def, entity_type=entity_type, sleep=sleep, max_attempts=max_attempts) class NsxPolicySecurityPolicyBaseApi(NsxPolicyResourceBase): def _get_last_seq_num(self, domain_id, map_id, tenant=constants.POLICY_INFRA_TENANT): # get the current entries, and choose the next unused sequence number # between the entries under the same communication map try: com_map = self.get(domain_id, map_id, tenant=tenant) com_entries = com_map.get('rules') except exceptions.ResourceNotFound: return -1 if not com_entries: return 0 seq_nums = [int(cm['sequence_number']) for cm in com_entries] seq_nums.sort() return seq_nums[-1] def _get_seq_num(self, last_sequence): if last_sequence < 0: return 1 return last_sequence + 1 def create_or_overwrite(self, name, domain_id, map_id=None, description=IGNORE, category=constants.CATEGORY_APPLICATION, sequence_number=None, service_ids=IGNORE, action=constants.ACTION_ALLOW, scope=IGNORE, source_groups=IGNORE, dest_groups=IGNORE, direction=nsx_constants.IN_OUT, logged=IGNORE, tags=IGNORE, map_sequence_number=IGNORE, tenant=constants.POLICY_INFRA_TENANT): """Create CommunicationMap & Entry. source_groups/dest_groups should be a list of group ids belonging to the domain. NOTE: In multi-connection environment, it is recommended to execute this call under lock to prevent race condition where two entries end up with same sequence number. """ last_sequence = -1 if map_id: if not sequence_number: # get the next available sequence number last_sequence = self._get_last_seq_num(domain_id, map_id, tenant=tenant) else: map_id = self._init_obj_uuid(map_id) if not sequence_number: sequence_number = self._get_seq_num(last_sequence) # Build the communication entry. Since we currently support only one # it will have the same id as its parent entry_def = self._init_def( domain_id=domain_id, map_id=map_id, entry_id=self.SINGLE_ENTRY_ID, name=name, description=description, sequence_number=sequence_number, source_groups=source_groups, dest_groups=dest_groups, service_ids=service_ids, action=action, scope=scope, direction=direction, logged=logged, tenant=tenant) map_def = self._init_parent_def( domain_id=domain_id, map_id=map_id, tenant=tenant, name=name, description=description, category=category, tags=tags, map_sequence_number=map_sequence_number) self._create_or_store(map_def, entry_def) return map_id def create_or_overwrite_map_only( self, name, domain_id, map_id=None, description=IGNORE, category=constants.CATEGORY_APPLICATION, tags=IGNORE, map_sequence_number=IGNORE, tenant=constants.POLICY_INFRA_TENANT): """Create or update a CommunicationMap Create a communication map without any entries, or update the communication map itself, leaving the entries unchanged. 
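Individual rules can then be added with create_entry(). Illustrative example (`secpolicy_api` stands for an instance of a concrete subclass such as the communication map or gateway policy API; the domain, policy, group and service IDs are placeholders): secpolicy_api.create_or_overwrite_map_only('my-policy', 'my-domain', map_id='my-policy', category=constants.CATEGORY_APPLICATION) followed by secpolicy_api.create_entry('allow-web', 'my-domain', 'my-policy', source_groups=['web-clients'], dest_groups=['web-servers'], service_ids=['HTTPS'], action=constants.ACTION_ALLOW).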
""" map_id = self._init_obj_uuid(map_id) map_def = self._init_parent_def( domain_id=domain_id, map_id=map_id, tenant=tenant, name=name, description=description, category=category, tags=tags, map_sequence_number=map_sequence_number) self._create_or_store(map_def) return map_id def build_entry(self, name, domain_id, map_id, entry_id=None, description=None, sequence_number=None, service_ids=None, action=constants.ACTION_ALLOW, scope=None, source_groups=None, dest_groups=None, direction=nsx_constants.IN_OUT, logged=False, tag=None, ip_protocol=nsx_constants.IPV4_IPV6, service_entries=IGNORE, tenant=constants.POLICY_INFRA_TENANT): """Get the definition of a single map entry""" entry_id = self._init_obj_uuid(entry_id) return self._init_def(domain_id=domain_id, map_id=map_id, entry_id=entry_id, name=name, description=description, sequence_number=sequence_number, source_groups=source_groups, dest_groups=dest_groups, service_ids=service_ids, action=action, scope=scope, direction=direction, ip_protocol=ip_protocol, logged=logged, tag=tag, service_entries=service_entries, tenant=tenant) def create_with_entries( self, name, domain_id, map_id=None, description=IGNORE, category=constants.CATEGORY_APPLICATION, entries=None, tags=IGNORE, map_sequence_number=IGNORE, tenant=constants.POLICY_INFRA_TENANT): """Create CommunicationMap with entries""" map_id = self._init_obj_uuid(map_id) map_def = self._init_parent_def( domain_id=domain_id, map_id=map_id, tenant=tenant, name=name, description=description, category=category, tags=tags, map_sequence_number=map_sequence_number) # in case the same object was just deleted, create may need to # be retried @utils.retry_upon_exception( exceptions.NsxPendingDelete, delay=self.nsxlib_config.realization_wait_sec, max_attempts=self.nsxlib_config.realization_max_attempts) def _do_create_with_retry(): self._create_or_store(map_def, entries) _do_create_with_retry() return map_id def create_entry(self, name, domain_id, map_id, entry_id=None, description=None, sequence_number=None, service_ids=None, action=constants.ACTION_ALLOW, source_groups=None, dest_groups=None, scope=None, ip_protocol=nsx_constants.IPV4_IPV6, direction=nsx_constants.IN_OUT, logged=False, tag=None, service_entries=IGNORE, tenant=constants.POLICY_INFRA_TENANT): """Create CommunicationMap Entry. source_groups/dest_groups should be a list of group ids belonging to the domain. 
""" # get the next available sequence number if not sequence_number: last_sequence = self._get_last_seq_num(domain_id, map_id, tenant=tenant) sequence_number = self._get_seq_num(last_sequence) entry_id = self._init_obj_uuid(entry_id) # Build the communication entry entry_def = self._init_def(domain_id=domain_id, map_id=map_id, entry_id=entry_id, name=name, description=description, sequence_number=sequence_number, source_groups=source_groups, dest_groups=dest_groups, service_ids=service_ids, action=action, scope=scope, ip_protocol=ip_protocol, direction=direction, logged=logged, tag=tag, service_entries=service_entries, tenant=tenant) self._create_or_store(entry_def) return entry_id def create_entry_from_def(self, entry_def): """Create CommunicationMap Entry from a predefined entry def""" self._create_or_store(entry_def) def delete(self, domain_id, map_id, tenant=constants.POLICY_INFRA_TENANT): map_def = self._init_parent_def( domain_id=domain_id, map_id=map_id, tenant=tenant) self.policy_api.delete(map_def) def delete_entry(self, domain_id, map_id, entry_id, tenant=constants.POLICY_INFRA_TENANT): entry_def = self.entry_def( domain_id=domain_id, map_id=map_id, entry_id=entry_id, tenant=tenant) self.policy_api.delete(entry_def) def get(self, domain_id, map_id, tenant=constants.POLICY_INFRA_TENANT, silent=False): map_def = self.parent_entry_def( domain_id=domain_id, map_id=map_id, tenant=tenant) return self.policy_api.get(map_def, silent=silent) def get_entry(self, domain_id, map_id, entry_id, tenant=constants.POLICY_INFRA_TENANT, silent=False): entry_def = self.entry_def( domain_id=domain_id, map_id=map_id, entry_id=entry_id, tenant=tenant) return self.policy_api.get(entry_def, silent=silent) def get_by_name(self, domain_id, name, tenant=constants.POLICY_INFRA_TENANT): """Return first communication map entry matched by name""" return super(NsxPolicySecurityPolicyBaseApi, self).get_by_name( name, domain_id, tenant=tenant) def list(self, domain_id, tenant=constants.POLICY_INFRA_TENANT): """List all the map entries of a specific domain.""" map_def = self.parent_entry_def( domain_id=domain_id, tenant=tenant) return self._list(map_def) def update(self, domain_id, map_id, name=IGNORE, description=IGNORE, sequence_number=IGNORE, service_ids=IGNORE, action=IGNORE, source_groups=IGNORE, dest_groups=IGNORE, direction=IGNORE, logged=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): # Note(asarfaty): Category is mandatory in update calls for now # although it cannot change. 
Getting it from the NSX orig_entry = self.get(domain_id, map_id, tenant=tenant) category = orig_entry.get('category') parent_def = self._init_parent_def( domain_id=domain_id, map_id=map_id, name=name, description=description, category=category, tags=tags, tenant=tenant) if self._any_arg_set(sequence_number, service_ids, action, source_groups, dest_groups, direction, logged): # Update the entry only if relevant attributes were changed entry_def = self._get_and_update_def( domain_id=domain_id, map_id=map_id, entry_id=self.SINGLE_ENTRY_ID, service_ids=service_ids, source_groups=source_groups, dest_groups=dest_groups, sequence_number=sequence_number, action=action, direction=direction, logged=logged, tenant=tenant) self.policy_api.create_with_parent(parent_def, entry_def) else: self.policy_api.create_or_update(parent_def) def update_entry(self, domain_id, map_id, entry_id, name=IGNORE, description=IGNORE, sequence_number=IGNORE, service_ids=IGNORE, action=IGNORE, source_groups=IGNORE, dest_groups=IGNORE, scope=IGNORE, ip_protocol=IGNORE, direction=IGNORE, logged=IGNORE, tags=IGNORE, tag=IGNORE, service_entries=IGNORE, tenant=constants.POLICY_INFRA_TENANT): if self._any_arg_set(name, description, sequence_number, service_ids, action, source_groups, dest_groups, scope, ip_protocol, direction, logged, tags): entry_def = self._get_and_update_def( domain_id=domain_id, map_id=map_id, entry_id=entry_id, name=name, description=description, sequence_number=sequence_number, service_ids=service_ids, action=action, source_groups=source_groups, dest_groups=dest_groups, scope=scope, ip_protocol=ip_protocol, direction=direction, logged=logged, tags=tags, tag=tag, service_entries=service_entries, tenant=tenant) self.policy_api.create_or_update(entry_def) def update_entries(self, domain_id, map_id, entries, category=constants.CATEGORY_APPLICATION, tenant=constants.POLICY_INFRA_TENANT): self.update_with_entries(domain_id, map_id, entries, category=category, tenant=tenant) def update_with_entries(self, domain_id, map_id, entries=IGNORE, name=IGNORE, description=IGNORE, category=constants.CATEGORY_APPLICATION, tags=IGNORE, map_sequence_number=IGNORE, tenant=constants.POLICY_INFRA_TENANT): map_def = self._init_parent_def( domain_id=domain_id, map_id=map_id, tenant=tenant, name=name, description=description, category=category, tags=tags, map_sequence_number=map_sequence_number) map_path = map_def.get_resource_path() def _overwrite_entries(old_entries, new_entries, transaction): # Replace old entries with new entries, but copy additional # attributes from old entries for those kept in new entries # and marked the unwanted ones in the old entries as deleted # if it is in the transaction call. 
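# For instance (illustrative): if an old rule dict {'id': 'r1',
# 'logged': True, '_revision': 3} also appears in new_entries without
# 'logged' or '_revision', the rebuilt entry keeps both fields from the
# old rule; old rules missing from new_entries are returned as
# delete-marked defs only when running inside a transaction.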
old_rules = {entry["id"]: entry for entry in old_entries} replaced_entries = [] for entry in new_entries: rule_id = entry.get_id() new_rule = entry.get_obj_dict() old_rule = old_rules.get(rule_id) if old_rule: old_rules.pop(rule_id) for key, value in old_rule.items(): if key not in new_rule: new_rule[key] = value replaced_entries.append( self.entry_def.adapt_from_rule_dict( new_rule, domain_id, map_id)) if transaction: replaced_entries.extend( _mark_delete_entries(old_rules.values())) return replaced_entries def _mark_delete_entries(delete_rule_dicts): delete_entries = [] for delete_rule_dict in delete_rule_dicts: delete_entry = self.entry_def.adapt_from_rule_dict( delete_rule_dict, domain_id, map_id) delete_entry.set_delete() delete_entries.append(delete_entry) return delete_entries @utils.retry_upon_exception( exceptions.StaleRevision, max_attempts=self.policy_api.client.max_attempts) def _update(): transaction = trans.NsxPolicyTransaction.get_current() # Get the current data of communication map & its entries comm_map = self.policy_api.get(map_def) replaced_entries = None ignore_entries = (entries == IGNORE) if not ignore_entries: replaced_entries = _overwrite_entries(comm_map['rules'], entries, transaction) comm_map.pop('rules') map_def.set_obj_dict(comm_map) # Update the entire map at the NSX if transaction: self._create_or_store(map_def, replaced_entries) else: body = map_def.get_obj_dict() if not ignore_entries: body['rules'] = [rule.get_obj_dict() for rule in replaced_entries] self.policy_api.client.update(map_path, body) _update() def update_entries_logged(self, domain_id, map_id, logged, tenant=constants.POLICY_INFRA_TENANT): """Update all communication map entries logged flags""" map_def = self.parent_entry_def( domain_id=domain_id, map_id=map_id, tenant=tenant) map_path = map_def.get_resource_path() @utils.retry_upon_exception( exceptions.StaleRevision, max_attempts=self.policy_api.client.max_attempts) def _update(): # Get the current data of communication map & its' entries comm_map = self.policy_api.get(map_def) # Update the field in all the entries if comm_map.get('rules'): for comm_entry in comm_map['rules']: comm_entry['logged'] = logged # Update the entire map at the NSX self.policy_api.client.update(map_path, comm_map) _update() def get_realized_state(self, domain_id, map_id, entity_type=None, tenant=constants.POLICY_INFRA_TENANT, realization_info=None): map_def = self.parent_entry_def(map_id=map_id, domain_id=domain_id, tenant=tenant) return self._get_realized_state(map_def, entity_type=entity_type, realization_info=realization_info) def get_realized_id(self, domain_id, map_id, entity_type=None, tenant=constants.POLICY_INFRA_TENANT, realization_info=None): map_def = self.parent_entry_def(map_id=map_id, domain_id=domain_id, tenant=tenant) return self._get_realized_id(map_def, entity_type=entity_type, realization_info=realization_info) def get_realization_info(self, domain_id, map_id, entity_type=None, silent=False, tenant=constants.POLICY_INFRA_TENANT): map_def = self.parent_entry_def(map_id=map_id, domain_id=domain_id, tenant=tenant) return self._get_realization_info(map_def, entity_type=entity_type, silent=silent) class NsxPolicyCommunicationMapApi(NsxPolicySecurityPolicyBaseApi): """NSX Policy CommunicationMap (Under a Domain). 
AKA Security""" @property def entry_def(self): return core_defs.CommunicationMapEntryDef @property def parent_entry_def(self): return core_defs.CommunicationMapDef class NsxPolicyGatewayPolicyApi(NsxPolicySecurityPolicyBaseApi): """NSX Policy Gateway policy (Edge firewall)""" @property def entry_def(self): return core_defs.GatewayPolicyRuleDef @property def parent_entry_def(self): return core_defs.GatewayPolicyDef class NsxPolicyEnforcementPointApi(NsxPolicyResourceBase): """NSX Policy Enforcement Point.""" @property def entry_def(self): return core_defs.EnforcementPointDef def create_or_overwrite(self, name, ep_id=None, description=IGNORE, ip_address=IGNORE, username=IGNORE, password=IGNORE, thumbprint=IGNORE, edge_cluster_id=IGNORE, transport_zone_id=IGNORE, tenant=constants.POLICY_INFRA_TENANT): if not ip_address or not username or password is None: err_msg = (_("Cannot create an enforcement point without " "ip_address, username and password")) raise exceptions.ManagerError(details=err_msg) ep_id = self._init_obj_uuid(ep_id) ep_def = self._init_def(ep_id=ep_id, name=name, description=description, ip_address=ip_address, username=username, password=password, thumbprint=thumbprint, edge_cluster_id=edge_cluster_id, transport_zone_id=transport_zone_id, tenant=tenant) self._create_or_store(ep_def) return ep_id def delete(self, ep_id, tenant=constants.POLICY_INFRA_TENANT): ep_def = core_defs.EnforcementPointDef( ep_id=ep_id, tenant=tenant) self.policy_api.delete(ep_def) def get(self, ep_id, tenant=constants.POLICY_INFRA_TENANT, silent=False): ep_def = core_defs.EnforcementPointDef( ep_id=ep_id, tenant=tenant) return self.policy_api.get(ep_def, silent=silent) def list(self, tenant=constants.POLICY_INFRA_TENANT): ep_def = core_defs.EnforcementPointDef(tenant=tenant) return self._list(ep_def) def update(self, ep_id, name=IGNORE, description=IGNORE, ip_address=IGNORE, username=IGNORE, password=IGNORE, thumbprint=IGNORE, edge_cluster_id=IGNORE, transport_zone_id=IGNORE, tenant=constants.POLICY_INFRA_TENANT): """Update the enforcement point. 
username & password must be defined """ if not username or password is None: # username/password must be provided err_msg = (_("Cannot update an enforcement point without " "username and password")) raise exceptions.ManagerError(details=err_msg) # Get the original body because ip & thumbprint are mandatory ep_def = self._get_and_update_def(ep_id=ep_id, name=name, description=description, ip_address=ip_address, username=username, password=password, edge_cluster_id=edge_cluster_id, transport_zone_id=transport_zone_id, thumbprint=thumbprint, tenant=tenant) self.policy_api.create_or_update(ep_def) def get_realized_state(self, ep_id, entity_type=None, tenant=constants.POLICY_INFRA_TENANT, realization_info=None): ep_def = core_defs.EnforcementPointDef(ep_id=ep_id, tenant=tenant) return self._get_realized_state(ep_def, entity_type=entity_type, realization_info=realization_info) def get_realization_info(self, ep_id, entity_type=None, silent=False, tenant=constants.POLICY_INFRA_TENANT, realization_info=None): ep_def = core_defs.EnforcementPointDef(ep_id=ep_id, tenant=tenant) return self._get_realization_info(ep_def, entity_type=entity_type, silent=silent, realization_info=realization_info) def reload(self, ep_id, tenant=constants.POLICY_INFRA_TENANT): # Use post command to reload the enforcement point ep_def = core_defs.EnforcementPointDef(ep_id=ep_id, tenant=tenant) path = "%s?action=reload" % ep_def.get_resource_path() self.policy_api.client.create(path) class NsxPolicyTransportZoneApi(NsxPolicyResourceBase): TZ_TYPE_OVERLAY = 'OVERLAY_STANDARD' TZ_TYPE_ENS = 'OVERLAY_ENS' TZ_TYPE_VLAN = 'VLAN_BACKED' @property def entry_def(self): return core_defs.TransportZoneDef def get(self, tz_id, ep_id=constants.DEFAULT_ENFORCEMENT_POINT, tenant=constants.POLICY_INFRA_TENANT, silent=False): tz_def = core_defs.TransportZoneDef( ep_id=ep_id, tz_id=tz_id, tenant=tenant) return self.policy_api.get(tz_def, silent=silent) def get_tz_type(self, tz_id, ep_id=constants.DEFAULT_ENFORCEMENT_POINT, tenant=constants.POLICY_INFRA_TENANT): tz = self.get(tz_id, ep_id=ep_id, tenant=tenant) return tz.get('tz_type') def get_transport_type(self, tz_id, ep_id=constants.DEFAULT_ENFORCEMENT_POINT, tenant=constants.POLICY_INFRA_TENANT): """This api is consistent with the nsx manager resource api""" tz_type = self.get_tz_type(tz_id, ep_id=ep_id, tenant=tenant) if tz_type == self.TZ_TYPE_VLAN: return nsx_constants.TRANSPORT_TYPE_VLAN else: return nsx_constants.TRANSPORT_TYPE_OVERLAY def get_host_switch_mode(self, tz_id, ep_id=constants.DEFAULT_ENFORCEMENT_POINT, tenant=constants.POLICY_INFRA_TENANT): """This api is consistent with the nsx manager resource api""" tz_type = self.get_tz_type(tz_id, ep_id=ep_id, tenant=tenant) if tz_type == self.TZ_TYPE_ENS: return nsx_constants.HOST_SWITCH_MODE_ENS else: return nsx_constants.HOST_SWITCH_MODE_STANDARD def list(self, ep_id=constants.DEFAULT_ENFORCEMENT_POINT, tenant=constants.POLICY_INFRA_TENANT): tz_def = core_defs.TransportZoneDef(ep_id=ep_id, tenant=tenant) return self._list(tz_def) def get_by_name(self, name, ep_id=constants.DEFAULT_ENFORCEMENT_POINT, tenant=constants.POLICY_INFRA_TENANT): """Return first group matched by name""" return super(NsxPolicyTransportZoneApi, self).get_by_name( name, ep_id, tenant=tenant) def create_or_overwrite(self, name, tz_id=None, ep_id=constants.DEFAULT_ENFORCEMENT_POINT, tenant=constants.POLICY_INFRA_TENANT): err_msg = (_("This action is not supported")) raise exceptions.ManagerError(details=err_msg) def update(self, tz_id, 
ep_id=constants.DEFAULT_ENFORCEMENT_POINT, tenant=constants.POLICY_INFRA_TENANT): err_msg = (_("This action is not supported")) raise exceptions.ManagerError(details=err_msg) def delete(self, tz_id, ep_id=constants.DEFAULT_ENFORCEMENT_POINT, tenant=constants.POLICY_INFRA_TENANT): err_msg = (_("This action is not supported")) raise exceptions.ManagerError(details=err_msg) class NsxPolicyEdgeClusterApi(NsxPolicyResourceBase): @property def entry_def(self): return core_defs.EdgeClusterDef def get(self, ec_id, ep_id=constants.DEFAULT_ENFORCEMENT_POINT, tenant=constants.POLICY_INFRA_TENANT, silent=False): ec_def = core_defs.EdgeClusterDef( ep_id=ep_id, ec_id=ec_id, tenant=tenant) return self.policy_api.get(ec_def, silent=silent) def list(self, ep_id=constants.DEFAULT_ENFORCEMENT_POINT, tenant=constants.POLICY_INFRA_TENANT): ec_def = core_defs.EdgeClusterDef(ep_id=ep_id, tenant=tenant) return self._list(ec_def) def get_by_name(self, name, ep_id=constants.DEFAULT_ENFORCEMENT_POINT, tenant=constants.POLICY_INFRA_TENANT): """Return first group matched by name""" return super(NsxPolicyEdgeClusterApi, self).get_by_name( name, ep_id, tenant=tenant) def create_or_overwrite(self, name, ec_id=None, ep_id=constants.DEFAULT_ENFORCEMENT_POINT, tenant=constants.POLICY_INFRA_TENANT): err_msg = (_("This action is not supported")) raise exceptions.ManagerError(details=err_msg) def update(self, ec_id, ep_id=constants.DEFAULT_ENFORCEMENT_POINT, tenant=constants.POLICY_INFRA_TENANT): err_msg = (_("This action is not supported")) raise exceptions.ManagerError(details=err_msg) def delete(self, ec_id, ep_id=constants.DEFAULT_ENFORCEMENT_POINT, tenant=constants.POLICY_INFRA_TENANT): err_msg = (_("This action is not supported")) raise exceptions.ManagerError(details=err_msg) def get_path(self, ec_id, ep_id=constants.DEFAULT_ENFORCEMENT_POINT, tenant=constants.POLICY_INFRA_TENANT): ec_def = core_defs.EdgeClusterDef( ep_id=ep_id, ec_id=ec_id, tenant=tenant) return ec_def.get_resource_full_path() def get_edge_node_ids(self, ec_id, ep_id=constants.DEFAULT_ENFORCEMENT_POINT, tenant=constants.POLICY_INFRA_TENANT): nodes_def = core_defs.EdgeClusterNodeDef( ep_id=ep_id, ec_id=ec_id, tenant=tenant) nodes = self._list(nodes_def) return [node['id'] for node in nodes] class NsxPolicyMetadataProxyApi(NsxPolicyResourceBase): # Currently this is used as a ready only Api @property def entry_def(self): return core_defs.MetadataProxyDef def get(self, mdproxy_id, tenant=constants.POLICY_INFRA_TENANT, silent=False): md_def = core_defs.MetadataProxyDef( mdproxy_id=mdproxy_id, tenant=tenant) return self.policy_api.get(md_def, silent=silent) def list(self, tenant=constants.POLICY_INFRA_TENANT): md_def = core_defs.MetadataProxyDef(tenant=tenant) return self._list(md_def) def get_by_name(self, name, tenant=constants.POLICY_INFRA_TENANT): return super(NsxPolicyMetadataProxyApi, self).get_by_name( name, tenant=tenant) def create_or_overwrite(self, name, mdproxy_id=None, tenant=constants.POLICY_INFRA_TENANT): err_msg = (_("This action is not supported")) raise exceptions.ManagerError(details=err_msg) def update(self, mdproxy_id, tenant=constants.POLICY_INFRA_TENANT): err_msg = (_("This action is not supported")) raise exceptions.ManagerError(details=err_msg) def delete(self, mdproxy_id, tenant=constants.POLICY_INFRA_TENANT): err_msg = (_("This action is not supported")) raise exceptions.ManagerError(details=err_msg) def get_path(self, mdproxy_id, tenant=constants.POLICY_INFRA_TENANT): md_def = core_defs.MetadataProxyDef( mdproxy_id=mdproxy_id, 
tenant=tenant) return md_def.get_resource_full_path() class NsxPolicyDeploymentMapApi(NsxPolicyResourceBase): """NSX Policy Deployment Map.""" @property def entry_def(self): return core_defs.DeploymentMapDef def create_or_overwrite(self, name, map_id=None, description=IGNORE, ep_id=IGNORE, domain_id=IGNORE, tenant=constants.POLICY_INFRA_TENANT): map_id = self._init_obj_uuid(map_id) map_def = core_defs.DeploymentMapDef( map_id=map_id, name=name, description=description, ep_id=ep_id, domain_id=domain_id, tenant=tenant) self._create_or_store(map_def) return map_id def delete(self, map_id, domain_id=None, tenant=constants.POLICY_INFRA_TENANT): if not domain_id: # domain_id must be provided err_msg = (_("Cannot delete deployment maps without a domain")) raise exceptions.ManagerError(details=err_msg) map_def = core_defs.DeploymentMapDef( map_id=map_id, domain_id=domain_id, tenant=tenant) self.policy_api.delete(map_def) def get(self, map_id, domain_id=None, tenant=constants.POLICY_INFRA_TENANT, silent=False): if not domain_id: # domain_id must be provided err_msg = (_("Cannot get deployment maps without a domain")) raise exceptions.ManagerError(details=err_msg) map_def = core_defs.DeploymentMapDef( map_id=map_id, domain_id=domain_id, tenant=tenant) return self.policy_api.get(map_def, silent=silent) def list(self, domain_id=None, tenant=constants.POLICY_INFRA_TENANT): if not domain_id: # domain_id must be provided err_msg = (_("Cannot list deployment maps without a domain")) raise exceptions.ManagerError(details=err_msg) map_def = core_defs.DeploymentMapDef(domain_id=domain_id, tenant=tenant) return self._list(map_def) def update(self, map_id, name=IGNORE, description=IGNORE, ep_id=IGNORE, domain_id=IGNORE, tenant=constants.POLICY_INFRA_TENANT): self._update(map_id=map_id, name=name, description=description, ep_id=ep_id, domain_id=domain_id, tenant=tenant) class NsxSegmentProfileBaseApi(NsxPolicyResourceBase): """NSX Segment Profile base API""" def create_or_overwrite(self, name, profile_id=None, description=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): profile_id = self._init_obj_uuid(profile_id) profile_def = self._init_def(profile_id=profile_id, name=name, description=description, tags=tags, tenant=tenant) self._create_or_store(profile_def) return profile_id def delete(self, profile_id, tenant=constants.POLICY_INFRA_TENANT): profile_def = self.entry_def(profile_id=profile_id, tenant=tenant) self.policy_api.delete(profile_def) def get(self, profile_id, tenant=constants.POLICY_INFRA_TENANT, silent=False): profile_def = self.entry_def(profile_id=profile_id, tenant=tenant) return self.policy_api.get(profile_def, silent=silent) def list(self, tenant=constants.POLICY_INFRA_TENANT): profile_def = self.entry_def(tenant=tenant) return self._list(profile_def) def get_by_name(self, name, tenant=constants.POLICY_INFRA_TENANT): return super(NsxSegmentProfileBaseApi, self).get_by_name( name, tenant=tenant) def update(self, profile_id, name=IGNORE, description=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): self._update(profile_id=profile_id, name=name, description=description, tags=tags, tenant=tenant) def get_path(self, profile_id, tenant=constants.POLICY_INFRA_TENANT): profile_def = self.entry_def(profile_id=profile_id, tenant=tenant) return profile_def.get_resource_full_path() class NsxSegmentSecurityProfileApi(NsxSegmentProfileBaseApi): @property def entry_def(self): return core_defs.SegmentSecurityProfileDef def create_or_overwrite(self, name, profile_id=None, 
description=IGNORE, bpdu_filter_enable=IGNORE, dhcp_client_block_enabled=IGNORE, dhcp_client_block_v6_enabled=IGNORE, dhcp_server_block_enabled=IGNORE, dhcp_server_block_v6_enabled=IGNORE, non_ip_traffic_block_enabled=IGNORE, ra_guard_enabled=IGNORE, rate_limits_enabled=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): profile_id = self._init_obj_uuid(profile_id) profile_def = self._init_def( profile_id=profile_id, name=name, description=description, bpdu_filter_enable=bpdu_filter_enable, dhcp_client_block_enabled=dhcp_client_block_enabled, dhcp_client_block_v6_enabled=dhcp_client_block_v6_enabled, dhcp_server_block_enabled=dhcp_server_block_enabled, dhcp_server_block_v6_enabled=dhcp_server_block_v6_enabled, non_ip_traffic_block_enabled=non_ip_traffic_block_enabled, ra_guard_enabled=ra_guard_enabled, rate_limits_enabled=rate_limits_enabled, tags=tags, tenant=tenant) self._create_or_store(profile_def) return profile_id class NsxQosProfileApi(NsxSegmentProfileBaseApi): @property def entry_def(self): return core_defs.QosProfileDef def _build_rate_limiter(self, resource_type, average_bandwidth, peak_bandwidth, burst_size, enabled): return core_defs.QoSRateLimiter( resource_type=resource_type, average_bandwidth=average_bandwidth, peak_bandwidth=peak_bandwidth, burst_size=burst_size, enabled=enabled) def build_ingress_rate_limiter( self, average_bandwidth=None, peak_bandwidth=None, burst_size=None, enabled=True): return self._build_rate_limiter( resource_type=core_defs.QoSRateLimiter.INGRESS_RATE_LIMITER_TYPE, average_bandwidth=average_bandwidth, peak_bandwidth=peak_bandwidth, burst_size=burst_size, enabled=enabled) def build_egress_rate_limiter( self, average_bandwidth=None, peak_bandwidth=None, burst_size=None, enabled=True): return self._build_rate_limiter( resource_type=core_defs.QoSRateLimiter.EGRESS_RATE_LIMITER_TYPE, average_bandwidth=average_bandwidth, peak_bandwidth=peak_bandwidth, burst_size=burst_size, enabled=enabled) def build_dscp(self, trusted=False, priority=None): mode = (core_defs.QoSDscp.QOS_DSCP_TRUSTED if trusted else core_defs.QoSDscp.QOS_DSCP_UNTRUSTED) return core_defs.QoSDscp(mode=mode, priority=priority) def create_or_overwrite(self, name, profile_id=None, description=IGNORE, class_of_service=IGNORE, dscp=IGNORE, shaper_configurations=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): profile_id = self._init_obj_uuid(profile_id) profile_def = self._init_def( profile_id=profile_id, name=name, description=description, class_of_service=class_of_service, dscp=dscp, shaper_configurations=shaper_configurations, tags=tags, tenant=tenant) self._create_or_store(profile_def) return profile_id class NsxSpoofguardProfileApi(NsxSegmentProfileBaseApi): @property def entry_def(self): return core_defs.SpoofguardProfileDef def create_or_overwrite(self, name, profile_id=None, description=IGNORE, address_binding_whitelist=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): profile_id = self._init_obj_uuid(profile_id) profile_def = self._init_def( profile_id=profile_id, name=name, description=description, address_binding_whitelist=address_binding_whitelist, tags=tags, tenant=tenant) self._create_or_store(profile_def) return profile_id class NsxIpDiscoveryProfileApi(NsxSegmentProfileBaseApi): @property def entry_def(self): return core_defs.IpDiscoveryProfileDef class NsxWAFProfileApi(NsxSegmentProfileBaseApi): @property def entry_def(self): return core_defs.WAFProfileDef class NsxMacDiscoveryProfileApi(NsxSegmentProfileBaseApi): @property def entry_def(self): 
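# Usage sketch for the QoS profile API defined above (illustrative only;
# `nsxpolicy.qos_profile` is an assumed attribute name and the bandwidth
# values are arbitrary examples):
#
#     ingress = nsxpolicy.qos_profile.build_ingress_rate_limiter(
#         average_bandwidth=1000, peak_bandwidth=2000, burst_size=51200)
#     egress = nsxpolicy.qos_profile.build_egress_rate_limiter(
#         average_bandwidth=1000, peak_bandwidth=2000, burst_size=51200)
#     dscp = nsxpolicy.qos_profile.build_dscp(trusted=False, priority=5)
#     profile_id = nsxpolicy.qos_profile.create_or_overwrite(
#         'qos-profile-1', dscp=dscp,
#         shaper_configurations=[ingress, egress])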
return core_defs.MacDiscoveryProfileDef def create_or_overwrite(self, name, profile_id=None, description=IGNORE, mac_change_enabled=IGNORE, mac_learning_enabled=IGNORE, unknown_unicast_flooding_enabled=IGNORE, mac_limit_policy=IGNORE, mac_limit=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): profile_id = self._init_obj_uuid(profile_id) profile_def = self._init_def( profile_id=profile_id, name=name, description=description, mac_change_enabled=mac_change_enabled, mac_learning_enabled=mac_learning_enabled, unknown_unicast_flooding_enabled=unknown_unicast_flooding_enabled, mac_limit_policy=mac_limit_policy, mac_limit=mac_limit, tags=tags, tenant=tenant) self._create_or_store(profile_def) return profile_id class NsxIpv6NdraProfileApi(NsxPolicyResourceBase): @property def entry_def(self): return core_defs.Ipv6NdraProfileDef def create_or_overwrite(self, name, profile_id=None, description=IGNORE, ra_mode=IGNORE, reachable_timer=IGNORE, retransmit_interval=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): profile_id = self._init_obj_uuid(profile_id) profile_def = self._init_def( profile_id=profile_id, name=name, description=description, ra_mode=ra_mode, reachable_timer=reachable_timer, retransmit_interval=retransmit_interval, tags=tags, tenant=tenant) self._create_or_store(profile_def) return profile_id def delete(self, profile_id, tenant=constants.POLICY_INFRA_TENANT): profile_def = self.entry_def(profile_id=profile_id, tenant=tenant) self.policy_api.delete(profile_def) def get(self, profile_id, tenant=constants.POLICY_INFRA_TENANT): profile_def = self.entry_def(profile_id=profile_id, tenant=tenant) return self.policy_api.get(profile_def) def list(self, tenant=constants.POLICY_INFRA_TENANT): profile_def = self.entry_def(tenant=tenant) return self._list(profile_def) def get_by_name(self, name, tenant=constants.POLICY_INFRA_TENANT): return super(NsxSegmentProfileBaseApi, self).get_by_name( name, tenant=tenant) def update(self, profile_id, name=IGNORE, description=IGNORE, ra_mode=IGNORE, reachable_timer=IGNORE, retransmit_interval=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): self._update(profile_id=profile_id, name=name, description=description, ra_mode=ra_mode, reachable_timer=reachable_timer, retransmit_interval=retransmit_interval, tags=tags, tenant=tenant) class NsxDhcpRelayConfigApi(NsxPolicyResourceBase): @property def entry_def(self): return core_defs.DhcpRelayConfigDef def create_or_overwrite(self, name, config_id=None, description=None, server_addresses=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): config_id = self._init_obj_uuid(config_id) config_def = self._init_def( config_id=config_id, name=name, description=description, server_addresses=server_addresses, tags=tags, tenant=tenant) self._create_or_store(config_def) return config_id def delete(self, config_id, tenant=constants.POLICY_INFRA_TENANT): config_def = self.entry_def(config_id=config_id, tenant=tenant) self.policy_api.delete(config_def) def get(self, config_id, tenant=constants.POLICY_INFRA_TENANT, silent=False): config_def = self.entry_def(config_id=config_id, tenant=tenant) return self.policy_api.get(config_def, silent=silent) def list(self, tenant=constants.POLICY_INFRA_TENANT): config_def = self.entry_def(tenant=tenant) return self._list(config_def) def update(self, config_id, name=IGNORE, description=IGNORE, server_addresses=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): self._update(config_id=config_id, name=name, description=description, 
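# Usage sketch (illustrative only; `nsxpolicy.dhcp_relay_config` is an
# assumed attribute name for NsxDhcpRelayConfigApi and the address is an
# example value). The returned id can later be referenced when configuring
# routers or segments that should relay DHCP:
#
#     relay_id = nsxpolicy.dhcp_relay_config.create_or_overwrite(
#         'dhcp-relay-1', server_addresses=['10.0.0.2'])
#     cfg = nsxpolicy.dhcp_relay_config.get(relay_id)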
server_addresses=server_addresses, tags=tags, tenant=tenant) class NsxDhcpServerConfigApi(NsxPolicyResourceBase): @property def entry_def(self): return core_defs.DhcpServerConfigDef def create_or_overwrite(self, name, config_id=None, description=None, server_addresses=IGNORE, edge_cluster_path=IGNORE, lease_time=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): config_id = self._init_obj_uuid(config_id) config_def = self._init_def( config_id=config_id, name=name, description=description, server_addresses=server_addresses, edge_cluster_path=edge_cluster_path, lease_time=lease_time, tags=tags, tenant=tenant) self._create_or_store(config_def) return config_id def delete(self, config_id, tenant=constants.POLICY_INFRA_TENANT): config_def = self.entry_def(config_id=config_id, tenant=tenant) self.policy_api.delete(config_def) def get(self, config_id, tenant=constants.POLICY_INFRA_TENANT, silent=False): config_def = self.entry_def(config_id=config_id, tenant=tenant) return self.policy_api.get(config_def, silent=silent) def list(self, tenant=constants.POLICY_INFRA_TENANT): config_def = self.entry_def(tenant=tenant) return self._list(config_def) def update(self, config_id, name=IGNORE, description=IGNORE, server_addresses=IGNORE, edge_cluster_path=IGNORE, lease_time=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): self._update(config_id=config_id, name=name, description=description, server_addresses=server_addresses, edge_cluster_path=edge_cluster_path, lease_time=lease_time, tags=tags, tenant=tenant) class NsxPolicyCertApi(NsxPolicyResourceBase): """NSX Policy Certificate API.""" @property def entry_def(self): return core_defs.CertificateDef def create_or_overwrite(self, name, certificate_id=None, pem_encoded=IGNORE, private_key=IGNORE, passphrase=IGNORE, key_algo=IGNORE, description=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): certificate_id = self._init_obj_uuid(certificate_id) certificate_def = self._init_def(certificate_id=certificate_id, name=name, private_key=private_key, pem_encoded=pem_encoded, passphrase=passphrase, key_algo=key_algo, description=description, tags=tags, tenant=tenant) self._create_or_store(certificate_def) return certificate_id def delete(self, certificate_id, tenant=constants.POLICY_INFRA_TENANT): certificate_def = self.entry_def(certificate_id=certificate_id, tenant=tenant) self.policy_api.delete(certificate_def) def get(self, certificate_id, tenant=constants.POLICY_INFRA_TENANT, silent=False): certificate_def = self.entry_def(certificate_id=certificate_id, tenant=tenant) return self.policy_api.get(certificate_def, silent=silent) def list(self, tenant=constants.POLICY_INFRA_TENANT): certificate_def = self.entry_def(tenant=tenant) return self._list(certificate_def) def update(self, certificate_id, name=IGNORE, pem_encoded=IGNORE, private_key=IGNORE, passphrase=IGNORE, key_algo=IGNORE, description=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): self._update(certificate_id=certificate_id, name=name, description=description, tags=tags, private_key=private_key, pem_encoded=pem_encoded, passphrase=passphrase, key_algo=key_algo, tenant=tenant) def get_path(self, certificate_id, tenant=constants.POLICY_INFRA_TENANT): c_def = self.entry_def(certificate_id=certificate_id, tenant=tenant) return c_def.get_resource_full_path() def wait_until_realized(self, certificate_id, entity_type=None, tenant=constants.POLICY_INFRA_TENANT, sleep=None, max_attempts=None): cert_def = self.entry_def( certificate_id=certificate_id, tenant=tenant) return 
self._wait_until_realized( cert_def, entity_type=entity_type, sleep=sleep, max_attempts=max_attempts) class NsxPolicyExcludeListApi(NsxPolicyResourceBase): """NSX Policy Exclude list.""" @property def entry_def(self): return core_defs.ExcludeListDef def create_or_overwrite(self, members=IGNORE, tenant=constants.POLICY_INFRA_TENANT): exclude_list_def = self._init_def(members=members, tenant=tenant) self._create_or_store(exclude_list_def) def delete(self, tenant=constants.POLICY_INFRA_TENANT): err_msg = (_("This action is not supported")) raise exceptions.ManagerError(details=err_msg) def get(self, tenant=constants.POLICY_INFRA_TENANT, silent=False): exclude_list_def = self.entry_def(tenant=tenant) return self.policy_api.get(exclude_list_def, silent=silent) def list(self, tenant=constants.POLICY_INFRA_TENANT): err_msg = (_("This action is not supported")) raise exceptions.ManagerError(details=err_msg) def update(self, members=IGNORE, tenant=constants.POLICY_INFRA_TENANT): err_msg = (_("This action is not supported")) raise exceptions.ManagerError(details=err_msg) # TODO(asarfaty): Add support for add/remove member class NsxPolicyTier0RouteMapApi(NsxPolicyResourceBase): @property def entry_def(self): return core_defs.Tier0RouteMapDef def create_or_overwrite(self, name, tier0_id, route_map_id=None, entries=IGNORE, description=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): route_map_id = self._init_obj_uuid(route_map_id) route_map_def = self._init_def(tier0_id=tier0_id, route_map_id=route_map_id, name=name, entries=entries, description=description, tags=tags, tenant=tenant) self._create_or_store(route_map_def) return route_map_id def delete(self, tier0_id, route_map_id, tenant=constants.POLICY_INFRA_TENANT): route_map_def = self.entry_def(tier0_id=tier0_id, route_map_id=route_map_id, tenant=tenant) self.policy_api.delete(route_map_def) def get(self, tier0_id, route_map_id, tenant=constants.POLICY_INFRA_TENANT): route_map_def = self.entry_def(tier0_id=tier0_id, route_map_id=route_map_id, tenant=tenant) return self.policy_api.get(route_map_def) def list(self, tier0_id, tenant=constants.POLICY_INFRA_TENANT): route_map_def = self.entry_def(tier0_id=tier0_id, tenant=tenant) return self._list(route_map_def) def update(self, name, tier0_id, route_map_id, entries, description=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): self._update(tier0_id=tier0_id, route_map_id=route_map_id, name=name, entries=entries, description=description, tags=tags, tenant=tenant) def build_route_map_entry(self, action, community_list_matches=None, prefix_list_matches=None, entry_set=None): return core_defs.RouteMapEntry(action, community_list_matches, prefix_list_matches, entry_set) def build_route_map_entry_set(self, local_preference=100, as_path_prepend=None, community=None, med=None, weight=None): return core_defs.RouteMapEntrySet(local_preference, as_path_prepend, community, med, weight) def build_community_match_criteria(self, criteria, match_operator=None): return core_defs.CommunityMatchCriteria(criteria, match_operator) class NsxPolicyTier0PrefixListApi(NsxPolicyResourceBase): @property def entry_def(self): return core_defs.Tier0PrefixListDef def create_or_overwrite(self, name, tier0_id, prefix_list_id=None, prefixes=IGNORE, description=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): prefix_list_id = self._init_obj_uuid(prefix_list_id) prefix_list_def = self._init_def(tier0_id=tier0_id, prefix_list_id=prefix_list_id, name=name, prefixes=prefixes, description=description, 
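# Usage sketch for the tier-0 route map and prefix list APIs above
# (illustrative only; `nsxpolicy.tier0_route_map`,
# `nsxpolicy.tier0_prefix_list` and the tier-0 id are assumed example
# names):
#
#     entry_set = nsxpolicy.tier0_route_map.build_route_map_entry_set(
#         local_preference=200)
#     entry = nsxpolicy.tier0_route_map.build_route_map_entry(
#         constants.ADV_RULE_PERMIT, entry_set=entry_set)
#     rm_id = nsxpolicy.tier0_route_map.create_or_overwrite(
#         'route-map-1', 'tier0-uuid', entries=[entry])
#
#     prefix = nsxpolicy.tier0_prefix_list.build_prefix_entry(
#         '10.0.0.0/8', ge=24)
#     pl_id = nsxpolicy.tier0_prefix_list.create_or_overwrite(
#         'prefix-list-1', 'tier0-uuid', prefixes=[prefix])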
tags=tags, tenant=tenant) self._create_or_store(prefix_list_def) return prefix_list_id def delete(self, tier0_id, prefix_list_id, tenant=constants.POLICY_INFRA_TENANT): prefix_list_def = self.entry_def(tier0_id=tier0_id, prefix_list_id=prefix_list_id, tenant=tenant) self.policy_api.delete(prefix_list_def) def get(self, tier0_id, prefix_list_id, tenant=constants.POLICY_INFRA_TENANT): prefix_list_def = self.entry_def(tier0_id=tier0_id, prefix_list_id=prefix_list_id, tenant=tenant) return self.policy_api.get(prefix_list_def) def list(self, tier0_id, tenant=constants.POLICY_INFRA_TENANT): prefix_list_def = self.entry_def(tier0_id=tier0_id, tenant=tenant) return self._list(prefix_list_def) def update(self, name, tier0_id, prefix_list_id, prefixes, description=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): self._update(tier0_id=tier0_id, prefix_list_id=prefix_list_id, name=name, prefixes=prefixes, description=description, tags=tags, tenant=tenant) def build_prefix_entry(self, network, le=None, ge=None, action=constants.ADV_RULE_PERMIT): return core_defs.PrefixEntry(network, le, ge, action) class NsxPolicyGlobalConfig(NsxPolicyResourceBase): @property def entry_def(self): return core_defs.GlobalConfigDef def create_or_overwrite(self, tenant=constants.POLICY_INFRA_TENANT): err_msg = (_("This action is not supported")) raise exceptions.ManagerError(details=err_msg) def delete(self, tenant=constants.POLICY_INFRA_TENANT): err_msg = (_("This action is not supported")) raise exceptions.ManagerError(details=err_msg) def get(self, tenant=constants.POLICY_INFRA_TENANT, silent=False): global_config_def = self.entry_def(tenant=tenant) return self.policy_api.get(global_config_def, silent=silent) def list(self, tenant=constants.POLICY_INFRA_TENANT): err_msg = (_("This action is not supported")) raise exceptions.ManagerError(details=err_msg) def update(self, members=IGNORE, tenant=constants.POLICY_INFRA_TENANT): err_msg = (_("This action is not supported")) raise exceptions.ManagerError(details=err_msg) def _set_l3_forwarding_mode(self, mode, tenant): # Using PUT as PATCH is not supported for this API config = self.get() if config['l3_forwarding_mode'] != mode: config['l3_forwarding_mode'] = mode config_def = self.entry_def(tenant=tenant) path = config_def.get_resource_path() self.policy_api.client.update(path, config) def enable_ipv6(self, tenant=constants.POLICY_INFRA_TENANT): return self._set_l3_forwarding_mode('IPV4_AND_IPV6', tenant) def disable_ipv6(self, tenant=constants.POLICY_INFRA_TENANT): return self._set_l3_forwarding_mode('IPV4_ONLY', tenant) vmware-nsxlib-15.0.6/vmware_nsxlib/v3/policy/core_defs.py0000664000175000017500000023164213623151571023451 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import abc from distutils import version from oslo_log import log as logging import six from vmware_nsxlib.v3 import nsx_constants from vmware_nsxlib.v3.policy import constants from vmware_nsxlib.v3 import utils LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__) TENANTS_PATH_PATTERN = "%s/" DOMAINS_PATH_PATTERN = TENANTS_PATH_PATTERN + "domains/" IP_BLOCKS_PATH_PATTERN = TENANTS_PATH_PATTERN + "ip-blocks/" IP_POOLS_PATH_PATTERN = TENANTS_PATH_PATTERN + "ip-pools/" SEGMENTS_PATH_PATTERN = TENANTS_PATH_PATTERN + "segments/" PROVIDERS_PATH_PATTERN = TENANTS_PATH_PATTERN + "providers/" TIER0S_PATH_PATTERN = TENANTS_PATH_PATTERN + "tier-0s/" TIER1S_PATH_PATTERN = TENANTS_PATH_PATTERN + "tier-1s/" SERVICES_PATH_PATTERN = TENANTS_PATH_PATTERN + "services/" GLOBAL_CONFIG_PATH_PATTERN = TENANTS_PATH_PATTERN + "global-config/" ENFORCEMENT_POINT_PATTERN = (TENANTS_PATH_PATTERN + "sites/default/enforcement-points/") TRANSPORT_ZONE_PATTERN = ENFORCEMENT_POINT_PATTERN + "%s/transport-zones/" EDGE_CLUSTER_PATTERN = ENFORCEMENT_POINT_PATTERN + "%s/edge-clusters/" SEGMENT_SECURITY_PROFILES_PATH_PATTERN = (TENANTS_PATH_PATTERN + "segment-security-profiles/") QOS_PROFILES_PATH_PATTERN = TENANTS_PATH_PATTERN + "qos-profiles/" SPOOFGUARD_PROFILES_PATH_PATTERN = (TENANTS_PATH_PATTERN + "spoofguard-profiles/") IP_DISCOVERY_PROFILES_PATH_PATTERN = (TENANTS_PATH_PATTERN + "ip-discovery-profiles/") MAC_DISCOVERY_PROFILES_PATH_PATTERN = (TENANTS_PATH_PATTERN + "mac-discovery-profiles/") IPV6_NDRA_PROFILES_PATH_PATTERN = (TENANTS_PATH_PATTERN + "ipv6-ndra-profiles/") WAF_PROFILES_PATH_PATTERN = (TENANTS_PATH_PATTERN + "waf-profiles/") CERTIFICATE_PATH_PATTERN = TENANTS_PATH_PATTERN + "certificates/" EXCLUDE_LIST_PATH_PATTERN = (TENANTS_PATH_PATTERN + "settings/firewall/security/exclude-list") REALIZATION_PATH = "infra/realized-state/realized-entities?intent_path=%s" DHCP_REALY_PATTERN = TENANTS_PATH_PATTERN + "dhcp-relay-configs/" DHCP_SERVER_PATTERN = TENANTS_PATH_PATTERN + "dhcp-server-configs/" MDPROXY_PATTERN = TENANTS_PATH_PATTERN + "metadata-proxies/" TIER0_LOCALE_SERVICES_PATH_PATTERN = (TIER0S_PATH_PATTERN + "%s/locale-services/") TIER1_LOCALE_SERVICES_PATH_PATTERN = (TIER1S_PATH_PATTERN + "%s/locale-services/") @six.add_metaclass(abc.ABCMeta) class ResourceDef(object): def __init__(self, nsx_version=None, **kwargs): self.attrs = kwargs # nsx_version should be passed in on init if the resource has # version-dependant attributes. Otherwise this is ignored self.nsx_version = nsx_version # init default tenant self.attrs['tenant'] = self.get_tenant() self.body = {} # Whether this entry needs to be deleted self.delete = False # As of now, for some defs (ex: services) child entry is required, # meaning parent creation will fail without the child. # Unfortunately in transactional API policy still fails us, even if # child is specified as ChildEntry in same transaction. # To provide a workaround, we need keep reference to the child and # populate child entry inside parent clause in transactional API. 
# TODO(annak): remove this if/when policy solves this self.mandatory_child_def = None def set_delete(self): self.delete = True def get_delete(self): return self.delete def get_obj_dict(self): body = self.body if self.body else {} if self.resource_type(): body['resource_type'] = self.resource_type() self._set_attr_if_specified(body, 'name', 'display_name') self._set_attrs_if_specified(body, ['description', 'tags']) resource_id = self.get_id() if resource_id: body['id'] = resource_id return body # This is needed for sake of update due to policy issue. # Policy refuses to update without requires attributes provided, # so we need to run an extra GET to acquire these. # This should be removed when/if this issue is fixed on backend. def set_obj_dict(self, obj_dict): self.body = obj_dict @abc.abstractproperty def path_pattern(self): pass @abc.abstractproperty def path_ids(self): pass @staticmethod def resource_type(): pass @classmethod def resource_class(cls): # Returns base resource type for polymorphic objects # if not overriden, would return resource_type return cls.resource_type() @staticmethod def resource_use_cache(): return False def path_defs(self): pass def get_id(self): if self.attrs and self.path_ids: return self.attrs.get(self.path_ids[-1]) def get_attr(self, attr): return self.attrs.get(attr) def has_attr(self, attr): return attr in self.attrs def get_tenant(self): if self.attrs.get('tenant'): return self.attrs.get('tenant') return constants.POLICY_INFRA_TENANT def get_section_path(self): path_ids = [self.get_attr(path_id) for path_id in self.path_ids[:-1]] return self.path_pattern % (tuple(path_ids)) def get_resource_path(self): resource_id = self.get_id() if resource_id: return self.get_section_path() + resource_id return self.get_section_path() def get_resource_full_path(self): return '/' + self.get_resource_path() @property def get_last_section_dict_key(self): last_section = self.path_pattern.split("/")[-2] return last_section.replace('-', '_') @staticmethod def sub_entries_path(): pass def _get_body_from_kwargs(self, **kwargs): if 'body' in kwargs: body = kwargs['body'] else: body = {} return body # Helper to set attr in body if user specified it # Can be used if body name is different than attr name # If value is different than self.get_attr(attr), it can be set in arg def _set_attr_if_specified(self, body, attr, body_attr=None, **kwargs): if self.has_attr(attr): value = (kwargs['value'] if 'value' in kwargs else self.get_attr(attr)) if body_attr: # Body attr is different that attr exposed by resource def body[body_attr] = value else: # Body attr is the same body[attr] = value # Helper to set attrs in body if user specified them # Body name must match attr name def _set_attrs_if_specified(self, body, attr_list): for attr in attr_list: self._set_attr_if_specified(body, attr) # Helper to set attr in body if user specified it # and current nsx version supports it # Body name must match attr name def _set_attr_if_supported(self, body, attr, value=None): if self.has_attr(attr) and self._version_dependant_attr_supported( attr): value = value if value is not None else self.get_attr(attr) body[attr] = value # Helper to set attrs in body if user specified them # and current nsx version supports it # Body name must match attr name def _set_attrs_if_supported(self, body, attr_list): for attr in attr_list: self._set_attr_if_supported(body, attr) @property def version_dependant_attr_map(self): """Specify version depenand attributes and supporting NSX version Resources that contain version 
dependant attributes should specify attribute name and first supporting version in map returned from this call. """ return {} def _version_dependant_attr_supported(self, attr): """Check if a version dependent attr is supported on current NSX For each resource def, there could be some attributes which only exist on NSX after certain versions. These attrs should be defined on def level via version_dependant_attr_map, where map value indicates NSX version that first exposes the support. By design, Devs should use _set_attr_if_supported() to add any attrs that are only known to NSX after a certain version. This method works as a registry for _set_attrs_if_supported() to know the baseline version of each version dependent attr. Non-version-dependent attributes should be added to the request body by using _set_attr_if_specified(). This method defaults to false since any version dependent attr unknown to this lib should be excluded for security and safety reasons. """ supporting_version = self.version_dependant_attr_map.get(attr) if not supporting_version: LOG.warning("Supporting version not defined for attr %s. Assuming " "no support", attr) return False if (version.LooseVersion(self.nsx_version) >= version.LooseVersion(supporting_version)): return True LOG.warning( "Ignoring %s for %s %s: this feature is not supported." "Current NSX version: %s. Minimum supported version: %s", attr, self.resource_type, self.attrs.get('name', ''), self.nsx_version, supporting_version) return False @classmethod def get_single_entry(cls, obj_body): """Return the single sub-entry from the object body. If there are no entries, or more than 1 - return None. """ entries_path = cls.sub_entries_path() if not entries_path: # This sub class doesn't support this return if (entries_path not in obj_body or len(obj_body[entries_path]) != 1): return return obj_body[entries_path][0] def bodyless(self): """Return True if args contain only keys and meta attrs""" meta = ['resource_type'] meta.extend(self.path_ids) body_args = [key for key in self.attrs.keys() if key not in meta] return len(body_args) == 0 def set_default_mandatory_vals(self): pass class TenantDef(ResourceDef): @property def path_pattern(self): return TENANTS_PATH_PATTERN @staticmethod def resource_type(): return 'Infra' def path_defs(self): return () @property def path_ids(self): return ('tenant',) def get_resource_path(self): return 'infra/' def get_section_path(self): return 'infra/' class DomainDef(ResourceDef): @property def path_pattern(self): return DOMAINS_PATH_PATTERN @property def path_ids(self): return ('tenant', 'domain_id') @staticmethod def resource_type(): return 'Domain' def path_defs(self): return (TenantDef,) class RouteAdvertisement(object): types = {'static_routes': constants.ADV_RULE_TYPE_TIER1_STATIC_ROUTES, 'subnets': constants.ADV_RULE_TIER1_CONNECTED, 'nat': constants.ADV_RULE_TIER1_NAT, 'lb_vip': constants.ADV_RULE_TIER1_LB_VIP, 'lb_snat': constants.ADV_RULE_TIER1_LB_SNAT, 'dns_forwarder_ip': constants.ADV_RULE_TIER1_DNS_FORWARDER_IP, 'ipsec_endpoints': constants.ADV_RULE_TIER1_IPSEC_LOCAL_ENDPOINT} def __init__(self, **kwargs): self.attrs = kwargs def get_obj_dict(self): return [value for key, value in self.types.items() if self.attrs.get(key) is True] def set_obj_dict(self, obj_dict): # This initializes object based on list coming from backend # f.e. 
[TIER1_NAT, TIER1_LB_SNAT] for key, value in self.types.items(): self.attrs[key] = value in obj_dict def update(self, **kwargs): # "None" will be passed as value when user does not specify adv type # True/False will be passed when user wants to switch adv ON/OFF for key, value in kwargs.items(): if value is not None: self.attrs[key] = value class RouterDef(ResourceDef): def path_defs(self): return (TenantDef,) def get_obj_dict(self): body = super(RouterDef, self).get_obj_dict() self._set_attrs_if_specified(body, ['failover_mode', 'force_whitelisting', 'default_rule_logging', 'disable_firewall']) # Add dhcp relay config # TODO(asarfaty): this can be either dhcp or dhcp relay config if self.has_attr('dhcp_config'): paths = "" if self.get_attr('dhcp_config'): dhcp_conf = DhcpRelayConfigDef( config_id=self.get_attr('dhcp_config'), tenant=self.get_tenant()) paths = [dhcp_conf.get_resource_full_path()] self._set_attr_if_specified(body, 'dhcp_config', body_attr='dhcp_config_paths', value=paths) if self.has_attr('ipv6_ndra_profile_id'): if self.get_attr('ipv6_ndra_profile_id'): ndra_profile = Ipv6NdraProfileDef( profile_id=self.get_attr('ipv6_ndra_profile_id'), tenant=self.get_tenant()) else: # Set it to the default profile # This will allow removing the old profile, # as the NSX does not support empty value. ndra_profile = Ipv6NdraProfileDef( profile_id=Ipv6NdraProfileDef.default_profile(), tenant=self.get_tenant()) paths = [ndra_profile.get_resource_full_path()] self._set_attr_if_specified(body, 'ipv6_ndra_profile_id', body_attr='ipv6_profile_paths', value=paths) return body class Tier0Def(RouterDef): @property def path_pattern(self): return TIER0S_PATH_PATTERN @property def path_ids(self): return ('tenant', 'tier0_id') @staticmethod def resource_type(): return 'Tier0' @staticmethod def resource_use_cache(): return True def get_obj_dict(self): body = super(Tier0Def, self).get_obj_dict() self._set_attrs_if_specified(body, ['ha_mode', 'transit_subnets']) return body class Tier1Def(RouterDef): @property def path_pattern(self): return TIER1S_PATH_PATTERN @property def path_ids(self): return ('tenant', 'tier1_id') @staticmethod def resource_type(): return 'Tier1' def get_obj_dict(self): body = super(Tier1Def, self).get_obj_dict() if self.has_attr('tier0'): tier0 = self.get_attr('tier0') tier0_path = "" if tier0: tenant = TENANTS_PATH_PATTERN % self.get_tenant() tier0_path = "/%stier-0s/%s" % (tenant, tier0) self._set_attr_if_specified(body, 'tier0', body_attr='tier0_path', value=tier0_path) if self.has_attr('route_advertisement'): body['route_advertisement_types'] = self.get_attr( 'route_advertisement').get_obj_dict() self._set_attrs_if_specified(body, ['enable_standby_relocation']) self._set_attr_if_supported(body, 'pool_allocation') if self.has_attr('route_advertisement_rules'): body['route_advertisement_rules'] = [ a.get_obj_dict() if isinstance(a, RouteAdvertisementRule) else a for a in self.get_attr('route_advertisement_rules')] return body @staticmethod def get_route_adv(obj_dict): route_adv = RouteAdvertisement() if 'route_advertisement_types' in obj_dict: route_adv.set_obj_dict(obj_dict['route_advertisement_types']) return route_adv @property def version_dependant_attr_map(self): return {'pool_allocation': nsx_constants.NSX_VERSION_3_0_0} class RouterLocaleServiceDef(ResourceDef): @staticmethod def resource_type(): return 'LocaleServices' def get_obj_dict(self): body = super(RouterLocaleServiceDef, self).get_obj_dict() self._set_attr_if_specified(body, 'edge_cluster_path') return body class 
Tier0LocaleServiceDef(RouterLocaleServiceDef): @property def path_pattern(self): return TIER0_LOCALE_SERVICES_PATH_PATTERN @property def path_ids(self): return ('tenant', 'tier0_id', 'service_id') @property def version_dependant_attr_map(self): return {'route_redistribution_config': nsx_constants.NSX_VERSION_3_0_0} def path_defs(self): return (TenantDef, Tier0Def) def get_obj_dict(self): body = super(Tier0LocaleServiceDef, self).get_obj_dict() if (self.has_attr('route_redistribution_config') and self._version_dependant_attr_supported( 'route_redistribution_config')): config = self.get_attr('route_redistribution_config') body['route_redistribution_config'] = ( config.get_obj_dict() if isinstance(config, Tier0RouteRedistributionConfig) else config) return body class Tier1LocaleServiceDef(RouterLocaleServiceDef): @property def path_pattern(self): return TIER1_LOCALE_SERVICES_PATH_PATTERN @property def path_ids(self): return ('tenant', 'tier1_id', 'service_id') def path_defs(self): return (TenantDef, Tier1Def) class Tier0InterfaceDef(ResourceDef): @staticmethod def resource_type(): return 'Tier0Interface' @property def path_pattern(self): return TIER0_LOCALE_SERVICES_PATH_PATTERN + "%s/interfaces/" @property def path_ids(self): return ('tenant', 'tier0_id', 'service_id', 'interface_id') class Tier1InterfaceDef(ResourceDef): @staticmethod def resource_type(): return 'Tier1Interface' @property def path_pattern(self): return TIER1_LOCALE_SERVICES_PATH_PATTERN + "%s/interfaces/" def get_obj_dict(self): body = super(Tier1InterfaceDef, self).get_obj_dict() if self.has_attr('subnets'): # subnets expected to be of type InterfaceSubnet if self.get_attr('subnets'): subnets = [subnet.get_obj_dict() if isinstance(subnet, InterfaceSubnet) else subnet for subnet in self.get_attr('subnets')] self._set_attr_if_specified(body, 'subnets', value=subnets) if self.has_attr('segment_id'): path = "" if self.get_attr('segment_id'): tier1 = SegmentDef(segment_id=self.get_attr('segment_id'), tenant=self.get_tenant()) path = tier1.get_resource_full_path() self._set_attr_if_specified(body, 'segment_id', body_attr='segment_path', value=path) if self.has_attr('ipv6_ndra_profile_id'): if self.get_attr('ipv6_ndra_profile_id'): ndra_profile = Ipv6NdraProfileDef( profile_id=self.get_attr('ipv6_ndra_profile_id'), tenant=self.get_tenant()) else: # Set it to the default profile # This will allow removing the old profile, # as the NSX does not support empty value. 
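# Note on the version-gated attribute mechanism used by the defs above
# (an illustrative sketch; ExampleDef is hypothetical and mirrors how
# Tier1Def gates 'pool_allocation'): a def lists the attribute and its
# minimum NSX version in version_dependant_attr_map and adds it to the
# body with _set_attr_if_supported(). When the lib runs against an older
# NSX, the attribute is skipped and a warning is logged.
#
#     class ExampleDef(ResourceDef):
#         @property
#         def version_dependant_attr_map(self):
#             return {'new_attr': nsx_constants.NSX_VERSION_3_0_0}
#
#         def get_obj_dict(self):
#             body = super(ExampleDef, self).get_obj_dict()
#             self._set_attr_if_supported(body, 'new_attr')
#             return body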
ndra_profile = Ipv6NdraProfileDef( profile_id=Ipv6NdraProfileDef.default_profile(), tenant=self.get_tenant()) paths = [ndra_profile.get_resource_full_path()] self._set_attr_if_specified(body, 'ipv6_ndra_profile_id', body_attr='ipv6_profile_paths', value=paths) return body @property def path_ids(self): return ('tenant', 'tier1_id', 'service_id', 'interface_id') class RouterNatRule(ResourceDef): @staticmethod def resource_type(): return 'PolicyNatRule' def get_obj_dict(self): body = super(RouterNatRule, self).get_obj_dict() self._set_attrs_if_specified(body, ['action', 'source_network', 'destination_network', 'translated_network', 'firewall_match', 'log', 'sequence_number', 'enabled']) return body def set_default_mandatory_vals(self): if not self.has_attr('action'): self.attrs['action'] = constants.NAT_ACTION_DNAT class Tier1NatDef(RouterDef): @property def path_pattern(self): return TIER1S_PATH_PATTERN + "%s/nat" @property def path_ids(self): return ('tenant', 'tier1_id') @staticmethod def resource_type(): return 'PolicyNat' class Tier1NatRule(RouterNatRule): @property def path_pattern(self): return TIER1S_PATH_PATTERN + "%s/nat/%s/nat-rules/" @property def path_ids(self): return ('tenant', 'tier1_id', 'nat_id', 'nat_rule_id') def path_defs(self): return (TenantDef, Tier1Def, Tier1NatDef) class RouteAdvertisementRule(object): def __init__(self, name, action=constants.ADV_RULE_PERMIT, prefix_operator=constants.ADV_RULE_OPERATOR_GE, route_advertisement_types=None, subnets=None): self.name = name self.action = action self.prefix_operator = prefix_operator self.route_advertisement_types = route_advertisement_types self.subnets = subnets def get_obj_dict(self): return {'name': self.name, 'action': self.action, 'prefix_operator': self.prefix_operator, 'route_advertisement_types': self.route_advertisement_types, 'subnets': self.subnets} class RouterStaticRoute(ResourceDef): @staticmethod def resource_type(): return 'StaticRoutes' def get_obj_dict(self): body = super(RouterStaticRoute, self).get_obj_dict() self._set_attrs_if_specified(body, ['network']) # next hops if self.has_attr('next_hop'): next_hop = self.get_attr('next_hop') next_hops = [{'ip_address': next_hop}] self._set_attr_if_specified(body, 'next_hop', body_attr='next_hops', value=next_hops) return body class Tier1StaticRoute(RouterStaticRoute): @property def path_pattern(self): return TIER1S_PATH_PATTERN + "%s/static-routes/" @property def path_ids(self): return ('tenant', 'tier1_id', 'static_route_id') def path_defs(self): return (TenantDef, Tier1Def) class Tier0StaticRoute(RouterStaticRoute): @property def path_pattern(self): return TIER0S_PATH_PATTERN + "%s/static-routes/" @property def path_ids(self): return ('tenant', 'tier0_id', 'static_route_id') def path_defs(self): return (TenantDef, Tier0Def) class Tier0NatDef(RouterDef): @property def path_pattern(self): return TIER0S_PATH_PATTERN + "%s/nat" @property def path_ids(self): return ('tenant', 'tier0_id') @staticmethod def resource_type(): return 'PolicyNat' class Tier0NatRule(RouterNatRule): @property def path_pattern(self): return TIER0S_PATH_PATTERN + "%s/nat/%s/nat-rules/" @property def path_ids(self): return ('tenant', 'tier0_id', 'nat_id', 'nat_rule_id') def path_defs(self): return (TenantDef, Tier0Def, Tier0NatDef) class Subnet(object): def __init__(self, gateway_address, dhcp_ranges=None, dhcp_config=None): self.gateway_address = gateway_address self.dhcp_ranges = dhcp_ranges self.dhcp_config = dhcp_config def get_obj_dict(self): body = {'gateway_address': 
self.gateway_address} if self.dhcp_ranges: body['dhcp_ranges'] = self.dhcp_ranges if self.dhcp_config: body['dhcp_config'] = ( self.dhcp_config.get_obj_dict() if isinstance(self.dhcp_config, SegmentDhcpConfig) else self.dhcp_config) return body class SegmentDhcpConfig(object): def __init__(self, server_address=None, dns_servers=None, lease_time=None, options=None, is_ipv6=False): if is_ipv6: self.resource_type = 'SegmentDhcpV6Config' else: self.resource_type = 'SegmentDhcpV4Config' self.server_address = server_address self.dns_servers = dns_servers self.lease_time = lease_time self.options = options def get_obj_dict(self): body = {'resource_type': self.resource_type} if self.server_address: body['server_address'] = self.server_address if self.dns_servers: body['dns_servers'] = self.dns_servers if self.lease_time: body['lease_time'] = self.lease_time if self.options: body['options'] = ( self.options.get_obj_dict() if isinstance(self.options, DhcpOptions) else self.options) return body class DhcpOptions(object): def __init__(self, option_121=None, others=None, is_ipv6=False): if is_ipv6: self.resource_type = 'DhcpV6Options' else: self.resource_type = 'DhcpV4Options' self.option_121 = option_121 self.others = others def get_obj_dict(self): body = {'resource_type': self.resource_type} if self.option_121: body['option_121'] = self.option_121 if self.others: body['others'] = self.others return body class InterfaceSubnet(object): def __init__(self, ip_addresses, prefix_len): self.ip_addresses = ip_addresses self.prefix_len = prefix_len def get_obj_dict(self): body = {'ip_addresses': self.ip_addresses, 'prefix_len': self.prefix_len} return body class BaseSegmentDef(ResourceDef): def get_obj_dict(self): body = super(BaseSegmentDef, self).get_obj_dict() if self.has_attr('subnets'): subnets = [] if self.get_attr('subnets'): subnets = [subnet.get_obj_dict() for subnet in self.get_attr('subnets')] self._set_attr_if_specified(body, 'subnets', value=subnets) if self.has_attr('ip_pool_id'): ip_pool_id = self.get_attr('ip_pool_id') adv_cfg = self._get_adv_config(ip_pool_id) self._set_attr_if_specified(body, 'ip_pool_id', body_attr='advanced_config', value=adv_cfg) self._set_attrs_if_specified(body, ['domain_name', 'vlan_ids']) return body @staticmethod def resource_type(): return 'Segment' def _get_adv_config(self, ip_pool_id): ip_pool_def = IpPoolDef(ip_pool_id=ip_pool_id) ip_pool_path = ip_pool_def.get_resource_full_path() return {'address_pool_paths': [ip_pool_path]} class Tier1SegmentDef(BaseSegmentDef): '''Tier1 segments can not move to different tier1 ''' @property def path_pattern(self): return TIER1S_PATH_PATTERN + "%s/segments/" @property def path_ids(self): return ('tenant', 'tier1_id', 'segment_id') def path_defs(self): return (TenantDef, Tier1Def) class SegmentDef(BaseSegmentDef): '''These segments don't belong to particular tier1. 
And can be attached and re-attached to different tier1s ''' @property def path_pattern(self): return SEGMENTS_PATH_PATTERN @property def path_ids(self): return ('tenant', 'segment_id') def path_defs(self): return (TenantDef,) @property def version_dependant_attr_map(self): return {'metadata_proxy_id': nsx_constants.NSX_VERSION_3_0_0, 'dhcp_server_config_id': nsx_constants.NSX_VERSION_3_0_0, 'admin_state': nsx_constants.NSX_VERSION_3_0_0} def get_obj_dict(self): body = super(SegmentDef, self).get_obj_dict() if self.has_attr('tier1_id'): path = "" if self.get_attr('tier1_id'): tier1 = Tier1Def(tier1_id=self.get_attr('tier1_id'), tenant=self.get_tenant()) path = tier1.get_resource_full_path() self._set_attr_if_specified(body, 'tier1_id', body_attr='connectivity_path', value=path) if self.has_attr('tier0_id'): path = "" if self.get_attr('tier0_id'): tier0 = Tier0Def(tier0_id=self.get_attr('tier0_id'), tenant=self.get_tenant()) path = tier0.get_resource_full_path() self._set_attr_if_specified(body, 'tier0_id', body_attr='connectivity_path', value=path) if self.has_attr('transport_zone_id'): path = "" if self.get_attr('transport_zone_id'): tz = TransportZoneDef( tz_id=self.get_attr('transport_zone_id'), ep_id=constants.DEFAULT_ENFORCEMENT_POINT, tenant=self.get_tenant()) path = tz.get_resource_full_path() self._set_attr_if_specified(body, 'transport_zone_id', body_attr='transport_zone_path', value=path) if (self.has_attr('metadata_proxy_id') and self._version_dependant_attr_supported('metadata_proxy_id')): # To remove the metadata proxy, paths must be set to None paths = None if self.get_attr('metadata_proxy_id'): mdproxy = MetadataProxyDef( mdproxy_id=self.get_attr('metadata_proxy_id'), tenant=self.get_tenant()) paths = [mdproxy.get_resource_full_path()] self._set_attr_if_specified(body, 'metadata_proxy_id', body_attr='metadata_proxy_paths', value=paths) # TODO(asarfaty): Also support relay config here if (self.has_attr('dhcp_server_config_id') and self._version_dependant_attr_supported('dhcp_server_config_id')): # To remove the dhcp config, path must be set to None path = None if self.get_attr('dhcp_server_config_id'): dhcp_config = DhcpServerConfigDef( config_id=self.get_attr('dhcp_server_config_id'), tenant=self.get_tenant()) path = dhcp_config.get_resource_full_path() self._set_attr_if_specified(body, 'dhcp_server_config_id', body_attr='dhcp_config_path', value=path) if (self.has_attr('admin_state') and self._version_dependant_attr_supported('admin_state')): if self.get_attr('admin_state'): admin_state = nsx_constants.ADMIN_STATE_UP else: admin_state = nsx_constants.ADMIN_STATE_DOWN self._set_attr_if_specified(body, 'admin_state', value=admin_state) return body class DhcpV4StaticBindingConfig(ResourceDef): @property def path_pattern(self): return SEGMENTS_PATH_PATTERN + "%s/dhcp-static-binding-configs/" @property def path_ids(self): return ('tenant', 'segment_id', 'binding_id') @staticmethod def resource_type(): return 'DhcpV4StaticBindingConfig' def path_defs(self): return (TenantDef, SegmentDef) def get_obj_dict(self): body = super(DhcpV4StaticBindingConfig, self).get_obj_dict() # TODO(asarfaty): add object or v4/6 options self._set_attrs_if_specified(body, ['gateway_address', 'host_name', 'ip_address', 'lease_time', 'mac_address', 'options']) return body class DhcpV6StaticBindingConfig(DhcpV4StaticBindingConfig): @staticmethod def resource_type(): return 'DhcpV6StaticBindingConfig' def path_defs(self): return (TenantDef, SegmentDef) def get_obj_dict(self): body = 
super(DhcpV6StaticBindingConfig, self).get_obj_dict() self._set_attrs_if_specified(body, ['domain_names', 'dns_nameservers', 'ip_addresses', 'sntp_servers', 'preferred_time']) return body class PortAddressBinding(object): def __init__(self, ip_address, mac_address, vlan_id=None): self.ip_address = ip_address self.mac_address = mac_address self.vlan_id = vlan_id def get_obj_dict(self): data = {'ip_address': self.ip_address, 'mac_address': self.mac_address} if self.vlan_id is not None: data['vlan_id'] = self.vlan_id return data class SegmentPortDef(ResourceDef): '''Infra segment port''' @property def path_pattern(self): return SEGMENTS_PATH_PATTERN + "%s/ports/" @property def path_ids(self): return ('tenant', 'segment_id', 'port_id') @staticmethod def resource_type(): return 'SegmentPort' def path_defs(self): return (TenantDef, SegmentDef) def get_obj_dict(self): body = super(SegmentPortDef, self).get_obj_dict() address_bindings = self.get_attr('address_bindings') if address_bindings: body['address_bindings'] = [binding.get_obj_dict() for binding in address_bindings] if (self.has_attr('attachment_type') or self.has_attr('vif_id') or self.has_attr('hyperbus_mode')): if (not self.get_attr('attachment_type') and not self.get_attr('vif_id') and not self.get_attr('hyperbus_mode')): # detach operation body['attachment'] = None else: attachment = {} if self.get_attr('attachment_type'): attachment['type'] = self.get_attr('attachment_type') if self.get_attr('vif_id'): attachment['id'] = self.get_attr('vif_id') if self.get_attr('hyperbus_mode'): self._set_attr_if_supported(attachment, 'hyperbus_mode') self._set_attrs_if_specified(attachment, ['context_id', 'app_id', 'traffic_tag', 'allocate_addresses']) body['attachment'] = attachment if (self.has_attr('admin_state') and self._version_dependant_attr_supported('admin_state')): if self.get_attr('admin_state'): admin_state = nsx_constants.ADMIN_STATE_UP else: admin_state = nsx_constants.ADMIN_STATE_DOWN self._set_attr_if_specified(body, 'admin_state', value=admin_state) return body @property def version_dependant_attr_map(self): return {'hyperbus_mode': nsx_constants.NSX_VERSION_3_0_0, 'admin_state': nsx_constants.NSX_VERSION_3_0_0} class SegmentBindingMapDefBase(ResourceDef): @property def path_ids(self): return ('tenant', 'segment_id', 'map_id') def path_defs(self): return (TenantDef, SegmentDef) class SegmentSecProfilesBindingMapDef(SegmentBindingMapDefBase): @property def path_pattern(self): return (SEGMENTS_PATH_PATTERN + "%s/segment-security-profile-binding-maps/") @staticmethod def resource_type(): return 'SegmentSecurityProfileBindingMap' def get_obj_dict(self): body = super(SegmentSecProfilesBindingMapDef, self).get_obj_dict() if self.has_attr('segment_security_profile_id'): path = "" if self.get_attr('segment_security_profile_id'): profile = SegmentSecurityProfileDef( profile_id=self.get_attr('segment_security_profile_id'), tenant=self.get_tenant()) path = profile.get_resource_full_path() self._set_attr_if_specified( body, 'segment_security_profile_id', body_attr='segment_security_profile_path', value=path) if self.has_attr('spoofguard_profile_id'): path = "" if self.get_attr('spoofguard_profile_id'): profile = SpoofguardProfileDef( profile_id=self.get_attr('spoofguard_profile_id'), tenant=self.get_tenant()) path = profile.get_resource_full_path() self._set_attr_if_specified( body, 'spoofguard_profile_id', body_attr='spoofguard_profile_path', value=path) return body class SegmentPortBindingMapDefBase(ResourceDef): @property def path_ids(self): 
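# Usage sketch (illustrative only; all values are arbitrary examples and
# 'PARENT' is just a sample attachment type): a SegmentPortDef as defined
# above can carry address bindings and a VIF attachment:
#
#     binding = PortAddressBinding(ip_address='10.0.0.5',
#                                  mac_address='fa:16:3e:aa:bb:cc',
#                                  vlan_id=100)
#     port_def = SegmentPortDef(segment_id='seg-1', port_id='port-1',
#                               name='port-1',
#                               address_bindings=[binding],
#                               attachment_type='PARENT',
#                               vif_id='vif-uuid')
#     body = port_def.get_obj_dict()
#     # body now includes 'address_bindings' and an 'attachment' dict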
return ('tenant', 'segment_id', 'port_id', 'map_id') def path_defs(self): return (TenantDef, SegmentDef, SegmentPortDef) class SegmentPortSecProfilesBindingMapDef(SegmentPortBindingMapDefBase): @property def path_pattern(self): return (SEGMENTS_PATH_PATTERN + "%s/ports/%s/port-security-profile-binding-maps/") @staticmethod def resource_type(): return 'PortSecurityProfileBindingMap' def get_obj_dict(self): body = super(SegmentPortSecProfilesBindingMapDef, self).get_obj_dict() if self.has_attr('segment_security_profile_id'): path = "" if self.get_attr('segment_security_profile_id'): profile = SegmentSecurityProfileDef( profile_id=self.get_attr('segment_security_profile_id'), tenant=self.get_tenant()) path = profile.get_resource_full_path() self._set_attr_if_specified( body, 'segment_security_profile_id', body_attr='segment_security_profile_path', value=path) if self.has_attr('spoofguard_profile_id'): path = "" if self.get_attr('spoofguard_profile_id'): profile = SpoofguardProfileDef( profile_id=self.get_attr('spoofguard_profile_id'), tenant=self.get_tenant()) path = profile.get_resource_full_path() self._set_attr_if_specified( body, 'spoofguard_profile_id', body_attr='spoofguard_profile_path', value=path) return body class SegmentPortDiscoveryProfilesBindingMapDef(SegmentPortBindingMapDefBase): @property def path_pattern(self): return (SEGMENTS_PATH_PATTERN + "%s/ports/%s/port-discovery-profile-binding-maps/") @staticmethod def resource_type(): return 'PortDiscoveryProfileBindingMap' def get_obj_dict(self): body = super(SegmentPortDiscoveryProfilesBindingMapDef, self).get_obj_dict() if self.has_attr('mac_discovery_profile_id'): path = "" if self.get_attr('mac_discovery_profile_id'): profile = MacDiscoveryProfileDef( profile_id=self.get_attr('mac_discovery_profile_id'), tenant=self.get_tenant()) path = profile.get_resource_full_path() self._set_attr_if_specified( body, 'mac_discovery_profile_id', body_attr='mac_discovery_profile_path', value=path) if self.has_attr('ip_discovery_profile_id'): path = "" if self.get_attr('ip_discovery_profile_id'): profile = IpDiscoveryProfileDef( profile_id=self.get_attr('ip_discovery_profile_id'), tenant=self.get_tenant()) path = profile.get_resource_full_path() self._set_attr_if_specified( body, 'ip_discovery_profile_id', body_attr='ip_discovery_profile_path', value=path) return body class SegmentPortQoSProfilesBindingMapDef(SegmentPortBindingMapDefBase): @property def path_pattern(self): return (SEGMENTS_PATH_PATTERN + "%s/ports/%s/port-qos-profile-binding-maps/") @staticmethod def resource_type(): return 'PortQoSProfileBindingMap' def get_obj_dict(self): body = super(SegmentPortQoSProfilesBindingMapDef, self).get_obj_dict() if self.has_attr('qos_profile_id'): path = "" if self.get_attr('qos_profile_id'): profile = QosProfileDef( profile_id=self.get_attr('qos_profile_id'), tenant=self.get_tenant()) path = profile.get_resource_full_path() self._set_attr_if_specified( body, 'qos_profile_id', body_attr='qos_profile_path', value=path) return body class Tier1SegmentPortDef(SegmentPortDef): '''Tier1 segment port''' @property def path_pattern(self): return TIER1S_PATH_PATTERN + "%s/segments/%s/ports/" @property def path_ids(self): return ('tenant', 'tier1_id', 'segment_id', 'port_id') def path_defs(self): return (TenantDef, Tier1Def, SegmentDef) class IpBlockDef(ResourceDef): '''Infra IpBlock''' @property def path_pattern(self): return IP_BLOCKS_PATH_PATTERN @property def path_ids(self): return ('tenant', 'ip_block_id') @staticmethod def resource_type(): return 
'IpAddressBlock' def path_defs(self): return (TenantDef,) def get_obj_dict(self): body = super(IpBlockDef, self).get_obj_dict() self._set_attr_if_specified(body, 'cidr') return body class IpPoolDef(ResourceDef): '''Infra IpPool''' @property def path_pattern(self): return IP_POOLS_PATH_PATTERN @property def path_ids(self): return ('tenant', 'ip_pool_id') @staticmethod def resource_type(): return 'IpAddressPool' def path_defs(self): return (TenantDef,) class IpPoolAllocationDef(ResourceDef): '''Infra IpPoolAllocation''' @property def path_pattern(self): return IP_POOLS_PATH_PATTERN + "%s/ip-allocations/" @property def path_ids(self): return ('tenant', 'ip_pool_id', 'ip_allocation_id') @staticmethod def resource_type(): return 'IpAddressAllocation' def path_defs(self): return (TenantDef, IpPoolDef) def get_obj_dict(self): body = super(IpPoolAllocationDef, self).get_obj_dict() self._set_attr_if_specified(body, 'allocation_ip') return body class IpPoolSubnetDef(ResourceDef): '''Infra IpPool Subnet''' @property def path_pattern(self): return IP_POOLS_PATH_PATTERN + "%s/ip-subnets/" @property def path_ids(self): return ('tenant', 'ip_pool_id', 'ip_subnet_id') @classmethod def resource_class(cls): return 'IpAddressPoolSubnet' def path_defs(self): return (TenantDef, IpPoolDef) class IpPoolBlockSubnetDef(IpPoolSubnetDef): '''Infra IpPoolSubnet belonging to IpBlock''' @staticmethod def resource_type(): return 'IpAddressPoolBlockSubnet' def get_obj_dict(self): body = super(IpPoolBlockSubnetDef, self).get_obj_dict() self._set_attrs_if_specified(body, ['auto_assign_gateway', 'size']) if self.has_attr('ip_block_id'): # Format the IP Block ID to its path ip_block_id = self.get_attr('ip_block_id') ip_block_def = IpBlockDef(ip_block_id=ip_block_id, tenant=self.get_tenant()) ip_block_path = ip_block_def.get_resource_full_path() self._set_attr_if_specified( body, 'ip_block_id', body_attr='ip_block_path', value=ip_block_path) self._set_attr_if_supported(body, 'start_ip') return body @property def version_dependant_attr_map(self): return {'start_ip': nsx_constants.NSX_VERSION_3_0_0} class IpPoolStaticSubnetDef(IpPoolSubnetDef): '''Infra IpPool static subnet''' @staticmethod def resource_type(): return 'IpAddressPoolStaticSubnet' def get_obj_dict(self): body = super(IpPoolStaticSubnetDef, self).get_obj_dict() self._set_attrs_if_specified(body, ['cidr', 'allocation_ranges', 'gateway_ip']) return body class Condition(object): def __init__(self, value, key=constants.CONDITION_KEY_TAG, member_type=constants.CONDITION_MEMBER_PORT, operator=constants.CONDITION_OP_EQUALS): self.value = value self.key = key self.member_type = member_type self.operator = operator def get_obj_dict(self): return {'resource_type': 'Condition', 'member_type': self.member_type, 'key': self.key, 'value': self.value, 'operator': self.operator} class IPAddressExpression(object): def __init__(self, ip_addresses): self.ip_addresses = ip_addresses def get_obj_dict(self): return {'resource_type': 'IPAddressExpression', 'ip_addresses': self.ip_addresses} class PathExpression(object): def __init__(self, paths): self.paths = paths def get_obj_dict(self): return {'resource_type': 'PathExpression', 'paths': self.paths} class ConjunctionOperator(object): def __init__(self, operator=constants.CONDITION_OP_AND): self.operator = operator def get_obj_dict(self): return {'resource_type': 'ConjunctionOperator', 'conjunction_operator': self.operator} class NestedExpression(object): def __init__(self, expressions=None): self.expressions = expressions or [] def 
get_obj_dict(self): return {'resource_type': 'NestedExpression', 'expressions': [ex.get_obj_dict() for ex in self.expressions]} class GroupDef(ResourceDef): @property def path_pattern(self): return DOMAINS_PATH_PATTERN + "%s/groups/" @property def path_ids(self): return ('tenant', 'domain_id', 'group_id') @staticmethod def resource_type(): return 'Group' def path_defs(self): return (TenantDef, DomainDef) def get_obj_dict(self): body = super(GroupDef, self).get_obj_dict() conds = self.get_attr('conditions') # If conditions were IGNORE, conds would be None here. # Otherwise, conds could be an empty list which denotes # updating group expression to empty list. if conds is not None: conds = conds if isinstance(conds, list) else [conds] body['expression'] = [condition.get_obj_dict() for condition in conds] return body class ServiceDef(ResourceDef): def __init__(self, **kwargs): super(ServiceDef, self).__init__(**kwargs) self.service_entries = [] @property def path_pattern(self): return SERVICES_PATH_PATTERN @property def path_ids(self): return ('tenant', 'service_id') @staticmethod def resource_type(): return 'Service' def path_defs(self): return (TenantDef,) def get_obj_dict(self): body = super(ServiceDef, self).get_obj_dict() entries = [entry.get_obj_dict() for entry in self.service_entries] if entries: body['service_entries'] = entries return body @staticmethod def sub_entries_path(): return ServiceEntryDef().get_last_section_dict_key class ServiceEntryDef(ResourceDef): @property def path_pattern(self): return SERVICES_PATH_PATTERN + "%s/service-entries/" @property def path_ids(self): return ('tenant', 'service_id', 'entry_id') def path_defs(self): return (TenantDef, ServiceDef) @classmethod def resource_class(cls): return 'ServiceEntry' class L4ServiceEntryDef(ServiceEntryDef): @staticmethod def resource_type(): return 'L4PortSetServiceEntry' def get_obj_dict(self): body = super(L4ServiceEntryDef, self).get_obj_dict() self._set_attr_if_specified(body, 'protocol', 'l4_protocol') self._set_attr_if_specified(body, 'dest_ports', 'destination_ports') self._set_attr_if_specified(body, 'source_ports', 'source_ports') return body class IcmpServiceEntryDef(ServiceEntryDef): @staticmethod def resource_type(): return 'ICMPTypeServiceEntry' def get_obj_dict(self): body = super(IcmpServiceEntryDef, self).get_obj_dict() if self.get_attr('version'): body['protocol'] = 'ICMPv' + str(self.get_attr('version')) for attr in ('icmp_type', 'icmp_code'): # Note that icmp_type and icmp_code could be 0. if self.get_attr(attr) is not None: body[attr] = self.get_attr(attr) return body class IPProtocolServiceEntryDef(ServiceEntryDef): @staticmethod def resource_type(): return 'IPProtocolServiceEntry' def get_obj_dict(self): body = super(IPProtocolServiceEntryDef, self).get_obj_dict() if self.get_attr('protocol_number') is not None: # Note that protocol_number could be 0. 
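# Note on the expression helpers defined above (Condition,
# IPAddressExpression, ConjunctionOperator, NestedExpression): they are
# passed to a GroupDef via the 'conditions' attribute and serialized into
# the group's 'expression' list. An illustrative sketch with example
# values (whether NSX accepts a given combination is up to the backend):
#
#     cond = Condition('green',
#                      key=constants.CONDITION_KEY_TAG,
#                      member_type=constants.CONDITION_MEMBER_PORT,
#                      operator=constants.CONDITION_OP_EQUALS)
#     ips = IPAddressExpression(['192.168.100.0/24'])
#     group_def = GroupDef(domain_id='default', group_id='group-1',
#                          name='group-1',
#                          conditions=[cond, ConjunctionOperator(), ips])
#     expression = group_def.get_obj_dict()['expression']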
body['protocol_number'] = self.get_attr('protocol_number') return body class SecurityPolicyBaseDef(ResourceDef): @property def path_ids(self): return ('tenant', 'domain_id', 'map_id') def path_defs(self): return (TenantDef, DomainDef) def get_obj_dict(self): body = super(SecurityPolicyBaseDef, self).get_obj_dict() self._set_attr_if_specified(body, 'category') if self.has_attr('map_sequence_number'): seq_number = self.get_attr('map_sequence_number') self._set_attr_if_specified(body, 'map_sequence_number', body_attr='sequence_number', value=seq_number) return body class CommunicationMapDef(SecurityPolicyBaseDef): """AKA security policy""" @property def path_pattern(self): return (DOMAINS_PATH_PATTERN + "%s/security-policies/") @staticmethod def resource_type(): return 'SecurityPolicy' @staticmethod def sub_entries_path(): return CommunicationMapEntryDef().get_last_section_dict_key class GatewayPolicyDef(SecurityPolicyBaseDef): @property def path_pattern(self): return (DOMAINS_PATH_PATTERN + "%s/gateway-policies/") @staticmethod def resource_type(): return 'GatewayPolicy' @staticmethod def sub_entries_path(): return GatewayPolicyRuleDef().get_last_section_dict_key class SecurityPolicyRuleBaseDef(ResourceDef): def get_groups_path(self, domain_id, group_ids): if not group_ids: return [constants.ANY_GROUP] return [GroupDef(domain_id=domain_id, group_id=group_id, tenant=self.get_tenant()).get_resource_full_path() for group_id in group_ids] def get_service_path(self, service_id): return ServiceDef( service_id=service_id, tenant=self.get_tenant()).get_resource_full_path() def get_services_path(self, service_ids): if service_ids: return [self.get_service_path(service_id) for service_id in service_ids] return [constants.ANY_SERVICE] @property def path_ids(self): return ('tenant', 'domain_id', 'map_id', 'entry_id') @staticmethod def resource_type(): return 'Rule' def get_obj_dict(self): body = super(SecurityPolicyRuleBaseDef, self).get_obj_dict() domain_id = self.get_attr('domain_id') if self.has_attr('source_groups'): body['source_groups'] = self.get_groups_path( domain_id, self.get_attr('source_groups')) if self.has_attr('dest_groups'): body['destination_groups'] = self.get_groups_path( domain_id, self.get_attr('dest_groups')) self._set_attrs_if_specified(body, ['sequence_number', 'scope', 'action', 'direction', 'logged', 'ip_protocol', 'tag']) if self.has_attr('service_ids'): service_ids = self.get_attr('service_ids') body['services'] = self.get_services_path(service_ids) self._set_attr_if_supported(body, 'service_entries') return body @classmethod def adapt_from_rule_dict(cls, rule_dict, domain_id, map_id): entry_id = rule_dict.pop('id', None) name = rule_dict.pop('display_name', None) rule_def = cls(tenant=constants.POLICY_INFRA_TENANT, domain_id=domain_id, map_id=map_id, entry_id=entry_id, name=name) rule_def.set_obj_dict(rule_dict) return rule_def @property def version_dependant_attr_map(self): return {'service_entries': nsx_constants.NSX_VERSION_3_0_0} class CommunicationMapEntryDef(SecurityPolicyRuleBaseDef): @property def path_pattern(self): return (DOMAINS_PATH_PATTERN + "%s/security-policies/%s/rules/") def path_defs(self): return (TenantDef, DomainDef, CommunicationMapDef) class GatewayPolicyRuleDef(SecurityPolicyRuleBaseDef): @property def path_pattern(self): return (DOMAINS_PATH_PATTERN + "%s/gateway-policies/%s/rules/") def path_defs(self): return (TenantDef, DomainDef, GatewayPolicyDef) # Currently supports only NSXT class EnforcementPointDef(ResourceDef): @property def 
path_pattern(self): return ENFORCEMENT_POINT_PATTERN @property def path_ids(self): return ('tenant', 'ep_id') @staticmethod def resource_type(): return 'EnforcementPoint' def path_defs(self): return (TenantDef,) def get_obj_dict(self): body = super(EnforcementPointDef, self).get_obj_dict() body['id'] = self.get_id() if 'connection_info' not in body: body['connection_info'] = {'resource_type': 'NSXTConnectionInfo'} info = body['connection_info'] self._set_attrs_if_specified(info, ['thumbprint', 'username', 'password', 'ip_address']) if self.get_attr('ip_address'): info['enforcement_point_address'] = self.get_attr('ip_address') if self.get_attr('edge_cluster_id'): body['connection_info']['edge_cluster_ids'] = [ self.get_attr('edge_cluster_id')] if self.get_attr('transport_zone_id'): body['connection_info']['transport_zone_ids'] = [ self.get_attr('transport_zone_id')] return body class TransportZoneDef(ResourceDef): @property def path_pattern(self): return TRANSPORT_ZONE_PATTERN @property def path_ids(self): return ('tenant', 'ep_id', 'tz_id') @staticmethod def resource_type(): return 'PolicyTransportZone' @staticmethod def resource_use_cache(): return True class EdgeClusterDef(ResourceDef): @property def path_pattern(self): return EDGE_CLUSTER_PATTERN @property def path_ids(self): return ('tenant', 'ep_id', 'ec_id') @staticmethod def resource_type(): return 'PolicyEdgeCluster' @staticmethod def resource_use_cache(): return True class EdgeClusterNodeDef(ResourceDef): @property def path_pattern(self): return (EDGE_CLUSTER_PATTERN + '%s/edge-nodes/') @property def path_ids(self): return ('tenant', 'ep_id', 'ec_id', 'node_id') @staticmethod def resource_type(): return 'PolicyEdgeNode' @staticmethod def resource_use_cache(): return True # Currently assumes one deployment point per id class DeploymentMapDef(ResourceDef): @property def path_pattern(self): return (DOMAINS_PATH_PATTERN + '%s/domain-deployment-maps/') @property def path_ids(self): return ('tenant', 'domain_id', 'map_id') @staticmethod def resource_type(): return 'DeploymentMap' def path_defs(self): return (TenantDef, DomainDef) def get_obj_dict(self): body = super(DeploymentMapDef, self).get_obj_dict() body['id'] = self.get_id() ep_id = self.get_attr('ep_id') tenant = self.get_tenant() body['enforcement_point_path'] = EnforcementPointDef( ep_id=ep_id, tenant=tenant).get_resource_full_path() if ep_id else None return body class SegmentSecurityProfileDef(ResourceDef): DEFAULT_PROFILE = 'default-segment-security-profile' @property def path_pattern(self): return SEGMENT_SECURITY_PROFILES_PATH_PATTERN @property def path_ids(self): return ('tenant', 'profile_id') @staticmethod def resource_type(): return 'SegmentSecurityProfile' def path_defs(self): return (TenantDef,) def get_obj_dict(self): body = super(SegmentSecurityProfileDef, self).get_obj_dict() self._set_attrs_if_specified(body, ['bpdu_filter_enable', 'dhcp_client_block_enabled', 'dhcp_client_block_v6_enabled', 'dhcp_server_block_enabled', 'dhcp_server_block_v6_enabled', 'non_ip_traffic_block_enabled', 'ra_guard_enabled', 'rate_limits_enabled']) return body class QoSObjectBase(object): keys = [] def __init__(self, **kwargs): self.attrs = kwargs def get_obj_dict(self): obj_dict = {} for key in self.attrs: if key in self.keys: obj_dict[key] = self.attrs[key] return obj_dict class QoSRateLimiter(QoSObjectBase): INGRESS_RATE_LIMITER_TYPE = 'IngressRateLimiter' EGRESS_RATE_LIMITER_TYPE = 'EgressRateLimiter' INGRESS_BRD_RATE_LIMITER_TYPE = 'IngressBroadcastRateLimiter' keys = 
['resource_type', 'average_bandwidth', # Mb/s 'peak_bandwidth', # Mb/s 'burst_size', # byes 'enabled' ] class QoSDscp(QoSObjectBase): QOS_DSCP_TRUSTED = 'TRUSTED' QOS_DSCP_UNTRUSTED = 'UNTRUSTED' keys = ['mode', 'priority'] class QosProfileDef(ResourceDef): @property def path_pattern(self): return QOS_PROFILES_PATH_PATTERN @property def path_ids(self): return ('tenant', 'profile_id') @staticmethod def resource_type(): return 'QoSProfile' def path_defs(self): return (TenantDef,) def get_obj_dict(self): body = super(QosProfileDef, self).get_obj_dict() self._set_attr_if_specified(body, 'class_of_service') if self.has_attr('dscp'): value = None if self.get_attr('dscp'): value = self.get_attr('dscp').get_obj_dict() self._set_attr_if_specified(body, 'dscp', value=value) if self.has_attr('shaper_configurations'): value = None if self.get_attr('shaper_configurations'): value = [s.get_obj_dict() for s in self.get_attr('shaper_configurations')] self._set_attr_if_specified(body, 'shaper_configurations', value=value) return body class SpoofguardProfileDef(ResourceDef): DEFAULT_PROFILE = 'default-spoofguard-profile' @property def path_pattern(self): return SPOOFGUARD_PROFILES_PATH_PATTERN @property def path_ids(self): return ('tenant', 'profile_id') @staticmethod def resource_type(): return 'SpoofGuardProfile' def path_defs(self): return (TenantDef,) def get_obj_dict(self): body = super(SpoofguardProfileDef, self).get_obj_dict() # TODO(asarfaty): add all attributes here self._set_attr_if_specified(body, 'address_binding_whitelist') return body class IpDiscoveryProfileDef(ResourceDef): DEFAULT_PROFILE = 'default-ip-discovery-profile' @property def path_pattern(self): return IP_DISCOVERY_PROFILES_PATH_PATTERN @property def path_ids(self): return ('tenant', 'profile_id') @staticmethod def resource_type(): return 'IPDiscoveryProfile' def path_defs(self): return (TenantDef,) def get_obj_dict(self): body = super(IpDiscoveryProfileDef, self).get_obj_dict() # TODO(asarfaty): add all attributes here. 
currently used for read only return body class MacDiscoveryProfileDef(ResourceDef): DEFAULT_PROFILE = 'default-mac-discovery-profile' @property def path_pattern(self): return MAC_DISCOVERY_PROFILES_PATH_PATTERN @property def path_ids(self): return ('tenant', 'profile_id') @staticmethod def resource_type(): return 'MacDiscoveryProfile' def path_defs(self): return (TenantDef,) def get_obj_dict(self): body = super(MacDiscoveryProfileDef, self).get_obj_dict() self._set_attrs_if_specified(body, ['mac_change_enabled', 'mac_learning_enabled', 'unknown_unicast_flooding_enabled', 'mac_limit_policy', 'mac_limit']) return body class Ipv6NdraProfileDef(ResourceDef): @property def path_pattern(self): return IPV6_NDRA_PROFILES_PATH_PATTERN @property def path_ids(self): return ('tenant', 'profile_id') @staticmethod def resource_type(): return 'Ipv6NdraProfile' @staticmethod def default_profile(): return 'default' def path_defs(self): return (TenantDef,) def get_obj_dict(self): body = super(Ipv6NdraProfileDef, self).get_obj_dict() self._set_attrs_if_specified(body, ['ra_mode', 'reachable_timer', 'retransmit_interval']) # Use default settings for dns and RA for now # TODO(annak): expose when required body['dns_config'] = {} body['ra_config'] = {} return body class DhcpRelayConfigDef(ResourceDef): @property def path_pattern(self): return DHCP_REALY_PATTERN @property def path_ids(self): return ('tenant', 'config_id') @staticmethod def resource_type(): return 'DhcpRelayConfig' def path_defs(self): return (TenantDef,) def get_obj_dict(self): body = super(DhcpRelayConfigDef, self).get_obj_dict() self._set_attr_if_specified(body, 'server_addresses') return body class DhcpServerConfigDef(ResourceDef): @property def path_pattern(self): return DHCP_SERVER_PATTERN @property def path_ids(self): return ('tenant', 'config_id') @staticmethod def resource_type(): return 'DhcpServerConfig' def path_defs(self): return (TenantDef,) def get_obj_dict(self): body = super(DhcpServerConfigDef, self).get_obj_dict() self._set_attrs_if_specified(body, ['edge_cluster_path', 'server_addresses', 'lease_time']) return body class WAFProfileDef(ResourceDef): @property def path_pattern(self): return WAF_PROFILES_PATH_PATTERN @property def path_ids(self): return ('tenant', 'profile_id') @staticmethod def resource_type(): return 'WAFProfile' def path_defs(self): return (TenantDef,) def get_obj_dict(self): body = super(WAFProfileDef, self).get_obj_dict() # TODO(asarfaty): add all attributes here. 
# Currently used for read only return body class MetadataProxyDef(ResourceDef): @property def path_pattern(self): return MDPROXY_PATTERN @property def path_ids(self): return ('tenant', 'mdproxy_id') @staticmethod def resource_type(): return 'MetadataProxyConfig' @staticmethod def resource_use_cache(): return True def path_defs(self): return (TenantDef,) def get_obj_dict(self): body = super(MetadataProxyDef, self).get_obj_dict() self._set_attrs_if_specified(body, ['edge_cluster_path', 'enable_standby_relocation', 'secret', 'server_address']) return body class CertificateDef(ResourceDef): @property def path_pattern(self): return CERTIFICATE_PATH_PATTERN @property def path_ids(self): return ('tenant', 'certificate_id') @staticmethod def resource_type(): return "TlsTrustData" def get_obj_dict(self): body = super(CertificateDef, self).get_obj_dict() self._set_attrs_if_specified(body, ['pem_encoded', 'key_algo', 'private_key', 'passphrase']) return body class GlobalConfigDef(ResourceDef): @property def path_pattern(self): return GLOBAL_CONFIG_PATH_PATTERN @property def path_ids(self): # Adding dummy 2nd key to satisfy get_section_path # This resource has no keys, since it is a single object return ('tenant', 'dummy') @staticmethod def resource_type(): return "GlobalConfig" def get_obj_dict(self): body = super(GlobalConfigDef, self).get_obj_dict() self._set_attrs_if_specified(body, ['l3_forwarding_mode']) return body class ExcludeListDef(ResourceDef): @property def path_pattern(self): return EXCLUDE_LIST_PATH_PATTERN @property def path_ids(self): # Adding dummy 2nd key to satisfy get_section_path # This resource has no keys, since it is a single object return ('tenant', 'Dummy') @staticmethod def resource_type(): return "PolicyExcludeList" def get_obj_dict(self): body = super(ExcludeListDef, self).get_obj_dict() self._set_attr_if_specified(body, 'members') return body class NsxPolicyApi(object): def __init__(self, client): self.client = client self.cache = utils.NsxLibCache(utils.DEFAULT_CACHE_AGE_SEC) self.partial_updates = True def disable_partial_updates(self): self.partial_updates = False def partial_updates_supported(self): return self.partial_updates def create_or_update(self, resource_def, partial_updates=False): """Create or update a policy object. This api will update an existing object, or create a new one if it doesn't exist. 
The policy API supports PATCH for create/update operations """ path = resource_def.get_resource_path() if resource_def.resource_use_cache(): self.cache.remove(path) body = resource_def.get_obj_dict() headers = None if partial_updates: headers = {'nsx-enable-partial-patch': 'true'} self.client.patch(path, body, headers=headers) def create_with_parent(self, parent_def, resource_def): path = parent_def.get_resource_path() body = parent_def.get_obj_dict() if isinstance(resource_def, list): child_dict_key = resource_def[0].get_last_section_dict_key body[child_dict_key] = [r.get_obj_dict() for r in resource_def] else: child_dict_key = resource_def.get_last_section_dict_key body[child_dict_key] = [resource_def.get_obj_dict()] self.client.patch(path, body) def delete(self, resource_def): path = resource_def.get_resource_path() if resource_def.resource_use_cache(): self.cache.remove(path) self.client.delete(path) def get(self, resource_def, silent=False): path = resource_def.get_resource_path() if resource_def.resource_use_cache(): # try to get it from the cache result = self.cache.get(path) if result: return result # call the client result = self.client.get(path, silent=silent) if resource_def.resource_use_cache(): # add the result to the cache self.cache.update(path, result) return result def list(self, resource_def, silent=False): path = resource_def.get_section_path() return self.client.list(path, silent=silent) def get_realized_entities(self, path, silent=False): return self.client.list(REALIZATION_PATH % path, silent=silent)['results'] def get_realized_entity(self, path, silent=False): # Return first realization entity if exists # Useful for resources with single realization entity entities = self.get_realized_entities(path, silent=silent) if entities: return entities[0] def get_realized_state(self, path, silent=False): entity = self.get_realized_entity(path, silent=silent) if entity: return entity['state'] class RouteMapEntry(object): def __init__(self, action, community_list_matches=None, prefix_list_matches=None, entry_set=None): self.action = action self.community_list_matches = community_list_matches self.prefix_list_matches = prefix_list_matches self.entry_set = entry_set def get_obj_dict(self): body = {'action': self.action} if self.community_list_matches: body['community_list_matches'] = [community.get_obj_dict() for community in self.community_list_matches] if self.prefix_list_matches: body['prefix_list_matches'] = ( self.prefix_list_matches if isinstance(self.prefix_list_matches, list) else [self.prefix_list_matches]) if self.entry_set: body['set'] = self.entry_set.get_obj_dict() return body class RouteMapEntrySet(object): def __init__(self, local_preference=100, as_path_prepend=None, community=None, med=None, weight=None): self.local_preference = local_preference self.as_path_prepend = as_path_prepend self.community = community self.med = med self.weight = weight def get_obj_dict(self): body = {'local_preference': self.local_preference} if self.as_path_prepend: body['as_path_prepend'] = self.as_path_prepend if self.community: body['community'] = self.community if self.med: body['med'] = self.med if self.weight: body['weight'] = self.weight return body class CommunityMatchCriteria(object): def __init__(self, criteria, match_operator=None): self.criteria = criteria self.match_operator = match_operator def get_obj_dict(self): body = {'criteria': self.criteria} if self.match_operator: body['match_operator'] = self.match_operator return body class Tier0RouteMapDef(ResourceDef): 
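    # Illustrative note (not part of the upstream source): the 'entries'
    # attribute consumed by this def's get_obj_dict() accepts RouteMapEntry
    # objects (defined above) or plain dicts. A hypothetical caller-side
    # sketch, with placeholder IDs and paths:
    #
    #     entry = RouteMapEntry(
    #         action=constants.ADV_RULE_PERMIT,
    #         prefix_list_matches=['/infra/tier-0s/t0/prefix-lists/pl1'],
    #         entry_set=RouteMapEntrySet(local_preference=200))
    #     route_map_def = Tier0RouteMapDef(
    #         tier0_id='t0', route_map_id='rm1', entries=[entry],
    #         tenant=constants.POLICY_INFRA_TENANT)
    #     body = route_map_def.get_obj_dict()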
@property def path_pattern(self): return TIER0S_PATH_PATTERN + "%s/route-maps/" @property def path_ids(self): return ('tenant', 'tier0_id', 'route_map_id') @staticmethod def resource_type(): return 'Tier0RouteMap' def path_defs(self): return (TenantDef, Tier0Def) def get_obj_dict(self): body = super(Tier0RouteMapDef, self).get_obj_dict() entries = self.get_attr('entries') if entries: entries = [entry.get_obj_dict() if isinstance(entry, RouteMapEntry) else entry for entry in self.get_attr('entries')] body['entries'] = entries return body class PrefixEntry(object): def __init__(self, network, le=None, ge=None, action=constants.ADV_RULE_PERMIT): self.network = network self.le = le self.ge = ge self.action = action def get_obj_dict(self): body = {'network': self.network, 'action': self.action} if self.le is not None: body['le'] = self.le if self.ge is not None: body['ge'] = self.ge return body class Tier0PrefixListDef(ResourceDef): @property def path_pattern(self): return TIER0S_PATH_PATTERN + "%s/prefix-lists/" @property def path_ids(self): return ('tenant', 'tier0_id', 'prefix_list_id') @staticmethod def resource_type(): return 'PrefixList' def path_defs(self): return (TenantDef, Tier0Def) def get_obj_dict(self): body = super(Tier0PrefixListDef, self).get_obj_dict() prefixes = self.get_attr('prefixes') if prefixes: prefixes = [prefix.get_obj_dict() for prefix in prefixes] body['prefixes'] = prefixes return body class BgpRoutingConfigDef(ResourceDef): @staticmethod def resource_type(): return 'BgpRoutingConfig' @property def path_pattern(self): return TIER0_LOCALE_SERVICES_PATH_PATTERN + "%s/bgp" @property def path_ids(self): # Adding dummy key to satisfy get_section_path # This resource has no keys, since it is a single object return ('tenant', 'tier0_id', 'service_id', 'dummy') class Tier0RouteRedistributionConfig(object): def __init__(self, enabled=None, redistribution_rules=None): self.enabled = enabled self.redistribution_rules = redistribution_rules def get_obj_dict(self): body = {} if self.enabled: body['enabled'] = self.enabled if self.redistribution_rules is not None: rules = [rule.get_obj_dict() if isinstance(rule, Tier0RouteRedistributionRule) else rule for rule in self.redistribution_rules] body['redistribution_rules'] = rules return body class Tier0RouteRedistributionRule(object): def __init__(self, name=None, route_redistribution_types=None, route_map_path=None): self.name = name self.route_redistribution_types = route_redistribution_types or [] self.route_map_path = route_map_path def get_obj_dict(self): body = {'route_redistribution_types': self.route_redistribution_types} if self.name: body['name'] = self.name if self.route_map_path: body['route_map_path'] = self.route_map_path return body vmware-nsxlib-15.0.6/vmware_nsxlib/v3/policy/utils.py0000664000175000017500000000125113623151571022647 0ustar zuulzuul00000000000000# Copyright 2019 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
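# Illustrative sketch (not part of the upstream module): demonstrates the
# intended behaviour of path_to_id() defined below, which reduces a full
# policy path to its trailing identifier. The sample path is a hypothetical
# placeholder.
def _example_path_to_id():
    sample_path = '/infra/domains/default/groups/web-servers'
    # path_to_id returns the last '/'-separated segment of the path
    return path_to_id(sample_path)  # -> 'web-servers'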
def path_to_id(path): return path.split('/')[-1] vmware-nsxlib-15.0.6/vmware_nsxlib/v3/policy/transaction.py0000664000175000017500000001614113623151571024040 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import threading from vmware_nsxlib._i18n import _ from vmware_nsxlib.v3 import exceptions from vmware_nsxlib.v3.policy import constants from vmware_nsxlib.v3.policy import core_defs class NsxPolicyTransactionException(exceptions.NsxLibException): message = _("Policy Transaction Error: %(msg)s") class NsxPolicyTransaction(object): # stores current transaction per thread # nested transactions not supported data = threading.local() def __init__(self): # For now only infra tenant is supported self.defs = [core_defs.TenantDef( tenant=constants.POLICY_INFRA_TENANT)] self.client = None def __enter__(self): if self.get_current(): raise NsxPolicyTransactionException( "Nested transactions not supported") self.data.instance = self return self def __exit__(self, e_type, e_value, e_traceback): # Always reset transaction regardless of exceptions self.data.instance = None if e_type: # If exception occured in the "with" block, raise it # without applying to backend return False # exception might happen here and will be raised self.apply_defs() def store_def(self, resource_def, client): if self.client and client != self.client: raise NsxPolicyTransactionException( "All operations under transaction must have same client") self.client = client # TODO(annak): raise exception for different tenants if isinstance(resource_def, list): self.defs.extend(resource_def) else: self.defs.append(resource_def) def _sort_defs(self): sorted_defs = [] while len(self.defs): for resource_def in self.defs: if resource_def in sorted_defs: continue # We want all parents to appear before the child if not resource_def.path_defs(): # top level resource sorted_defs.append(resource_def) continue parent_type = resource_def.path_defs()[-1] parents = [d for d in self.defs if isinstance(d, parent_type)] missing_parents = [d for d in parents if d not in sorted_defs] if not missing_parents: # All parents are appended to sorted list, child can go in sorted_defs.append(resource_def) unsorted = [d for d in self.defs if d not in sorted_defs] self.defs = unsorted self.defs = sorted_defs def _build_wrapper_dict(self, resource_class, node, delete=False): wrapper_dict = {'resource_type': 'Child%s' % resource_class, resource_class: node} if delete: wrapper_dict.update({'marked_for_delete': True}) return wrapper_dict def _find_parent_in_dict(self, d, resource_def, level=1): res_path_defs = resource_def.path_defs() if not res_path_defs or len(res_path_defs) <= level: return parent_type = resource_def.path_defs()[level] is_leaf = (level + 1 == len(res_path_defs)) resource_type = parent_type.resource_type() resource_class = parent_type.resource_class() parent_id = resource_def.get_attr(resource_def.path_ids[level]) def create_missing_node(): node = {'resource_type': resource_type, 
'id': parent_id, 'children': []} return self._build_wrapper_dict(resource_class, node), node # iterate over all objects in d, and look for resource type for child in d: if resource_type in child and child[resource_type]: parent = child[resource_type] # If resource type matches, check for id if parent['id'] == parent_id: if is_leaf: return parent if 'children' not in parent: parent['children'] = [] return self._find_parent_in_dict( parent['children'], resource_def, level + 1) # Parent not found - create a node for missing parent wrapper, node = create_missing_node() d.append(wrapper) if is_leaf: # This is the last parent that needs creation return node return self._find_parent_in_dict(node['children'], resource_def, level + 1) def apply_defs(self): # TODO(annak): find longest common URL, for now always # applying on tenant level if not self.defs or not self.client: # Empty transaction return self._sort_defs() top_def = self.defs[0] url = top_def.get_resource_path() body = {'resource_type': top_def.resource_type(), 'children': []} # iterate over defs (except top level def) for resource_def in self.defs[1:]: parent_dict = None if 'children' in body: parent_dict = self._find_parent_in_dict(body['children'], resource_def) if not parent_dict: # Top level resource parent_dict = body if 'children' not in parent_dict: parent_dict['children'] = [] resource_class = resource_def.resource_class() node = resource_def.get_obj_dict() if resource_def.mandatory_child_def: # This is a workaround for policy issue that involves required # children (see comment on definition of mandatory_child_def) # TODO(annak): remove when policy solves the issue child_def = resource_def.mandatory_child_def child_dict_key = child_def.get_last_section_dict_key node[child_dict_key] = [child_def.get_obj_dict()] parent_dict['children'].append( self._build_wrapper_dict(resource_class, node, resource_def.get_delete())) if body: headers = {'nsx-enable-partial-patch': 'true'} self.client.patch(url, body, headers=headers) @staticmethod def get_current(): if hasattr(NsxPolicyTransaction.data, 'instance'): return NsxPolicyTransaction.data.instance vmware-nsxlib-15.0.6/vmware_nsxlib/v3/policy/ipsec_vpn_resources.py0000664000175000017500000005055613623151571025603 0ustar zuulzuul00000000000000# Copyright 2019 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
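# Illustrative sketch (not part of the upstream module): a minimal example of
# how the IPsec VPN policy APIs defined in this module are typically consumed.
# The nsxpolicy handle, the assumption that it exposes NsxPolicyIpsecVpnApi as
# nsxpolicy.ipsec_vpn, and the tier-1 id / name values are hypothetical.
def _example_create_ipsec_vpn_service(nsxpolicy, tier1_id):
    vpn_api = nsxpolicy.ipsec_vpn
    # create an IKE profile; attributes left as IGNORE are not sent and
    # remain at their backend defaults
    ike_profile_id = vpn_api.ike_profile.create_or_overwrite(
        name='example-ike-profile')
    # enable an IPsec VPN service on the given tier-1 router
    vpn_service_id = vpn_api.service.create_or_overwrite(
        name='example-vpn-service', tier1_id=tier1_id, enabled=True)
    return ike_profile_id, vpn_service_id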
# from oslo_log import log as logging from vmware_nsxlib.v3.policy import constants from vmware_nsxlib.v3.policy import core_resources from vmware_nsxlib.v3.policy import ipsec_vpn_defs LOG = logging.getLogger(__name__) IGNORE = core_resources.IGNORE class NsxIpsecVpnIkeProfileApi(core_resources.NsxPolicyResourceBase): @property def entry_def(self): return ipsec_vpn_defs.IpsecVpnIkeProfileDef def create_or_overwrite(self, name, profile_id=None, description=IGNORE, ike_version=IGNORE, encryption_algorithms=IGNORE, digest_algorithms=IGNORE, dh_groups=IGNORE, sa_life_time=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): profile_id = self._init_obj_uuid(profile_id) profile_def = self._init_def( profile_id=profile_id, name=name, description=description, ike_version=ike_version, encryption_algorithms=encryption_algorithms, digest_algorithms=digest_algorithms, dh_groups=dh_groups, sa_life_time=sa_life_time, tags=tags, tenant=tenant) self._create_or_store(profile_def) return profile_id def delete(self, profile_id, tenant=constants.POLICY_INFRA_TENANT): profile_def = self.entry_def(profile_id=profile_id, tenant=tenant) self.policy_api.delete(profile_def) def get(self, profile_id, tenant=constants.POLICY_INFRA_TENANT): profile_def = self.entry_def(profile_id=profile_id, tenant=tenant) return self.policy_api.get(profile_def) def list(self, tenant=constants.POLICY_INFRA_TENANT): profile_def = self.entry_def(tenant=tenant) return self._list(profile_def) def get_by_name(self, name, tenant=constants.POLICY_INFRA_TENANT): return super(NsxIpsecVpnIkeProfileApi, self).get_by_name( name, tenant=tenant) def update(self, profile_id, name=IGNORE, description=IGNORE, ike_version=IGNORE, encryption_algorithms=IGNORE, digest_algorithms=IGNORE, dh_groups=IGNORE, sa_life_time=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): self._update(profile_id=profile_id, name=name, description=description, ike_version=ike_version, encryption_algorithms=encryption_algorithms, digest_algorithms=digest_algorithms, dh_groups=dh_groups, sa_life_time=sa_life_time, tags=tags, tenant=tenant) class NsxIpsecVpnTunnelProfileApi(core_resources.NsxPolicyResourceBase): @property def entry_def(self): return ipsec_vpn_defs.IpsecVpnTunnelProfileDef def create_or_overwrite(self, name, profile_id=None, description=IGNORE, enable_perfect_forward_secrecy=IGNORE, encryption_algorithms=IGNORE, digest_algorithms=IGNORE, dh_groups=IGNORE, sa_life_time=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): profile_id = self._init_obj_uuid(profile_id) profile_def = self._init_def( profile_id=profile_id, name=name, description=description, enable_perfect_forward_secrecy=enable_perfect_forward_secrecy, encryption_algorithms=encryption_algorithms, digest_algorithms=digest_algorithms, dh_groups=dh_groups, sa_life_time=sa_life_time, tags=tags, tenant=tenant) self._create_or_store(profile_def) return profile_id def delete(self, profile_id, tenant=constants.POLICY_INFRA_TENANT): profile_def = self.entry_def(profile_id=profile_id, tenant=tenant) self.policy_api.delete(profile_def) def get(self, profile_id, tenant=constants.POLICY_INFRA_TENANT): profile_def = self.entry_def(profile_id=profile_id, tenant=tenant) return self.policy_api.get(profile_def) def list(self, tenant=constants.POLICY_INFRA_TENANT): profile_def = self.entry_def(tenant=tenant) return self._list(profile_def) def get_by_name(self, name, tenant=constants.POLICY_INFRA_TENANT): return super(NsxIpsecVpnTunnelProfileApi, self).get_by_name( name, tenant=tenant) def 
update(self, profile_id, name=IGNORE, description=IGNORE, enable_perfect_forward_secrecy=IGNORE, encryption_algorithms=IGNORE, digest_algorithms=IGNORE, dh_groups=IGNORE, sa_life_time=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): self._update( profile_id=profile_id, name=name, description=description, enable_perfect_forward_secrecy=enable_perfect_forward_secrecy, encryption_algorithms=encryption_algorithms, digest_algorithms=digest_algorithms, dh_groups=dh_groups, sa_life_time=sa_life_time, tags=tags, tenant=tenant) class NsxIpsecVpnDpdProfileApi(core_resources.NsxPolicyResourceBase): @property def entry_def(self): return ipsec_vpn_defs.IpsecVpnDpdProfileDef def create_or_overwrite(self, name, profile_id=None, description=IGNORE, dpd_probe_interval=IGNORE, enabled=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): profile_id = self._init_obj_uuid(profile_id) profile_def = self._init_def( profile_id=profile_id, name=name, description=description, dpd_probe_interval=dpd_probe_interval, enabled=enabled, tags=tags, tenant=tenant) self._create_or_store(profile_def) return profile_id def delete(self, profile_id, tenant=constants.POLICY_INFRA_TENANT): profile_def = self.entry_def(profile_id=profile_id, tenant=tenant) self.policy_api.delete(profile_def) def get(self, profile_id, tenant=constants.POLICY_INFRA_TENANT): profile_def = self.entry_def(profile_id=profile_id, tenant=tenant) return self.policy_api.get(profile_def) def list(self, tenant=constants.POLICY_INFRA_TENANT): profile_def = self.entry_def(tenant=tenant) return self._list(profile_def) def get_by_name(self, name, tenant=constants.POLICY_INFRA_TENANT): return super(NsxIpsecVpnDpdProfileApi, self).get_by_name( name, tenant=tenant) def update(self, profile_id, name=IGNORE, description=IGNORE, dpd_probe_interval=IGNORE, enabled=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): self._update( profile_id=profile_id, name=name, description=description, dpd_probe_interval=dpd_probe_interval, enabled=enabled, tags=tags, tenant=tenant) class NsxIpsecVpnServiceApi(core_resources.NsxPolicyResourceBase): @property def entry_def(self): return ipsec_vpn_defs.Tier1IPSecVpnServiceDef def _locale_service_id(self, tier1_id): return core_resources.NsxPolicyTier1Api._locale_service_id(tier1_id) def create_or_overwrite(self, name, tier1_id, vpn_service_id=None, description=IGNORE, enabled=IGNORE, ike_log_level=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): vpn_service_id = self._init_obj_uuid(vpn_service_id) service_def = self._init_def( tier1_id=tier1_id, service_id=self._locale_service_id(tier1_id), vpn_service_id=vpn_service_id, name=name, description=description, enabled=enabled, ike_log_level=ike_log_level, tags=tags, tenant=tenant) self._create_or_store(service_def) return vpn_service_id def delete(self, tier1_id, vpn_service_id, tenant=constants.POLICY_INFRA_TENANT): service_def = self.entry_def( tier1_id=tier1_id, service_id=self._locale_service_id(tier1_id), vpn_service_id=vpn_service_id, tenant=tenant) self.policy_api.delete(service_def) def get(self, tier1_id, vpn_service_id, tenant=constants.POLICY_INFRA_TENANT): service_def = self.entry_def( tier1_id=tier1_id, service_id=self._locale_service_id(tier1_id), vpn_service_id=vpn_service_id, tenant=tenant) return self.policy_api.get(service_def) def list(self, tier1_id, tenant=constants.POLICY_INFRA_TENANT): service_def = self.entry_def( tier1_id=tier1_id, service_id=self._locale_service_id(tier1_id), tenant=tenant) return self._list(service_def) def 
get_by_name(self, tier1_id, name, tenant=constants.POLICY_INFRA_TENANT): return super(NsxIpsecVpnServiceApi, self).get_by_name( name, tier1_id=tier1_id, tenant=tenant) def update(self, tier1_id, vpn_service_id, name=IGNORE, description=IGNORE, enabled=IGNORE, ike_log_level=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): self._update( tier1_id=tier1_id, service_id=self._locale_service_id(tier1_id), vpn_service_id=vpn_service_id, name=name, description=description, enabled=enabled, ike_log_level=ike_log_level, tags=tags, tenant=tenant) class NsxIpsecVpnLocalEndpointApi(core_resources.NsxPolicyResourceBase): @property def entry_def(self): return ipsec_vpn_defs.IpsecVpnLocalEndpointDef def _locale_service_id(self, tier1_id): return core_resources.NsxPolicyTier1Api._locale_service_id(tier1_id) def create_or_overwrite(self, name, tier1_id, vpn_service_id, endpoint_id=None, description=IGNORE, local_address=IGNORE, local_id=IGNORE, certificate_path=IGNORE, trust_ca_ids=IGNORE, trust_crl_ids=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): endpoint_id = self._init_obj_uuid(endpoint_id) endpoint_def = self._init_def( tier1_id=tier1_id, service_id=self._locale_service_id(tier1_id), vpn_service_id=vpn_service_id, endpoint_id=endpoint_id, name=name, description=description, local_address=local_address, local_id=local_id, certificate_path=certificate_path, trust_ca_ids=trust_ca_ids, trust_crl_ids=trust_crl_ids, tags=tags, tenant=tenant) self._create_or_store(endpoint_def) return endpoint_id def delete(self, tier1_id, vpn_service_id, endpoint_id, tenant=constants.POLICY_INFRA_TENANT): endpoint_def = self.entry_def( tier1_id=tier1_id, service_id=self._locale_service_id(tier1_id), vpn_service_id=vpn_service_id, endpoint_id=endpoint_id, tenant=tenant) self.policy_api.delete(endpoint_def) def get(self, tier1_id, vpn_service_id, endpoint_id, tenant=constants.POLICY_INFRA_TENANT): endpoint_def = self.entry_def( tier1_id=tier1_id, service_id=self._locale_service_id(tier1_id), vpn_service_id=vpn_service_id, endpoint_id=endpoint_id, tenant=tenant) return self.policy_api.get(endpoint_def) def list(self, tier1_id, vpn_service_id, tenant=constants.POLICY_INFRA_TENANT): endpoint_def = self.entry_def( tier1_id=tier1_id, vpn_service_id=vpn_service_id, service_id=self._locale_service_id(tier1_id), tenant=tenant) return self._list(endpoint_def) def get_by_name(self, tier1_id, vpn_service_id, name, tenant=constants.POLICY_INFRA_TENANT): return super(NsxIpsecVpnLocalEndpointApi, self).get_by_name( name, tier1_id=tier1_id, vpn_service_id=vpn_service_id, tenant=tenant) def update(self, tier1_id, vpn_service_id, endpoint_id, name=IGNORE, description=IGNORE, local_address=IGNORE, local_id=IGNORE, certificate_path=IGNORE, trust_ca_ids=IGNORE, trust_crl_ids=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): self._update( tier1_id=tier1_id, service_id=self._locale_service_id(tier1_id), vpn_service_id=vpn_service_id, endpoint_id=endpoint_id, name=name, description=description, local_address=local_address, local_id=local_id, certificate_path=certificate_path, trust_ca_ids=trust_ca_ids, trust_crl_ids=trust_crl_ids, tags=tags, tenant=tenant) class NsxIpsecVpnSessionApi(core_resources.NsxPolicyResourceBase): @property def entry_def(self): return ipsec_vpn_defs.Tier1IPSecVpnSessionDef def _locale_service_id(self, tier1_id): return core_resources.NsxPolicyTier1Api._locale_service_id(tier1_id) def create_or_overwrite(self, name, tier1_id, vpn_service_id, session_id=None, description=IGNORE, 
enabled=IGNORE, peer_address=IGNORE, peer_id=IGNORE, psk=IGNORE, rules=IGNORE, dpd_profile_id=IGNORE, ike_profile_id=IGNORE, tunnel_profile_id=IGNORE, local_endpoint_id=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): session_id = self._init_obj_uuid(session_id) session_def = self._init_def( tier1_id=tier1_id, service_id=self._locale_service_id(tier1_id), vpn_service_id=vpn_service_id, session_id=session_id, name=name, description=description, enabled=enabled, peer_address=peer_address, peer_id=peer_id, psk=psk, rules=rules, dpd_profile_id=dpd_profile_id, ike_profile_id=ike_profile_id, tunnel_profile_id=tunnel_profile_id, local_endpoint_id=local_endpoint_id, tags=tags, tenant=tenant) self._create_or_store(session_def) return session_id def delete(self, tier1_id, vpn_service_id, session_id, tenant=constants.POLICY_INFRA_TENANT): session_def = self.entry_def( tier1_id=tier1_id, service_id=self._locale_service_id(tier1_id), vpn_service_id=vpn_service_id, session_id=session_id, tenant=tenant) self.policy_api.delete(session_def) def get(self, tier1_id, vpn_service_id, session_id, tenant=constants.POLICY_INFRA_TENANT): session_def = self.entry_def( tier1_id=tier1_id, service_id=self._locale_service_id(tier1_id), vpn_service_id=vpn_service_id, session_id=session_id, tenant=tenant) return self.policy_api.get(session_def) def get_status(self, tier1_id, vpn_service_id, session_id, tenant=constants.POLICY_INFRA_TENANT): status_def = ipsec_vpn_defs.Tier1IPSecVpnSessionStatusDef( tier1_id=tier1_id, service_id=self._locale_service_id(tier1_id), vpn_service_id=vpn_service_id, session_id=session_id, tenant=tenant) return self.policy_api.get(status_def) def list(self, tier1_id, vpn_service_id, tenant=constants.POLICY_INFRA_TENANT): session_def = self.entry_def( tier1_id=tier1_id, vpn_service_id=vpn_service_id, service_id=self._locale_service_id(tier1_id), tenant=tenant) return self._list(session_def) def get_by_name(self, tier1_id, vpn_service_id, name, tenant=constants.POLICY_INFRA_TENANT): return super(NsxIpsecVpnSessionApi, self).get_by_name( name, tier1_id=tier1_id, vpn_service_id=vpn_service_id, tenant=tenant) def update(self, tier1_id, vpn_service_id, session_id, name=IGNORE, description=IGNORE, enabled=IGNORE, peer_address=IGNORE, peer_id=IGNORE, psk=IGNORE, rules=IGNORE, dpd_profile_id=IGNORE, ike_profile_id=IGNORE, tunnel_profile_id=IGNORE, local_endpoint_id=IGNORE, tags=IGNORE, tenant=constants.POLICY_INFRA_TENANT): self._update( tier1_id=tier1_id, service_id=self._locale_service_id(tier1_id), vpn_service_id=vpn_service_id, session_id=session_id, name=name, description=description, enabled=enabled, peer_address=peer_address, peer_id=peer_id, psk=psk, rules=rules, dpd_profile_id=dpd_profile_id, ike_profile_id=ike_profile_id, tunnel_profile_id=tunnel_profile_id, local_endpoint_id=local_endpoint_id, tags=tags, tenant=tenant) def build_rule(self, name, rule_id, action=constants.IPSEC_VPN_RULE_PROTECT, description=None, enabled=True, logged=False, destination_cidrs=None, source_cidrs=None, sequence_number=0, tags=None): return ipsec_vpn_defs.IPSecVpnRule( name=name, action=action, description=description, enabled=enabled, rule_id=rule_id, logged=logged, destination_cidrs=destination_cidrs, source_cidrs=source_cidrs, sequence_number=sequence_number, tags=tags) class NsxPolicyIpsecVpnApi(object): """This is the class that have all IPSEC VPN policy apis""" def __init__(self, *args): self.ike_profile = NsxIpsecVpnIkeProfileApi(*args) self.tunnel_profile = NsxIpsecVpnTunnelProfileApi(*args) 
self.dpd_profile = NsxIpsecVpnDpdProfileApi(*args) self.service = NsxIpsecVpnServiceApi(*args) self.local_endpoint = NsxIpsecVpnLocalEndpointApi(*args) self.session = NsxIpsecVpnSessionApi(*args) vmware-nsxlib-15.0.6/vmware_nsxlib/v3/policy/lb_defs.py0000664000175000017500000004424513623151571023117 0ustar zuulzuul00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_log import log as logging from vmware_nsxlib.v3 import nsx_constants from vmware_nsxlib.v3.policy import constants from vmware_nsxlib.v3.policy.core_defs import ResourceDef LOG = logging.getLogger(__name__) TENANTS_PATH_PATTERN = "%s/" LB_VIRTUAL_SERVERS_PATH_PATTERN = TENANTS_PATH_PATTERN + "lb-virtual-servers/" LB_SERVICES_PATH_PATTERN = TENANTS_PATH_PATTERN + "lb-services/" LB_POOL_PATH_PATTERN = TENANTS_PATH_PATTERN + "lb-pools/" LB_APP_PROFILE_PATTERN = TENANTS_PATH_PATTERN + "lb-app-profiles/" LB_MONITOR_PROFILE_PATTERN = TENANTS_PATH_PATTERN + "lb-monitor-profiles/" LB_CLIENT_SSL_PROFILE_PATTERN = (TENANTS_PATH_PATTERN + "lb-client-ssl-profiles/") LBSERVER_SSL_PROFILE_PATTERN = (TENANTS_PATH_PATTERN + "lb-server-ssl-profiles/") LB_PERSISTENCE_PROFILE_PATTERN = (TENANTS_PATH_PATTERN + "lb-persistence-profiles/") class LBRuleDef(object): def __init__(self, actions, match_conditions=None, name=None, match_strategy=None, phase=None): self.actions = actions self.name = name self.match_conditions = match_conditions self.match_strategy = match_strategy self.phase = phase def get_obj_dict(self): lb_rule = { 'actions': self.actions } if self.match_conditions: lb_rule['match_conditions'] = self.match_conditions if self.name: lb_rule['display_name'] = self.name if self.match_strategy: lb_rule['match_strategy'] = self.match_strategy if self.phase: lb_rule['phase'] = self.phase return lb_rule class LBPoolMemberDef(object): def __init__(self, ip_address, port=None, name=None, weight=None, admin_state=None, backup_member=None): self.name = name self.ip_address = ip_address self.port = port self.weight = weight self.admin_state = admin_state self.backup_member = backup_member def get_obj_dict(self): body = {'ip_address': self.ip_address} if self.name: body['display_name'] = self.name if self.ip_address: body['port'] = self.port if self.weight: body['weight'] = self.weight if self.admin_state: body['admin_state'] = self.admin_state if self.backup_member: body['backup_member'] = self.backup_member return body class LBServerSslProfileDef(ResourceDef): @property def path_pattern(self): return LBSERVER_SSL_PROFILE_PATTERN @property def path_ids(self): return ('tenant', 'server_ssl_profile_id') @staticmethod def resource_type(): return "LBServerSslProfile" def get_obj_dict(self): body = super(LBServerSslProfileDef, self).get_obj_dict() self._set_attrs_if_specified(body, ['cipher_group_label', 'ciphers', 'protocols', 'session_cache_enabled']) return body class LBClientSslProfileDef(ResourceDef): @property def path_pattern(self): return LB_CLIENT_SSL_PROFILE_PATTERN @property def 
path_ids(self): return ('tenant', 'client_ssl_profile_id') @staticmethod def resource_type(): return "LBClientSslProfile" def get_obj_dict(self): body = super(LBClientSslProfileDef, self).get_obj_dict() self._set_attr_if_specified(body, 'protocols') return body class LBPersistenceProfileBase(ResourceDef): @property def path_pattern(self): return LB_PERSISTENCE_PROFILE_PATTERN @property def path_ids(self): return ('tenant', 'persistence_profile_id') class LBCookiePersistenceProfileDef(LBPersistenceProfileBase): @staticmethod def resource_type(): return "LBCookiePersistenceProfile" def get_obj_dict(self): body = super(LBCookiePersistenceProfileDef, self).get_obj_dict() self._set_attrs_if_specified( body, ['cookie_garble', 'cookie_mode', 'cookie_name', 'cookie_path', 'cookie_time', 'persistence_shared']) return body class LBSourceIpPersistenceProfileDef(LBPersistenceProfileBase): @staticmethod def resource_type(): return "LBSourceIpPersistenceProfile" def get_obj_dict(self): body = super(LBSourceIpPersistenceProfileDef, self).get_obj_dict() self._set_attrs_if_specified( body, ['ha_persistence_mirroring_enabled', 'persistence_shared', 'purge', 'timeout']) return body class LBAppProfileBaseDef(ResourceDef): @property def path_pattern(self): return LB_APP_PROFILE_PATTERN @property def path_ids(self): return ('tenant', 'lb_app_profile_id') def get_obj_dict(self): body = super(LBAppProfileBaseDef, self).get_obj_dict() self._set_attrs_if_specified( body, ['idle_timeout']) return body class LBHttpProfileDef(LBAppProfileBaseDef): @staticmethod def resource_type(): return "LBHttpProfile" def get_obj_dict(self): body = super(LBHttpProfileDef, self).get_obj_dict() self._set_attrs_if_specified( body, ['http_redirect_to', 'http_redirect_to_https', 'ntlm', 'request_body_size', 'request_header_size', 'response_header_size', 'response_timeout', 'x_forwarded_for']) return body class LBFastTcpProfile(LBAppProfileBaseDef): @staticmethod def resource_type(): return "LBFastTcpProfile" def get_obj_dict(self): body = super(LBFastTcpProfile, self).get_obj_dict() self._set_attrs_if_specified( body, ['close_timeout', 'ha_flow_mirroring_enabled']) return body class LBFastUdpProfile(LBAppProfileBaseDef): @staticmethod def resource_type(): return "LBFastUdpProfile" def get_obj_dict(self): body = super(LBFastUdpProfile, self).get_obj_dict() self._set_attrs_if_specified( body, ['flow_mirroring_enabled']) return body class LBPoolDef(ResourceDef): @property def path_pattern(self): return LB_POOL_PATH_PATTERN @property def path_ids(self): return ('tenant', 'lb_pool_id') @staticmethod def resource_type(): return 'LBPool' def get_obj_dict(self): body = super(LBPoolDef, self).get_obj_dict() self._set_attrs_if_specified( body, ['active_monitor_paths', 'algorithm', 'member_group', 'snat_translation']) members = self.get_attr('members') if members is None: members = [] if self.has_attr('members'): members = members if isinstance(members, list) else [members] body['members'] = [] for member in members: # the list contains old json members and newly added member if isinstance(member, LBPoolMemberDef): member = member.get_obj_dict() body['members'].append(member) return body class LBVirtualServerDef(ResourceDef): @property def path_pattern(self): return LB_VIRTUAL_SERVERS_PATH_PATTERN @property def path_ids(self): return ('tenant', 'virtual_server_id') @staticmethod def resource_type(): return 'LBVirtualServer' def get_obj_dict(self): body = super(LBVirtualServerDef, self).get_obj_dict() self._set_attrs_if_specified( body, 
['ip_address', 'ports', 'max_concurrent_connections']) client_ssl_binding = self.get_attr('client_ssl_profile_binding') if client_ssl_binding: self._set_attr_if_specified( body, 'client_ssl_profile_binding', value=client_ssl_binding) server_ssl_binding = self.get_attr('server_ssl_profile_binding') if server_ssl_binding: self._set_attr_if_specified( body, 'server_ssl_profile_binding', value=server_ssl_binding) waf_profile_binding = self.get_attr('waf_profile_binding') if waf_profile_binding: if isinstance(waf_profile_binding, WAFProfileBindingDef): waf_profile_binding = waf_profile_binding.get_obj_dict() self._set_attr_if_specified( body, 'waf_profile_binding', value=waf_profile_binding) rules = self.get_attr('rules') if self.has_attr('rules'): rules = rules if isinstance(rules, list) else [rules] body['rules'] = [] for rule in rules: # the list contains old json rules and newly added ruledef rule if isinstance(rule, LBRuleDef): rule = rule.get_obj_dict() body['rules'].append(rule) app_profile_id = self.get_attr('application_profile_id') if app_profile_id: app_profile_def = LBAppProfileBaseDef( lb_app_profile_id=app_profile_id, tenant=self.get_tenant()) body['application_profile_path'] = ( app_profile_def.get_resource_full_path()) if self.has_attr('lb_persistence_profile_id'): path = "" lb_persistence_profile_id = self.get_attr( 'lb_persistence_profile_id') if lb_persistence_profile_id: lb_persistence_profile_def = LBPersistenceProfileBase( persistence_profile_id=lb_persistence_profile_id, tenant=self.get_tenant()) path = lb_persistence_profile_def.get_resource_full_path() body['lb_persistence_profile_path'] = path if self.has_attr('lb_service_id'): path = "" lb_service_id = self.get_attr('lb_service_id') if lb_service_id: lb_service_def = LBServiceDef( lb_service_id=lb_service_id, tenant=self.get_tenant()) path = lb_service_def.get_resource_full_path() body['lb_service_path'] = path if self.has_attr('pool_id'): path = "" lb_pool_id = self.get_attr('pool_id') if lb_pool_id: lb_pool_def = LBPoolDef( lb_pool_id=lb_pool_id, tenant=self.get_tenant()) path = lb_pool_def.get_resource_full_path() body['pool_path'] = path if self.has_attr('access_list_control'): lb_alc = self.get_attr('access_list_control') if isinstance(lb_alc, LBAccessListControlDef): self.attrs['access_list_control'] = lb_alc.get_obj_dict() self._set_attrs_if_supported(body, ['access_list_control']) return body @property def version_dependant_attr_map(self): return {'access_list_control': nsx_constants.NSX_VERSION_3_0_0} class ClientSSLProfileBindingDef(object): def __init__(self, default_certificate_path, sni_certificate_paths=None, ssl_profile_path=None, client_auth_ca_paths=None, client_auth=None): self.default_certificate_path = default_certificate_path self.sni_certificate_paths = sni_certificate_paths self.ssl_profile_path = ssl_profile_path self.client_auth_ca_paths = client_auth_ca_paths self.client_auth = client_auth def get_obj_dict(self): body = { 'default_certificate_path': self.default_certificate_path } if self.sni_certificate_paths: body['sni_certificate_paths'] = self.sni_certificate_paths if self.ssl_profile_path: body['ssl_profile_path'] = self.ssl_profile_path if self.client_auth_ca_paths: body['client_auth_ca_paths'] = self.client_auth_ca_paths if self.client_auth: body['client_auth'] = self.client_auth return body class ServerSSLProfileBindingDef(object): def __init__(self, client_certificate_path=None, certificate_chain_depth=None, server_auth=None, server_auth_ca_paths=None, server_auth_crl_paths=None, 
ssl_profile_path=None): self.client_certificate_path = client_certificate_path self.certificate_chain_depth = certificate_chain_depth self.server_auth = server_auth self.server_auth_ca_paths = server_auth_ca_paths self.server_auth_crl_paths = server_auth_crl_paths self.ssl_profile_path = ssl_profile_path def get_obj_dict(self): body = {} if self.client_certificate_path: body['client_certificate_path'] = self.client_certificate_path if self.ssl_profile_path: body['certificate_chain_depth'] = self.certificate_chain_depth if self.server_auth: body['server_auth'] = self.server_auth if self.ssl_profile_path: body['server_auth_ca_paths'] = self.server_auth_ca_paths if self.server_auth_crl_paths: body['server_auth_crl_paths'] = self.server_auth_crl_paths if self.ssl_profile_path: body['ssl_profile_path'] = self.ssl_profile_path return body class WAFProfileBindingDef(object): def __init__(self, waf_profile_path, operational_mode=constants.WAF_OPERATIONAL_MODE_PROTECTION, debug_log_level=constants.WAF_LOG_LEVEL_NO_LOG): self.waf_profile_path = waf_profile_path self.operational_mode = operational_mode self.debug_log_level = debug_log_level def get_obj_dict(self): body = { 'waf_profile_path': self.waf_profile_path, 'operational_mode': self.operational_mode, 'debug_log_level': self.debug_log_level } return body class LBServiceDef(ResourceDef): @property def path_pattern(self): return LB_SERVICES_PATH_PATTERN @property def path_ids(self): return ('tenant', 'lb_service_id') @staticmethod def resource_type(): return 'LBService' def get_obj_dict(self): body = super(LBServiceDef, self).get_obj_dict() self._set_attrs_if_specified(body, ['size', 'connectivity_path']) self._set_attrs_if_supported(body, ['relax_scale_validation']) return body @property def version_dependant_attr_map(self): return {'relax_scale_validation': nsx_constants.NSX_VERSION_3_0_0} class LBServiceStatisticsDef(ResourceDef): @property def path_pattern(self): return LB_SERVICES_PATH_PATTERN + '%s/statistics/' @property def path_ids(self): return ('tenant', 'lb_service_id', '') class LBServiceStatusDef(ResourceDef): @property def path_pattern(self): return LB_SERVICES_PATH_PATTERN + '%s/detailed-status/' @property def path_ids(self): return ('tenant', 'lb_service_id', '') class LBServiceUsageDef(ResourceDef): def __init__(self, **kwargs): self.realtime = kwargs.pop('realtime') super(LBServiceUsageDef, self).__init__(**kwargs) @property def path_pattern(self): if self.realtime: return (LB_SERVICES_PATH_PATTERN + '%s/service-usage?source=realtime') return LB_SERVICES_PATH_PATTERN + '%s/service-usage/' @property def path_ids(self): return ('tenant', 'lb_service_id', '') class LBVirtualServerStatusDef(ResourceDef): @property def path_pattern(self): return (LB_SERVICES_PATH_PATTERN + '%s/lb-virtual-servers/%s/detailed-status/') @property def path_ids(self): return ('tenant', 'lb_service_id', 'lb_virtual_server_id', '') class LBMonitorProfileBaseDef(ResourceDef): addl_attrs = ['interval', 'timeout', 'fall_count', 'rise_count'] @property def path_pattern(self): return LB_MONITOR_PROFILE_PATTERN @property def path_ids(self): return ('tenant', 'lb_monitor_profile_id') def get_obj_dict(self): body = super(LBMonitorProfileBaseDef, self).get_obj_dict() self._set_attrs_if_specified(body, self.addl_attrs) return body class LBHttpMonitorProfileDef(LBMonitorProfileBaseDef): addl_attrs = LBMonitorProfileBaseDef.addl_attrs + [ 'monitor_port', 'request_url', 'request_method', 'request_version', 'request_headers', 'request_body', 'response_status_codes'] 
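    # Illustrative note (not part of the upstream source): a hypothetical
    # caller-side sketch exercising the HTTP-specific attributes listed in
    # addl_attrs above; identifiers and values are placeholders only.
    #
    #     monitor_def = LBHttpMonitorProfileDef(
    #         lb_monitor_profile_id='example-http-monitor',
    #         monitor_port=8080, request_url='/healthcheck',
    #         response_status_codes=[200],
    #         tenant=constants.POLICY_INFRA_TENANT)
    #     body = monitor_def.get_obj_dict()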
@staticmethod def resource_type(): return "LBHttpMonitorProfile" class LBHttpsMonitorProfileDef(LBHttpMonitorProfileDef): @staticmethod def resource_type(): return "LBHttpsMonitorProfile" class LBUdpMonitorProfileDef(LBMonitorProfileBaseDef): addl_attrs = LBMonitorProfileBaseDef.addl_attrs + [ 'monitor_port', 'receive', 'send'] @staticmethod def resource_type(): return "LBUdpMonitorProfile" class LBIcmpMonitorProfileDef(LBMonitorProfileBaseDef): @staticmethod def resource_type(): return "LBIcmpMonitorProfile" class LBTcpMonitorProfileDef(LBMonitorProfileBaseDef): addl_attrs = LBMonitorProfileBaseDef.addl_attrs + ['monitor_port'] @staticmethod def resource_type(): return "LBTcpMonitorProfile" class LBAccessListControlDef(object): def __init__(self, action, group_path, enabled=None): self.action = action self.group_path = group_path self.enabled = enabled def get_obj_dict(self): access_list_control = { 'action': self.action, 'group_path': self.group_path } if self.enabled is not None: access_list_control['enabled'] = self.enabled return access_list_control vmware-nsxlib-15.0.6/vmware_nsxlib/v3/exceptions.py0000664000175000017500000001573213623151571022402 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_utils import excutils import six from vmware_nsxlib._i18n import _ class NsxLibException(Exception): """Base NsxLib Exception. To correctly use this class, inherit from it and define a 'message' property. That message will get printf'd with the keyword arguments provided to the constructor. 
""" message = _("An unknown exception occurred.") def __init__(self, **kwargs): try: super(NsxLibException, self).__init__(self.message % kwargs) self.msg = self.message % kwargs except Exception: with excutils.save_and_reraise_exception() as ctxt: if not self.use_fatal_exceptions(): ctxt.reraise = False # at least get the core message out if something happened super(NsxLibException, self).__init__(self.message) if six.PY2: def __unicode__(self): return unicode(self.msg) if six.PY2 else self.msg # noqa def __str__(self): return self.msg def use_fatal_exceptions(self): return False class ObjectAlreadyExists(NsxLibException): message = _("%(object_type)s already exists") class NotImplemented(NsxLibException): message = _("%(operation)s is not supported") class ObjectNotGenerated(NsxLibException): message = _("%(object_type)s was not generated") class CertificateError(NsxLibException): message = _("Certificate error: %(msg)s") class NsxLibInvalidInput(NsxLibException): message = _("Invalid input for operation: %(error_message)s.") class ManagerError(NsxLibException): message = _("Unexpected error from backend manager (%(manager)s) " "for %(operation)s%(details)s") related_error_codes = [] def __init__(self, **kwargs): details = kwargs.get('details', '') kwargs['details'] = ': %s' % details if details else '' super(ManagerError, self).__init__(**kwargs) try: self.msg = self.message % kwargs except KeyError: self.msg = details self.error_code = kwargs.get('error_code') self.related_error_codes = kwargs.get('related_error_codes', []) self.status_code = kwargs.get('status_code') class ResourceNotFound(ManagerError): message = _("Resource could not be found on backend (%(manager)s) for " "%(operation)s") class BackendResourceNotFound(ResourceNotFound): message = _("%(details)s On backend (%(manager)s) with Operation: " "%(operation)s") class InvalidInput(ManagerError): message = _("%(operation)s failed: Invalid input %(arg_val)s " "for %(arg_name)s") class RealizationError(ManagerError): pass class RealizationErrorStateError(RealizationError): message = _("%(resource_type)s ID %(resource_id)s is in ERROR state: " "%(error)s") class RealizationTimeoutError(RealizationError): message = _("%(resource_type)s ID %(resource_id)s " "was not realized after %(attempts)s attempts " "with %(sleep)s seconds sleep") class DetailedRealizationTimeoutError(RealizationError): message = _("%(resource_type)s ID %(resource_id)s " "was not realized to %(realized_type)s " "for %(related_type)s %(related_id)s " "after %(attempts)s attempts " "with %(sleep)s seconds sleep") class StaleRevision(ManagerError): pass class ServerBusy(ManagerError): pass class TooManyRequests(ServerBusy): pass class ServiceUnavailable(ServerBusy): pass class ClientCertificateNotTrusted(ManagerError): message = _("Certificate not trusted") class BadXSRFToken(ManagerError): message = _("Bad or expired XSRF token") class BadJSONWebTokenProviderRequest(NsxLibException): message = _("Bad or expired JSON web token request from provider: %(msg)s") class ServiceClusterUnavailable(ManagerError): message = _("Service cluster: '%(cluster_id)s' is unavailable. 
Please, " "check NSX setup and/or configuration") class NSGroupMemberNotFound(ManagerError): message = _("Could not find NSGroup %(nsgroup_id)s member %(member_id)s " "for removal.") class NSGroupIsFull(ManagerError): message = _("NSGroup %(nsgroup_id)s contains has reached its maximum " "capacity, unable to add additional members.") class NumberOfNsgroupCriteriaTagsReached(ManagerError): message = _("Port can be associated with at most %(max_num)s " "security-groups.") class SecurityGroupMaximumCapacityReached(ManagerError): message = _("Security Group %(sg_id)s has reached its maximum capacity, " "no more ports can be associated with this security-group.") class NsxSearchInvalidQuery(NsxLibException): message = _("Invalid input for NSX search query. Reason: %(reason)s") class NsxSearchError(NsxLibException): message = _("Search failed due to error") class NsxIndexingInProgress(NsxSearchError): message = _("Bad Request due to indexing is in progress, please retry " "after sometime") class NsxSearchTimeout(NsxSearchError): message = _("Request timed out. This may occur when system is under load " "or running low on resources") class NsxSearchOutOfSync(NsxSearchError): message = _("Index is currently out of sync") class NsxPendingDelete(NsxLibException): message = _("An object with the same name is marked for deletion. Either " "use another path or wait for the purge cycle to permanently " "remove the deleted object") class NsxSegemntWithVM(ManagerError): message = _("Cannot delete segment as it still has VMs or VIFs attached") class NsxOverlapAddresses(NsxLibInvalidInput): message = _("Overlapping addresses found: %(details)s") class NsxOverlapVlan(NsxLibInvalidInput): message = _("Duplicate logical-switch vlan-ids sharing same underlying " "physical devices resulting in a conflict") class APITransactionAborted(ManagerError): message = _("API transaction aborted as MP cluster is reconfiguring") class CannotConnectToServer(ManagerError): message = _("Cannot connect to server") class ResourceInUse(ManagerError): message = _("The object cannot be deleted as either it has children or it " "is being referenced by other objects") vmware-nsxlib-15.0.6/vmware_nsxlib/v3/config.py0000664000175000017500000002174313623151571021465 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from oslo_log import versionutils LOG = log.getLogger(__name__) class NsxLibConfig(object): """Class holding all the configuration parameters used by the nsxlib code. :param nsx_api_managers: List of IP addresses of the NSX managers. Each IP address should be of the form: [://][:] If scheme is not provided https is used. If port is not provided port 80 is used for http and port 443 for https. :param username: User name for the NSX manager :param password: Password for the NSX manager :param client_cert_provider: None, or ClientCertProvider object. If specified, nsxlib will use client cert auth instead of basic authentication. 
:param insecure: If true, the NSX Manager server certificate is not verified. If false the CA bundle specified via "ca_file" will be used or if unset the "thumbprint" will be used. If "thumbprint" is unset, the default system root CAs will be used. :param ca_file: Specify a CA bundle file to use in verifying the NSX Manager server certificate. This option is ignored if "insecure" is set to True. If "insecure" is set to False and "ca_file" is unset, the "thumbprint" will be used. If "thumbprint" is unset, the system root CAs will be used to verify the server certificate. :param thumbprint: Specify a thumbprint string to use in verifying the NSX Manager server certificate. This option is ignored if "insecure" is set to True or "ca_file" is defined. :param token_provider: None, or instance of implemented AbstractJWTProvider which will return the JSON Web Token used in the requests in NSX for authorization. :param concurrent_connections: Maximum concurrent connections to each NSX manager. :param retries: Maximum number of times to retry a HTTP connection. :param http_timeout: The time in seconds before aborting a HTTP connection to a NSX manager. :param http_read_timeout: The time in seconds before aborting a HTTP read response from a NSX manager. :param conn_idle_timeout: The amount of time in seconds to wait before ensuring connectivity to the NSX manager if no manager connection has been used. :param http_provider: HTTPProvider object, or None. :param max_attempts: Maximum number of times to retry API requests upon stale revision errors. :param plugin_scope: The default scope for the v3 api-version tag :param plugin_tag: The value for the v3 api-version tag :param plugin_ver: The version of the plugin used as the 'os-api-version' tag value in the v3 api-version tag :param dns_nameservers: List of nameservers to configure for the DHCP binding entries. These will be used if there are no nameservers defined on the subnet. :param dns_domain: Domain to use for building the hostnames. :param dhcp_profile_uuid: Currently unused and deprecated. Kept for backward compatibility. :param allow_overwrite_header: If True, a default header of X-Allow-Overwrite:true will be added to all the requests, to allow admin user to update/ delete all entries. :param rate_limit_retry: If True, the client will retry requests failed on "Too many requests" error. :param cluster_unavailable_retry: If True, skip fatal errors when no endpoint in the NSX management cluster is available to serve a request, and retry the request instead. -- Additional parameters which are relevant only for the Policy manager: :param allow_passthrough: If True, use nsx manager api for cases which are not supported by the policy manager api. :param realization_max_attempts: Maximum number of times to retry while waiting for a resource to be realized. :param realization_wait_sec: Number of seconds to wait between attempts for a resource to be realized. 
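
    A minimal usage sketch (the manager addresses, credentials and CA path
    below are illustrative assumptions, not defaults):

        from vmware_nsxlib import v3
        from vmware_nsxlib.v3 import config

        nsxlib_config = config.NsxLibConfig(
            nsx_api_managers=['192.168.10.10', '192.168.10.11'],
            username='admin',
            password='secret',
            insecure=False,
            ca_file='/etc/ssl/certs/nsx-ca.pem')
        nsxlib = v3.NsxLib(nsxlib_config)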
""" def __init__(self, nsx_api_managers=None, username=None, password=None, client_cert_provider=None, insecure=True, ca_file=None, thumbprint=None, token_provider=None, concurrent_connections=10, retries=3, http_timeout=10, http_read_timeout=180, conn_idle_timeout=10, http_provider=None, max_attempts=10, plugin_scope=None, plugin_tag=None, plugin_ver=None, dns_nameservers=None, dns_domain='openstacklocal', dhcp_profile_uuid=None, allow_overwrite_header=False, rate_limit_retry=True, cluster_unavailable_retry=False, allow_passthrough=False, realization_max_attempts=50, realization_wait_sec=1.0): self.nsx_api_managers = nsx_api_managers self._username = username self._password = password self._ca_file = ca_file self._thumbprint = thumbprint self.insecure = insecure self.concurrent_connections = concurrent_connections self.retries = retries self.http_timeout = http_timeout self.http_read_timeout = http_read_timeout self.conn_idle_timeout = conn_idle_timeout self.http_provider = http_provider self.client_cert_provider = client_cert_provider self.token_provider = token_provider self.max_attempts = max_attempts self.plugin_scope = plugin_scope self.plugin_tag = plugin_tag self.plugin_ver = plugin_ver self.dns_nameservers = dns_nameservers or [] self.dns_domain = dns_domain self.allow_overwrite_header = allow_overwrite_header self.rate_limit_retry = rate_limit_retry self.cluster_unavailable_retry = cluster_unavailable_retry self.allow_passthrough = allow_passthrough self.realization_max_attempts = realization_max_attempts self.realization_wait_sec = realization_wait_sec if dhcp_profile_uuid: # this is deprecated, and never used. versionutils.report_deprecated_feature( LOG, 'dhcp_profile_uuid is not used by the nsxlib, and will ' 'be removed from its configuration in the future.') def extend(self, keepalive_section, validate_connection_method=None, url_base=None): """Called by library code to initialize application-specific data""" self.keepalive_section = keepalive_section self.validate_connection_method = validate_connection_method self.url_base = url_base def _attribute_by_index(self, scalar_or_list, index): if isinstance(scalar_or_list, list): if not len(scalar_or_list): return None if len(scalar_or_list) > index: return scalar_or_list[index] # if not long enough - use the first one as default return scalar_or_list[0] # this is a scalar return scalar_or_list def username(self, index): return self._attribute_by_index(self._username, index) def password(self, index): return self._attribute_by_index(self._password, index) def ca_file(self, index): return self._attribute_by_index(self._ca_file, index) def thumbprint(self, index): return self._attribute_by_index(self._thumbprint, index) vmware-nsxlib-15.0.6/vmware_nsxlib/v3/native_dhcp.py0000664000175000017500000001312613623151571022500 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import netaddr import six from oslo_log import log from oslo_log import versionutils from vmware_nsxlib.v3 import constants from vmware_nsxlib.v3 import utils LOG = log.getLogger(__name__) class NsxLibNativeDhcp(utils.NsxLibApiBase): def build_static_routes(self, gateway_ip, cidr, host_routes): # The following code is based on _generate_opts_per_subnet() in # neutron/agent/linux/dhcp.py. It prepares DHCP options for a subnet. # Add route for directly connected network. static_routes = [{'network': cidr, 'next_hop': '0.0.0.0'}] # Copy routes from subnet host_routes attribute. if host_routes: for hr in host_routes: if hr['destination'] == constants.IPv4_ANY: if not gateway_ip: gateway_ip = hr['nexthop'] else: static_routes.append({'network': hr['destination'], 'next_hop': hr['nexthop']}) # If gateway_ip is defined, add default route via this gateway. if gateway_ip: static_routes.append({'network': constants.IPv4_ANY, 'next_hop': gateway_ip}) return static_routes, gateway_ip def build_server_name(self, net_name, net_id): return utils.get_name_and_uuid(net_name or 'dhcpserver', net_id) def build_server_domain_name(self, net_dns_domain, default_dns_domain): versionutils.report_deprecated_feature( LOG, 'NsxLibQosNativeDhcp.build_server_domain_name is deprecated.') if net_dns_domain: if isinstance(net_dns_domain, six.string_types): domain_name = net_dns_domain else: domain_name = net_dns_domain['dns_domain'] else: # use the default one, or the globally configured one if default_dns_domain is not None: domain_name = default_dns_domain else: domain_name = self.nsxlib_config.dns_domain return domain_name def build_server(self, name, ip_address, cidr, gateway_ip, dns_domain=None, dns_nameservers=None, host_routes=None, dhcp_profile_id=None, tags=None): # Prepare the configuration for a new logical DHCP server. server_ip = "%s/%u" % (ip_address, netaddr.IPNetwork(cidr).prefixlen) if not dns_domain: dns_domain = self.nsxlib_config.dns_domain if not dns_nameservers: dns_nameservers = self.nsxlib_config.dns_nameservers if not utils.is_attr_set(gateway_ip): gateway_ip = None static_routes, gateway_ip = self.build_static_routes( gateway_ip, cidr, host_routes) options = {'option121': {'static_routes': static_routes}} body = {'name': name, 'server_ip': server_ip, 'dns_nameservers': dns_nameservers, 'domain_name': dns_domain, 'gateway_ip': gateway_ip, 'options': options, 'tags': tags} if dhcp_profile_id: body['dhcp_profile_id'] = dhcp_profile_id return body def build_server_config(self, network, subnet, port, tags, default_dns_nameservers=None, default_dns_domain=None): versionutils.report_deprecated_feature( LOG, 'NsxLibQosNativeDhcp.build_server_config is deprecated. ' 'Please use build_server instead') # Prepare the configuration for a new logical DHCP server. 
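        # For illustration (made-up values): with cidr '10.0.0.0/24',
        # gateway_ip '10.0.0.1' and no host_routes, build_static_routes()
        # above returns
        # ([{'network': '10.0.0.0/24', 'next_hop': '0.0.0.0'},
        #   {'network': '0.0.0.0/0', 'next_hop': '10.0.0.1'}], '10.0.0.1'),
        # and the result is placed under options['option121']['static_routes']
        # in the DHCP server body built below.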
server_ip = "%s/%u" % (port['fixed_ips'][0]['ip_address'], netaddr.IPNetwork(subnet['cidr']).prefixlen) dns_nameservers = subnet['dns_nameservers'] if not dns_nameservers or not utils.is_attr_set(dns_nameservers): # use the default one , or the globally configured one if default_dns_nameservers is not None: dns_nameservers = default_dns_nameservers else: dns_nameservers = self.nsxlib_config.dns_nameservers gateway_ip = subnet['gateway_ip'] if not utils.is_attr_set(gateway_ip): gateway_ip = None static_routes, gateway_ip = self.build_static_routes( gateway_ip, subnet['cidr'], subnet['host_routes']) options = {'option121': {'static_routes': static_routes}} name = self.build_server_name(network['name'], network['id']) domain_name = self.build_server_domain_name(network.get('dns_domain'), default_dns_domain) return {'name': name, 'server_ip': server_ip, 'dns_nameservers': dns_nameservers, 'domain_name': domain_name, 'gateway_ip': gateway_ip, 'options': options, 'tags': tags} vmware-nsxlib-15.0.6/vmware_nsxlib/v3/token_provider.py0000664000175000017500000000264113623151571023246 0ustar zuulzuul00000000000000# Copyright 2019 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import six # NOTE: Consider inheriting from an abstract TokenProvider class to share # interface with XSRF token @six.add_metaclass(abc.ABCMeta) class AbstractJWTProvider(object): """Interface for providers of JSON Web Tokens(JWT) Responsible to provide the token value and refresh it once expired, or on demand, for authorization of requests to NSX. """ @abc.abstractmethod def get_token(self, refresh_token=False): """Request JWT value. :param refresh_token: Boolean value, indicating whether a new token value is to be retrieved. :raises vmware_nsxlib.v3.exceptions.BadJSONWebTokenProviderRequest: """ pass def get_header_value(self, token_value): return "Bearer %s" % token_value vmware-nsxlib-15.0.6/vmware_nsxlib/v3/lib.py0000664000175000017500000003112013623151571020754 0ustar zuulzuul00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc from distutils import version from oslo_log import log import six from vmware_nsxlib._i18n import _ from vmware_nsxlib.v3 import client from vmware_nsxlib.v3 import cluster from vmware_nsxlib.v3 import exceptions from vmware_nsxlib.v3 import nsx_constants from vmware_nsxlib.v3 import utils LOG = log.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class NsxLibBase(object): def __init__(self, nsxlib_config): self.nsx_version = None self.nsx_api = None self.default_headers = None self.set_config(nsxlib_config) self.set_default_headers(nsxlib_config) # create the Cluster self.cluster = cluster.NSXClusteredAPI(self.nsxlib_config) # create the Client self.client = client.NSX3Client( self.cluster, nsx_api_managers=self.nsxlib_config.nsx_api_managers, max_attempts=self.nsxlib_config.max_attempts, url_path_base=self.client_url_prefix, rate_limit_retry=self.nsxlib_config.rate_limit_retry, default_headers=self.default_headers) self.general_apis = utils.NsxLibApiBase( self.client, self.nsxlib_config) self.init_api() super(NsxLibBase, self).__init__() def set_config(self, nsxlib_config): """Set config user provided and extend it according to application""" self.nsxlib_config = nsxlib_config self.nsxlib_config.extend( keepalive_section=self.keepalive_section, validate_connection_method=self.validate_connection_method, url_base=self.client_url_prefix) def set_default_headers(self, nsxlib_config): """Set the default headers with token information""" if nsxlib_config.token_provider: try: token_value = nsxlib_config.token_provider.get_token() except exceptions.BadJSONWebTokenProviderRequest as e: LOG.error("Error in retrieving JSON Web Token: %s", e) return bearer_token = "Bearer %s" % token_value self.default_headers = self.default_headers or {} self.default_headers["Authorization"] = bearer_token @abc.abstractproperty def client_url_prefix(self): pass @abc.abstractproperty def keepalive_section(self): pass @abc.abstractproperty def validate_connection_method(self): pass @abc.abstractmethod def init_api(self): pass @abc.abstractmethod def feature_supported(self, feature): pass @abc.abstractmethod def get_version(self): pass def build_v3_api_version_tag(self): return self.general_apis.build_v3_api_version_tag() def is_internal_resource(self, nsx_resource): return self.general_apis.is_internal_resource(nsx_resource) def build_v3_api_version_project_tag(self, project_name, project_id=None): return self.general_apis.build_v3_api_version_project_tag( project_name, project_id=project_id) def build_v3_tags_payload(self, resource, resource_type, project_name): return self.general_apis.build_v3_tags_payload( resource, resource_type, project_name) def reinitialize_cluster(self, resource, event, trigger, payload=None): self.cluster._reinit_cluster() def subscribe(self, callback, event): self.cluster.subscribe(callback, event) def _add_pagination_parameters(self, url, cursor, page_size): if cursor: url += "&cursor=%d" % cursor if page_size: url += "&page_size=%d" % page_size return url def _get_search_url(self): if (version.LooseVersion(self.get_version()) >= version.LooseVersion(nsx_constants.NSX_VERSION_3_0_0)): return "search/query?query=%s" return "search?query=%s" # TODO(abhiraut): Revisit this method to generate complex boolean # queries to search resources. def search_by_tags(self, tags, resource_type=None, cursor=None, page_size=None, **extra_attrs): """Return the list of resources searched based on tags. Currently the query only supports AND boolean operator. 
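
        For example (illustrative values), calling this with
        tags=[{'scope': 'os-project-id', 'tag': 'project-blue'}] and
        resource_type='LogicalPort' results in the backend query
        'resource_type:LogicalPort AND tags.scope:os-project-id AND
        tags.tag:project-blue'.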
:param tags: List of dictionaries containing tags. Each NSX tag dictionary is of the form: {'scope': , 'tag': } :param resource_type: Optional string parameter to limit the scope of the search to the given ResourceType. :param cursor: Opaque cursor to be used for getting next page of records (supplied by current result page). :param page_size: Maximum number of results to return in this page. :param extra_attrs: Support querying by user specified attributes. Multiple attributes will be ANDed. """ if not tags: reason = _("Missing required argument 'tags'") raise exceptions.NsxSearchInvalidQuery(reason=reason) # Query will return nothing if the same scope is repeated. query_tags = self._build_query(tags) query = 'resource_type:%s' % resource_type if resource_type else None if query: query += " AND %s" % query_tags else: query = query_tags if extra_attrs: query += " AND %s" % " AND ".join( ['%s:%s' % (k, v) for (k, v) in extra_attrs.items()]) url = self._add_pagination_parameters(self._get_search_url() % query, cursor, page_size) # Retry the search in case of error @utils.retry_upon_exception(exceptions.NsxSearchError, max_attempts=self.client.max_attempts) def do_search(url): return self.client.url_get(url) return do_search(url) def search_resource_by_attributes(self, resource_type, cursor=None, page_size=None, **attributes): """Search resources of a given type matching specific attributes. It is optional to specify attributes. If multiple attributes are specified they are ANDed together to form the search query. :param resource_type: String parameter specifying the desired resource_type :param cursor: Opaque cursor to be used for getting next page of records (supplied by current result page). :param page_size: Maximum number of results to return in this page. :param **attributes: an optional set of keyword arguments specifying filters for the search query. Wildcards will not be interpeted. :returns: a list of resources of the requested type matching specified filters. 
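
        For example (illustrative values),
        search_resource_by_attributes('Segment', display_name='web-seg')
        issues the query 'resource_type:Segment AND display_name:web-seg'.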
""" if not resource_type: raise exceptions.NsxSearchInvalidQuery( reason=_("Resource type was not specified")) attributes_query = " AND ".join(['%s:%s' % (k, v) for (k, v) in attributes.items()]) query = 'resource_type:%s' % resource_type + ( " AND %s" % attributes_query if attributes_query else "") url = self._add_pagination_parameters(self._get_search_url() % query, cursor, page_size) # Retry the search in case of error @utils.retry_upon_exception(exceptions.NsxSearchError, max_attempts=self.client.max_attempts) def do_search(url): return self.client.url_get(url) return do_search(url) def search_all_by_tags(self, tags, resource_type=None, **extra_attrs): """Return all the results searched based on tags.""" results = [] cursor = 0 while True: response = self.search_by_tags( resource_type=resource_type, tags=tags, cursor=cursor, **extra_attrs) if not response['results']: return results results.extend(response['results']) cursor = int(response['cursor']) result_count = int(response['result_count']) if cursor >= result_count: return results def search_all_resource_by_attributes(self, resource_type, **attributes): """Return all the results searched based on attributes.""" results = [] cursor = 0 while True: response = self.search_resource_by_attributes( resource_type=resource_type, cursor=cursor, **attributes) if not response['results']: return results results.extend(response['results']) cursor = int(response['cursor']) result_count = int(response['result_count']) if cursor >= result_count: return results def get_id_by_resource_and_tag(self, resource_type, scope, tag, alert_not_found=False, alert_multiple=False): """Search a resource type by 1 scope&tag. Return the id of the result only if it is single. """ query_tags = [{'scope': utils.escape_tag_data(scope), 'tag': utils.escape_tag_data(tag)}] query_result = self.search_by_tags( tags=query_tags, resource_type=resource_type) if not query_result['result_count']: if alert_not_found: msg = _("No %(type)s found for tag '%(scope)s:%(tag)s'") % { 'type': resource_type, 'scope': scope, 'tag': tag} LOG.warning(msg) raise exceptions.ResourceNotFound( manager=self.nsxlib_config.nsx_api_managers, operation=msg) elif query_result['result_count'] == 1: return query_result['results'][0]['id'] else: # multiple results if alert_multiple: msg = _("Multiple %(type)s found for tag '%(scope)s:" "%(tag)s'") % { 'type': resource_type, 'scope': scope, 'tag': tag} LOG.warning(msg) raise exceptions.ManagerError( manager=self.nsxlib_config.nsx_api_managers, operation=msg, details='') def _build_tag_query(self, tag): # Validate that the correct keys are used if set(tag.keys()) - set(('scope', 'tag')): reason = _("Only 'scope' and 'tag' keys are supported") raise exceptions.NsxSearchInvalidQuery(reason=reason) _scope = tag.get('scope') _tag = tag.get('tag') if _scope and _tag: return 'tags.scope:%s AND tags.tag:%s' % (_scope, _tag) elif _scope: return 'tags.scope:%s' % _scope else: return 'tags.tag:%s' % _tag def _build_query(self, tags): return " AND ".join([self._build_tag_query(item) for item in tags]) def get_tag_limits(self): try: result = self.client.url_get('spec/vmware/types/Tag') scope_length = result['properties']['scope'].get( 'maxLength', utils.MAX_RESOURCE_TYPE_LEN) tag_length = result['properties']['tag'].get( 'maxLength', utils.MAX_TAG_LEN) except Exception as e: LOG.error("Unable to read tag limits. 
Reason: %s", e) scope_length = utils.MAX_RESOURCE_TYPE_LEN tag_length = utils.MAX_TAG_LEN try: result = self.client.url_get('spec/vmware/types/ManagedResource') max_tags = result['properties']['tags']['maxItems'] except Exception as e: LOG.error("Unable to read maximum tags. Reason: %s", e) max_tags = utils.MAX_TAGS return utils.TagLimits(scope_length, tag_length, max_tags) vmware-nsxlib-15.0.6/vmware_nsxlib/v3/client.py0000664000175000017500000003411413623151571021472 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import re import time from oslo_log import log from oslo_serialization import jsonutils import requests import six.moves.urllib.parse as urlparse from vmware_nsxlib._i18n import _ from vmware_nsxlib.v3 import exceptions from vmware_nsxlib.v3 import utils LOG = log.getLogger(__name__) NULL_CURSOR_PREFIX = '0000' def http_error_to_exception(status_code, error_code): errors = { requests.codes.NOT_FOUND: {'202': exceptions.BackendResourceNotFound, 'default': exceptions.ResourceNotFound}, requests.codes.BAD_REQUEST: {'60508': exceptions.NsxIndexingInProgress, '60514': exceptions.NsxSearchTimeout, '60515': exceptions.NsxSearchOutOfSync, '8327': exceptions.NsxOverlapVlan, '500045': exceptions.NsxPendingDelete, '500030': exceptions.ResourceInUse, '500105': exceptions.NsxOverlapAddresses, '503040': exceptions.NsxSegemntWithVM}, requests.codes.CONFLICT: exceptions.StaleRevision, requests.codes.PRECONDITION_FAILED: exceptions.StaleRevision, requests.codes.INTERNAL_SERVER_ERROR: {'98': exceptions.CannotConnectToServer, '99': exceptions.ClientCertificateNotTrusted, '607': exceptions.APITransactionAborted}, requests.codes.FORBIDDEN: {'98': exceptions.BadXSRFToken}, requests.codes.TOO_MANY_REQUESTS: exceptions.TooManyRequests, requests.codes.SERVICE_UNAVAILABLE: exceptions.ServiceUnavailable} if status_code in errors: if isinstance(errors[status_code], dict): # choose based on error code if error_code and str(error_code) in errors[status_code]: return errors[status_code][str(error_code)] elif 'default' in errors[status_code]: return errors[status_code]['default'] else: return errors[status_code] # default exception return exceptions.ManagerError class RESTClient(object): _VERB_RESP_CODES = { 'get': [requests.codes.ok], 'post': [requests.codes.created, requests.codes.ok], 'put': [requests.codes.created, requests.codes.ok], 'patch': [requests.codes.created, requests.codes.ok], 'delete': [requests.codes.ok] } def __init__(self, connection, url_prefix=None, default_headers=None, client_obj=None): self._conn = connection self._url_prefix = url_prefix or "" self._default_headers = default_headers or {} def new_client_for(self, *uri_segments): uri = self._build_url('/'.join(uri_segments)) return self.__class__( self._conn, url_prefix=uri, default_headers=self._default_headers, client_obj=self) def list(self, resource='', headers=None, silent=False): return self.url_list(resource, headers=headers, silent=silent) def get(self, uuid, 
headers=None, silent=False, with_retries=True): return self.url_get(uuid, headers=headers, silent=silent, with_retries=with_retries) def delete(self, uuid, headers=None, expected_results=None): return self.url_delete(uuid, headers=headers, expected_results=expected_results) def update(self, uuid, body=None, headers=None, expected_results=None): return self.url_put(uuid, body, headers=headers, expected_results=expected_results) def create(self, resource='', body=None, headers=None, expected_results=None): return self.url_post(resource, body, headers=headers, expected_results=expected_results) def patch(self, resource='', body=None, headers=None): return self.url_patch(resource, body, headers=headers) def url_list(self, url, headers=None, silent=False): concatenate_response = self.url_get(url, headers=headers, silent=silent) cursor = concatenate_response.get('cursor', NULL_CURSOR_PREFIX) op = '&' if urlparse.urlparse(url).query else '?' url += op + 'cursor=' while cursor and not cursor.startswith(NULL_CURSOR_PREFIX): page = self.url_get(url + cursor, headers=headers, silent=silent) concatenate_response['results'].extend(page.get('results', [])) cursor = page.get('cursor', NULL_CURSOR_PREFIX) return concatenate_response def url_get(self, url, headers=None, silent=False, with_retries=True): return self._rest_call(url, method='GET', headers=headers, silent=silent, with_retries=with_retries) def url_delete(self, url, headers=None, expected_results=None): return self._rest_call(url, method='DELETE', headers=headers, expected_results=expected_results) def url_put(self, url, body, headers=None, expected_results=None): return self._rest_call(url, method='PUT', body=body, headers=headers, expected_results=expected_results) def url_post(self, url, body, headers=None, expected_results=None): return self._rest_call(url, method='POST', body=body, headers=headers, expected_results=expected_results) def url_patch(self, url, body, headers=None): return self._rest_call(url, method='PATCH', body=body, headers=headers) def _raise_error(self, status_code, operation, result_msg, error_code=None, related_error_codes=None): error = http_error_to_exception(status_code, error_code) raise error(manager='', operation=operation, details=result_msg, error_code=error_code, related_error_codes=related_error_codes, status_code=status_code) def _validate_result(self, result, expected, operation, silent=False): if result.status_code not in expected: result_msg = result.json() if result.content else '' if not silent: LOG.warning("The HTTP request returned error code " "%(result)s, whereas %(expected)s response " "codes were expected. 
Response body %(body)s", {'result': result.status_code, 'expected': '/'.join([str(code) for code in expected]), 'body': result_msg}) error_code = None related_error_codes = [] if isinstance(result_msg, dict) and 'error_message' in result_msg: error_code = result_msg.get('error_code') related_errors = [error['error_message'] for error in result_msg.get('related_errors', [])] related_error_codes = [str(error['error_code']) for error in result_msg.get('related_errors', []) if error.get('error_code')] result_msg = result_msg['error_message'] if related_errors: result_msg += " relatedErrors: %s" % ' '.join( related_errors) self._raise_error(result.status_code, operation, result_msg, error_code=error_code, related_error_codes=related_error_codes) @classmethod def merge_headers(cls, *headers): merged = {} for header in headers: if header: merged.update(header) return merged def _build_url(self, uri): prefix = urlparse.urlparse(self._url_prefix) uri = ("/%s/%s" % (prefix.path, uri)).replace('//', '/').strip('/') if prefix.netloc: uri = "%s/%s" % (prefix.netloc, uri) if prefix.scheme: uri = "%s://%s" % (prefix.scheme, uri) return uri def _mask_password(self, json): '''Mask password value in json format''' if not json: return json pattern = r'\"password\": [^,}]*' return re.sub(pattern, '"password": "********"', json) def _rest_call(self, url, method='GET', body=None, headers=None, silent=False, expected_results=None, **kwargs): request_headers = headers.copy() if headers else {} request_headers.update(self._default_headers) if utils.INJECT_HEADERS_CALLBACK: inject_headers = utils.INJECT_HEADERS_CALLBACK() request_headers.update(inject_headers) request_url = self._build_url(url) do_request = getattr(self._conn, method.lower()) if not silent: LOG.debug("REST call: %s %s. Headers: %s. Body: %s", method, request_url, utils.censor_headers(request_headers), self._mask_password(body)) ts = time.time() result = do_request( request_url, data=body, headers=request_headers) te = time.time() if not silent: LOG.debug("REST call: %s %s. Response: %s. 
Took %2.4f", method, request_url, result.json() if result.content else '', te - ts) if not expected_results: expected_results = RESTClient._VERB_RESP_CODES[method.lower()] self._validate_result( result, expected_results, _("%(verb)s %(url)s") % {'verb': method, 'url': request_url}, silent=silent) return result class JSONRESTClient(RESTClient): _DEFAULT_HEADERS = { 'Accept': 'application/json', 'Content-Type': 'application/json' } def __init__(self, connection, url_prefix=None, default_headers=None, client_obj=None): super(JSONRESTClient, self).__init__( connection, url_prefix=url_prefix, default_headers=RESTClient.merge_headers( JSONRESTClient._DEFAULT_HEADERS, default_headers), client_obj=None) def _rest_call(self, *args, **kwargs): if kwargs.get('body') is not None: kwargs['body'] = jsonutils.dumps(kwargs['body'], sort_keys=True) result = super(JSONRESTClient, self)._rest_call(*args, **kwargs) return result.json() if result.content else result class NSX3Client(JSONRESTClient): NSX_V1_API_PREFIX = 'api/v1/' NSX_POLICY_V1_API_PREFIX = 'policy/api/v1/' def __init__(self, connection, url_prefix=None, default_headers=None, nsx_api_managers=None, max_attempts=utils.DEFAULT_MAX_ATTEMPTS, rate_limit_retry=True, client_obj=None, url_path_base=NSX_V1_API_PREFIX): # If the client obj is defined - copy configuration from it if client_obj: self.nsx_api_managers = client_obj.nsx_api_managers or [] self.max_attempts = client_obj.max_attempts self.rate_limit_retry = client_obj.rate_limit_retry else: self.nsx_api_managers = nsx_api_managers or [] self.max_attempts = max_attempts self.rate_limit_retry = rate_limit_retry url_prefix = url_prefix or url_path_base if url_prefix and url_path_base not in url_prefix: if url_prefix.startswith('http'): url_prefix += '/' + url_path_base else: url_prefix = "%s/%s" % (url_path_base, url_prefix or '') self.max_attempts = max_attempts super(NSX3Client, self).__init__( connection, url_prefix=url_prefix, default_headers=default_headers, client_obj=client_obj) def _raise_error(self, status_code, operation, result_msg, error_code=None, related_error_codes=None): """Override the Rest client errors to add the manager IPs""" error = http_error_to_exception(status_code, error_code) raise error(manager=self.nsx_api_managers, operation=operation, details=result_msg, error_code=error_code, related_error_codes=related_error_codes, status_code=status_code) def _rest_call(self, url, **kwargs): if kwargs.get('with_retries', True): # Retry on "607: Persistence layer is currently reconfiguring" # and on "98: Cannot connect to server" retry_codes = [exceptions.APITransactionAborted, exceptions.CannotConnectToServer] if self.rate_limit_retry: # If too many requests are handled by the nsx at the same time, # error "429: Too Many Requests" or "503: Server Unavailable" # will be returned. retry_codes.append(exceptions.ServerBusy) # the client is expected to retry after a random 400-600 milli, # and later exponentially until 5 seconds wait @utils.retry_random_upon_exception( tuple(retry_codes), max_attempts=self.max_attempts) def _rest_call_with_retry(self, url, **kwargs): return super(NSX3Client, self)._rest_call(url, **kwargs) return _rest_call_with_retry(self, url, **kwargs) else: return super(NSX3Client, self)._rest_call(url, **kwargs) vmware-nsxlib-15.0.6/vmware_nsxlib/v3/router.py0000664000175000017500000003324113623151571021534 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. 
# All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ NSX-V3 Plugin router module """ import copy from oslo_log import log from vmware_nsxlib._i18n import _ from vmware_nsxlib.v3 import exceptions from vmware_nsxlib.v3 import nsx_constants from vmware_nsxlib.v3 import utils LOG = log.getLogger(__name__) MIN_EDGE_NODE_NUM = 1 TIER0_ROUTER_LINK_PORT_NAME = "TIER0-RouterLinkPort" TIER1_ROUTER_LINK_PORT_NAME = "TIER1-RouterLinkPort" ROUTER_INTF_PORT_NAME = "Tier1-RouterDownLinkPort" FIP_NAT_PRI = 900 GW_NAT_PRI = 1000 class RouterLib(object): def __init__(self, router_client, router_port_client, nsxlib): self._router_client = router_client self._router_port_client = router_port_client self.nsxlib = nsxlib def validate_tier0(self, tier0_groups_dict, tier0_uuid): err_msg = None if not tier0_uuid: err_msg = _("validate_tier0 should be called with tier0 uuid") raise exceptions.NsxLibInvalidInput(error_message=err_msg) try: lrouter = self._router_client.get(tier0_uuid) except exceptions.ResourceNotFound: err_msg = (_("Tier0 router %s not found at the backend. Either a " "valid UUID must be specified or a default tier0 " "router UUID must be configured in nsx.ini") % tier0_uuid) else: edge_cluster_uuid = lrouter.get('edge_cluster_id') if not edge_cluster_uuid: err_msg = _("Failed to get edge cluster uuid from tier0 " "router %s at the backend") % lrouter else: edge_cluster = self.nsxlib.edge_cluster.get(edge_cluster_uuid) member_index_list = [member['member_index'] for member in edge_cluster['members']] if len(member_index_list) < MIN_EDGE_NODE_NUM: err_msg = _("%(act_num)s edge members found in " "edge_cluster %(cluster_id)s, however we " "require at least %(exp_num)s edge nodes " "in edge cluster for use") % { 'act_num': len(member_index_list), 'exp_num': MIN_EDGE_NODE_NUM, 'cluster_id': edge_cluster_uuid} if err_msg: raise exceptions.NsxLibInvalidInput(error_message=err_msg) else: tier0_groups_dict[tier0_uuid] = { 'edge_cluster_uuid': edge_cluster_uuid, 'member_index_list': member_index_list} def add_router_link_port(self, tier1_uuid, tier0_uuid, tags): # Create Tier0 logical router link port t0_tags = copy.copy(tags) t0_tags = utils.add_v3_tag(t0_tags, 'os-tier0-uuid', tier0_uuid) tier0_link_port = self._router_port_client.create( tier0_uuid, display_name=TIER0_ROUTER_LINK_PORT_NAME, tags=t0_tags, resource_type=nsx_constants.LROUTERPORT_LINKONTIER0, logical_port_id=None, address_groups=None) linked_logical_port_id = tier0_link_port['id'] # Create Tier1 logical router link port t1_tags = copy.copy(tags) t1_tags = utils.add_v3_tag(t1_tags, 'os-tier1-uuid', tier1_uuid) tier1_link_port = self._router_port_client.create( tier1_uuid, display_name=TIER1_ROUTER_LINK_PORT_NAME, tags=t1_tags, resource_type=nsx_constants.LROUTERPORT_LINKONTIER1, logical_port_id=linked_logical_port_id, address_groups=None) return tier0_link_port, tier1_link_port def remove_router_link_port(self, tier1_uuid, tier0_uuid=None): # Note(asarfaty): tier0_uuid is not used by this method and can # be removed. 
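        # Both ends of the link are removed below: the tier1 link port is
        # looked up first, the peer tier0 port id is read from its
        # 'linked_logical_router_port_id' reference, and then both ports
        # are deleted.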
try: tier1_link_port = ( self._router_port_client.get_tier1_link_port(tier1_uuid)) except exceptions.ResourceNotFound: LOG.warning("Logical router link port for tier1 router: %s " "not found at the backend", tier1_uuid) return tier1_link_port_id = tier1_link_port['id'] tier0_link_port_id = ( tier1_link_port['linked_logical_router_port_id'].get('target_id')) self._router_port_client.delete(tier1_link_port_id) self._router_port_client.delete(tier0_link_port_id) def add_centralized_service_port( self, logical_router_id, display_name=None, tags=None, logical_port_id=None, address_groups=None): return self._router_port_client.create( logical_router_id, display_name=display_name, tags=tags, logical_port_id=logical_port_id, address_groups=address_groups, resource_type=nsx_constants.LROUTERPORT_CENTRALIZED) def update_advertisement(self, logical_router_id, advertise_route_nat, advertise_route_connected, advertise_route_static=False, enabled=True, advertise_lb_vip=False, advertise_lb_snat_ip=False): return self.nsxlib.logical_router.update_advertisement( logical_router_id, advertise_nat_routes=advertise_route_nat, advertise_nsx_connected_routes=advertise_route_connected, advertise_static_routes=advertise_route_static, enabled=enabled, advertise_lb_vip=advertise_lb_vip, advertise_lb_snat_ip=advertise_lb_snat_ip) def delete_gw_snat_rule(self, logical_router_id, gw_ip): """Delete router snat rule matching the gw ip assuming there is only one """ return self.nsxlib.logical_router.delete_nat_rule_by_values( logical_router_id, translated_network=gw_ip) def delete_gw_snat_rule_by_source(self, logical_router_id, gw_ip, source_net, skip_not_found=False): """Delete router snat rule matching the gw ip & source""" return self.nsxlib.logical_router.delete_nat_rule_by_values( logical_router_id, translated_network=gw_ip, match_source_network=source_net, # Do not fail or warn if not found, unless asked for skip_not_found=skip_not_found, strict_mode=(not skip_not_found)) def delete_gw_snat_rules(self, logical_router_id, gw_ip): """Delete all the snat rules on the router with a specific gw ip""" return self.nsxlib.logical_router.delete_nat_rule_by_values( logical_router_id, translated_network=gw_ip, # Do not fail or warn if not found skip_not_found=True, strict_mode=False) def add_gw_snat_rule(self, logical_router_id, gw_ip, source_net=None, bypass_firewall=True, tags=None, display_name=None): return self.nsxlib.logical_router.add_nat_rule( logical_router_id, action="SNAT", translated_network=gw_ip, source_net=source_net, rule_priority=GW_NAT_PRI, bypass_firewall=bypass_firewall, tags=tags, display_name=display_name) def update_router_edge_cluster(self, nsx_router_id, edge_cluster_uuid): return self._router_client.update(nsx_router_id, edge_cluster_id=edge_cluster_uuid) def update_router_transport_zone(self, nsx_router_id, transport_zone_id): return self._router_client.update(nsx_router_id, transport_zone_id=transport_zone_id) def create_logical_router_intf_port_by_ls_id(self, logical_router_id, display_name, tags, ls_id, logical_switch_port_id, address_groups, urpf_mode=None, relay_service_uuid=None, resource_type=None): try: port = self._router_port_client.get_by_lswitch_id(ls_id) except exceptions.ResourceNotFound: if resource_type is None: resource_type = nsx_constants.LROUTERPORT_DOWNLINK return self._router_port_client.create( logical_router_id, display_name, tags, resource_type, logical_switch_port_id, address_groups, urpf_mode=urpf_mode, relay_service_uuid=relay_service_uuid) else: return 
self._router_port_client.update( port['id'], subnets=address_groups, relay_service_uuid=relay_service_uuid) def add_fip_nat_rules(self, logical_router_id, ext_ip, int_ip, match_ports=None, bypass_firewall=True, tags=None, display_name=None): self.nsxlib.logical_router.add_nat_rule( logical_router_id, action="SNAT", translated_network=ext_ip, source_net=int_ip, rule_priority=FIP_NAT_PRI, bypass_firewall=bypass_firewall, tags=tags, display_name=display_name) self.nsxlib.logical_router.add_nat_rule( logical_router_id, action="DNAT", translated_network=int_ip, dest_net=ext_ip, rule_priority=FIP_NAT_PRI, match_ports=match_ports, bypass_firewall=bypass_firewall, tags=tags, display_name=display_name) def delete_fip_nat_rules_by_internal_ip(self, logical_router_id, int_ip): self.nsxlib.logical_router.delete_nat_rule_by_values( logical_router_id, action="SNAT", match_source_network=int_ip) self.nsxlib.logical_router.delete_nat_rule_by_values( logical_router_id, action="DNAT", translated_network=int_ip) def delete_fip_nat_rules(self, logical_router_id, ext_ip, int_ip): self.nsxlib.logical_router.delete_nat_rule_by_values( logical_router_id, action="SNAT", translated_network=ext_ip, match_source_network=int_ip) self.nsxlib.logical_router.delete_nat_rule_by_values( logical_router_id, action="DNAT", translated_network=int_ip, match_destination_network=ext_ip) def add_static_routes(self, nsx_router_id, route): return self.nsxlib.logical_router.add_static_route( nsx_router_id, route['destination'], route['nexthop']) def change_edge_firewall_status(self, nsx_router_id, action=nsx_constants.FW_DISABLE): return self.nsxlib.logical_router.change_edge_firewall_status( nsx_router_id, action) def delete_static_routes(self, nsx_router_id, route): return self.nsxlib.logical_router.delete_static_route_by_values( nsx_router_id, dest_cidr=route['destination'], nexthop=route['nexthop']) def has_service_router(self, nsx_router_id): lrouter = self._router_client.get(nsx_router_id) if lrouter and lrouter.get('edge_cluster_id'): return True return False def get_tier0_router_tz(self, tier0_uuid): lrouter = self._router_client.get(tier0_uuid) edge_cluster_uuid = lrouter.get('edge_cluster_id') if not edge_cluster_uuid: return [] tier0_transport_nodes = self.nsxlib.edge_cluster.get_transport_nodes( edge_cluster_uuid) tier0_tzs = [] for tn_uuid in tier0_transport_nodes: tier0_tzs.extend(self.nsxlib.transport_node.get_transport_zones( tn_uuid)) return tier0_tzs def get_tier0_router_overlay_tz(self, tier0_uuid): lrouter = self._router_client.get(tier0_uuid) tz_uuid = lrouter.get('advanced_config', {}).get('transport_zone_id') if tz_uuid: return tz_uuid tz_uuids = self.get_tier0_router_tz(tier0_uuid) for tz_uuid in tz_uuids: # look for the overlay tz backend_type = self.nsxlib.transport_zone.get_transport_type( tz_uuid) if (backend_type == self.nsxlib.transport_zone.TRANSPORT_TYPE_OVERLAY): return tz_uuid def get_connected_t0_transit_net(self, tier1_uuid): """Return the IP of the tier1->tier0 link port return None if the router is not connected to a tier0 router """ try: tier1_link_port = ( self._router_port_client.get_tier1_link_port(tier1_uuid)) except exceptions.ResourceNotFound: # No GW return for subnet in tier1_link_port.get('subnets', []): for ip_address in subnet.get('ip_addresses'): # Expecting only 1 ip here. Return it. 
return ip_address vmware-nsxlib-15.0.6/vmware_nsxlib/v3/security.py0000664000175000017500000006720313623151571022070 0ustar zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ NSX-V3 Plugin security & Distributed Firewall integration module """ from distutils import version from oslo_log import log from oslo_log import versionutils from oslo_utils import excutils from vmware_nsxlib.v3 import exceptions from vmware_nsxlib.v3 import nsx_constants as consts from vmware_nsxlib.v3 import utils LOG = log.getLogger(__name__) PORT_SG_SCOPE = 'os-security-group' class NsxLibNsGroup(utils.NsxLibApiBase): def __init__(self, client, nsxlib_config, firewall_section_handler, nsxlib=None): self.firewall_section = firewall_section_handler super(NsxLibNsGroup, self).__init__(client, nsxlib_config, nsxlib=nsxlib) @property def uri_segment(self): return 'ns-groups' @property def resource_type(self): return 'NSGroup' def update_nsgroup_and_section(self, security_group, nsgroup_id, section_id, log_sg_allowed_traffic): name = self.get_name(security_group) description = security_group['description'] logging = (log_sg_allowed_traffic or security_group.get(consts.LOGGING, False)) rules = self.firewall_section._process_rules_logging_for_update( section_id, logging) self.update(nsgroup_id, name, description) self.firewall_section.update(section_id, name, description, rules=rules) def update_on_backend(self, context, security_group, nsgroup_id, section_id, log_sg_allowed_traffic): # This api is deprecated because of the irrelevant context arg versionutils.report_deprecated_feature( LOG, 'security.NsxLibNsGroup.update_on_backend is deprecated. ' 'Please use security.NsxLibNsGroup.update_nsgroup_and_section ' 'instead.') return self.update_nsgroup_and_section(security_group, nsgroup_id, section_id, log_sg_allowed_traffic) def get_name(self, security_group): # NOTE(roeyc): We add the security-group id to the NSGroup name, # for usability purposes. return '%(name)s - %(id)s' % security_group def get_lport_tags(self, secgroups): if len(secgroups) > utils.MAX_NSGROUPS_CRITERIA_TAGS: raise exceptions.NumberOfNsgroupCriteriaTagsReached( max_num=utils.MAX_NSGROUPS_CRITERIA_TAGS) tags = [] for sg in secgroups: tags = utils.add_v3_tag(tags, PORT_SG_SCOPE, sg) if not tags: # This port shouldn't be associated with any security-group tags = [{'scope': PORT_SG_SCOPE, 'tag': None}] return tags def update_lport_nsgroups(self, lport_id, original_nsgroups, updated_nsgroups): """Update the NSgroups that the logical ports belongs to""" added = set(updated_nsgroups) - set(original_nsgroups) removed = set(original_nsgroups) - set(updated_nsgroups) for nsgroup_id in added: try: self.add_members( nsgroup_id, consts.TARGET_TYPE_LOGICAL_PORT, [lport_id]) except exceptions.NSGroupIsFull: for nsgroup_id in added: # NOTE(roeyc): If the port was not added to the nsgroup # yet, then this request will silently fail. 
self.remove_member( nsgroup_id, consts.TARGET_TYPE_LOGICAL_PORT, lport_id) raise exceptions.SecurityGroupMaximumCapacityReached( sg_id=nsgroup_id) except exceptions.ResourceNotFound: with excutils.save_and_reraise_exception(): LOG.error("NSGroup %s doesn't exists", nsgroup_id) for nsgroup_id in removed: self.remove_member( nsgroup_id, consts.TARGET_TYPE_LOGICAL_PORT, lport_id) def update_lport(self, context, lport_id, original, updated): # This api is deprecated because of the irrelevant context arg versionutils.report_deprecated_feature( LOG, 'security.NsxLibNsGroup.update_lport is deprecated. ' 'Please use security.NsxLibNsGroup.update_lport_nsgroups instead.') return self.update_lport_nsgroups(lport_id, original, updated) def get_nsservice(self, resource_type, **properties): service = {'resource_type': resource_type} service.update(properties) return {'service': service} def get_nsgroup_complex_expression(self, expressions): return {'resource_type': consts.NSGROUP_COMPLEX_EXP, 'expressions': expressions} def get_switch_tag_expression(self, scope, tag): return {'resource_type': consts.NSGROUP_TAG_EXP, 'target_type': consts.TARGET_TYPE_LOGICAL_SWITCH, 'scope': scope, 'tag': tag} def get_port_tag_expression(self, scope, tag): return {'resource_type': consts.NSGROUP_TAG_EXP, 'target_type': consts.TARGET_TYPE_LOGICAL_PORT, 'scope': scope, 'tag': tag} def create(self, display_name, description, tags, membership_criteria=None, members=None): body = {'display_name': display_name, 'description': description, 'tags': tags, 'members': [] if members is None else members} if membership_criteria: # Allow caller to pass a list of membership criterias. # The 'else' block is maintained for backwards compatibility # where in a caller might only send a single membership criteria. 
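            # For example (illustrative scope/tag), a criteria built with
            # get_port_tag_expression('os-security-group', sg_id) makes every
            # logical port carrying that scope/tag pair a member of this
            # NSGroup.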
if isinstance(membership_criteria, list): body.update({'membership_criteria': membership_criteria}) else: body.update({'membership_criteria': [membership_criteria]}) return self.client.create(self.get_path(), body) def list(self): return self.client.list( '%s?populate_references=false' % self.get_path()).get( 'results', []) def update(self, nsgroup_id, display_name=None, description=None, membership_criteria=None, members=None, tags_update=None): nsgroup = {} if display_name is not None: nsgroup['display_name'] = display_name if description is not None: nsgroup['description'] = description if members is not None: nsgroup['members'] = members if membership_criteria is not None: if isinstance(membership_criteria, list): nsgroup['membership_criteria'] = membership_criteria else: nsgroup['membership_criteria'] = [membership_criteria] if tags_update is not None: nsgroup['tags_update'] = tags_update return self._update_resource( self.get_path(nsgroup_id), nsgroup, get_params='?populate_references=true', retry=True) def get_member_expression(self, target_type, target_id): return { 'resource_type': consts.NSGROUP_SIMPLE_EXP, 'target_property': 'id', 'target_type': target_type, 'op': consts.EQUALS, 'value': target_id} def _update_with_members(self, nsgroup_id, members, action): members_update = '%s?action=%s' % (self.get_path(nsgroup_id), action) return self.client.create(members_update, members) def add_members(self, nsgroup_id, target_type, target_ids): members = [] for target_id in target_ids: member_expr = self.get_member_expression( target_type, target_id) members.append(member_expr) members = {'members': members} try: return self._update_with_members( nsgroup_id, members, consts.NSGROUP_ADD_MEMBERS) except (exceptions.StaleRevision, exceptions.ResourceNotFound): raise except exceptions.ManagerError: # REVISIT(roeyc): A ManagerError might have been raised for a # different reason, e.g - NSGroup does not exists. LOG.warning("Failed to add %(target_type)s resources " "(%(target_ids)s) to NSGroup %(nsgroup_id)s", {'target_type': target_type, 'target_ids': target_ids, 'nsgroup_id': nsgroup_id}) raise exceptions.NSGroupIsFull(nsgroup_id=nsgroup_id) def remove_member(self, nsgroup_id, target_type, target_id, verify=False): member_expr = self.get_member_expression( target_type, target_id) members = {'members': [member_expr]} try: return self._update_with_members( nsgroup_id, members, consts.NSGROUP_REMOVE_MEMBERS) except exceptions.ManagerError: if verify: raise exceptions.NSGroupMemberNotFound(member_id=target_id, nsgroup_id=nsgroup_id) def read(self, nsgroup_id): return self.client.get( '%s?populate_references=true' % self.get_path(nsgroup_id)) def delete(self, nsgroup_id): try: return self.client.delete( '%s?force=true' % self.get_path(nsgroup_id)) # FIXME(roeyc): Should only except NotFound error. 
except Exception: LOG.debug("NSGroup %s does not exists for delete request.", nsgroup_id) def find_by_display_name(self, display_name): found = [] for resource in self.list(): if resource['display_name'] == display_name: found.append(resource) return found class NsxLibFirewallSection(utils.NsxLibApiBase): @property def uri_segment(self): return 'firewall/sections' @property def resource_type(self): return 'FirewallSection' def add_member_to_fw_exclude_list(self, target_id, target_type): resource = 'firewall/excludelist?action=add_member' body = {"target_id": target_id, "target_type": target_type} self._create_with_retry(resource, body) def remove_member_from_fw_exclude_list(self, target_id, target_type): resource = ('firewall/excludelist?action=remove_member&object_id=' + target_id) self._create_with_retry(resource) def get_excludelist(self): return self.client.list('firewall/excludelist') def _get_direction(self, sg_rule): return ( consts.IN if sg_rule['direction'] == 'ingress' else consts.OUT ) def get_nsservice(self, resource_type, **properties): service = {'resource_type': resource_type} service.update(properties) return {'service': service} def _decide_service(self, sg_rule): l4_protocol = utils.get_l4_protocol_name(sg_rule['protocol']) if l4_protocol in [consts.TCP, consts.UDP]: # If port_range_min is not specified then we assume all ports are # matched, relying on neutron to perform validation. if sg_rule['port_range_min'] is None: destination_ports = [] elif sg_rule['port_range_min'] != sg_rule['port_range_max']: # NSX API requires a non-empty range (e.g - '22-23') destination_ports = ['%(port_range_min)s-%(port_range_max)s' % sg_rule] else: destination_ports = ['%(port_range_min)s' % sg_rule] return self.get_nsservice( consts.L4_PORT_SET_NSSERVICE, l4_protocol=l4_protocol, source_ports=[], destination_ports=destination_ports) elif l4_protocol == consts.ICMPV4: # Validate the icmp type & code icmp_type = sg_rule['port_range_min'] icmp_code = sg_rule['port_range_max'] icmp_strict = self.nsxlib.feature_supported( consts.FEATURE_ICMP_STRICT) utils.validate_icmp_params(icmp_type, icmp_code, icmp_version=4, strict=icmp_strict) return self.get_nsservice( consts.ICMP_TYPE_NSSERVICE, protocol=l4_protocol, icmp_type=icmp_type, icmp_code=icmp_code) elif l4_protocol is not None: return self.get_nsservice( consts.IP_PROTOCOL_NSSERVICE, protocol_number=l4_protocol) def _build(self, display_name, description, applied_tos, tags): return {'display_name': display_name, 'description': description, 'stateful': True, 'section_type': consts.FW_SECTION_LAYER3, 'applied_tos': [self.get_nsgroup_reference(t_id) for t_id in applied_tos], 'tags': tags} def create_empty(self, display_name, description, applied_tos, tags, operation=consts.FW_INSERT_BOTTOM, other_section=None): resource = '%s?operation=%s' % (self.uri_segment, operation) body = self._build(display_name, description, applied_tos, tags) if other_section: resource += '&id=%s' % other_section return self._create_with_retry(resource, body) def create_with_rules(self, display_name, description, applied_tos=None, tags=None, operation=consts.FW_INSERT_BOTTOM, other_section=None, rules=None): resource = '%s?operation=%s' % (self.uri_segment, operation) body = { 'display_name': display_name, 'description': description, 'stateful': True, 'section_type': consts.FW_SECTION_LAYER3, 'applied_tos': applied_tos or [], 'tags': tags or [] } if rules is not None: resource += '&action=create_with_rules' body['rules'] = rules if other_section: resource += '&id=%s' % 
other_section return self._create_with_retry(resource, body) def update(self, section_id, display_name=None, description=None, applied_tos=None, rules=None, tags_update=None, force=False): resource = self.get_path(section_id) params = None section = {} if rules is not None: params = '?action=update_with_rules' section['rules'] = rules if display_name is not None: section['display_name'] = display_name if description is not None: section['description'] = description if applied_tos is not None: section['applied_tos'] = [self.get_nsgroup_reference(nsg_id) for nsg_id in applied_tos] if tags_update is not None: section['tags_update'] = tags_update headers = None if force: # shared sections (like default section) can serve multiple # openstack deployments. If some operate under protected # identities, force-overwrite is needed. # REVISIT(annak): find better solution for shared sections headers = {'X-Allow-Overwrite': 'true'} if rules is not None: return self._update_resource(resource, section, headers=headers, create_action=True, action_params=params, retry=True) elif any(p is not None for p in (display_name, description, applied_tos, tags_update)): return self._update_resource(resource, section, headers=headers, action_params=params, retry=True) def list(self): return self.client.list(self.get_path()).get('results', []) def delete(self, section_id): resource = '%s?cascade=true' % section_id return self._delete_with_retry(resource) def get_nsgroup_reference(self, nsgroup_id): return {'target_id': nsgroup_id, 'target_type': consts.NSGROUP} def get_logicalport_reference(self, port_id): return {'target_id': port_id, 'target_type': consts.TARGET_TYPE_LOGICAL_PORT} def get_ip_cidr_reference(self, ip_cidr_block, ip_protocol): target_type = (consts.TARGET_TYPE_IPV4ADDRESS if ip_protocol == consts.IPV4 else consts.TARGET_TYPE_IPV6ADDRESS) return {'target_id': ip_cidr_block, 'target_type': target_type} def get_rule_address(self, target_id, display_name=None, is_valid=True, target_type=consts.TARGET_TYPE_IPV4ADDRESS): return {'target_display_name': display_name or '', 'target_id': target_id, 'is_valid': is_valid, 'target_type': target_type} def get_l4portset_nsservice(self, sources=None, destinations=None, protocol=consts.TCP): return { 'service': { 'resource_type': 'L4PortSetNSService', 'source_ports': sources or [], 'destination_ports': destinations or [], 'l4_protocol': protocol} } def get_rule_dict(self, display_name, sources=None, destinations=None, direction=consts.IN_OUT, ip_protocol=consts.IPV4_IPV6, services=None, action=consts.FW_ACTION_ALLOW, logged=False, disabled=False, applied_tos=None): rule_dict = {'display_name': display_name, 'direction': direction, 'ip_protocol': ip_protocol, 'action': action, 'logged': logged, 'disabled': disabled, 'sources': sources or [], 'destinations': destinations or [], 'services': services or []} if applied_tos is not None: rule_dict['applied_tos'] = applied_tos return rule_dict def add_rule(self, rule, section_id, operation=consts.FW_INSERT_BOTTOM): resource = '%s/rules' % self.get_path(section_id) params = '?operation=%s' % operation if (version.LooseVersion(self.nsxlib.get_version()) >= version.LooseVersion(consts.NSX_VERSION_2_4_0)): rule['_revision'] = self.get(section_id)['_revision'] return self._create_with_retry(resource + params, rule) def add_rules(self, rules, section_id, operation=consts.FW_INSERT_BOTTOM): resource = '%s/rules' % self.get_path(section_id) params = '?action=create_multiple&operation=%s' % operation if 
(version.LooseVersion(self.nsxlib.get_version()) >= version.LooseVersion(consts.NSX_VERSION_2_4_0)): rev_id = self.get(section_id)['_revision'] for rule in rules: rule['_revision'] = rev_id return self._create_with_retry(resource + params, {'rules': rules}) def delete_rule(self, section_id, rule_id): resource = '%s/rules/%s' % (section_id, rule_id) return self._delete_with_retry(resource) def get_rules(self, section_id): resource = '%s/rules' % self.get_path(section_id) return self.client.get(resource) def get_default_rule(self, section_id): rules = self.get_rules(section_id)['results'] last_rule = rules[-1] if last_rule['is_default']: return last_rule def _get_fw_rule_from_sg_rule(self, sg_rule, nsgroup_id, rmt_nsgroup_id, logged, action): # IPV4 or IPV6 ip_protocol = sg_rule['ethertype'].upper() direction = self._get_direction(sg_rule) if sg_rule.get(consts.LOCAL_IP_PREFIX): local_ip_prefix = self.get_ip_cidr_reference( sg_rule[consts.LOCAL_IP_PREFIX], ip_protocol) else: local_ip_prefix = None source = None local_group = self.get_nsgroup_reference(nsgroup_id) if sg_rule['remote_ip_prefix'] is not None: source = self.get_ip_cidr_reference( sg_rule['remote_ip_prefix'], ip_protocol) destination = local_ip_prefix or local_group else: if rmt_nsgroup_id: source = self.get_nsgroup_reference(rmt_nsgroup_id) destination = local_ip_prefix or local_group if direction == consts.OUT: source, destination = destination, source service = self._decide_service(sg_rule) name = sg_rule['id'] return self.get_rule_dict(name, [source] if source else None, [destination] if destination else None, direction, ip_protocol, [service] if service else None, action, logged) def create_section_rules(self, section_id, nsgroup_id, logging_enabled, action, security_group_rules, ruleid_2_remote_nsgroup_map): # 1. translate rules # 2. insert in section # 3. return the rules firewall_rules = [] for sg_rule in security_group_rules: remote_nsgroup_id = ruleid_2_remote_nsgroup_map[sg_rule['id']] fw_rule = self._get_fw_rule_from_sg_rule( sg_rule, nsgroup_id, remote_nsgroup_id, logging_enabled, action) firewall_rules.append(fw_rule) return self.add_rules(firewall_rules, section_id) def create_rules(self, context, section_id, nsgroup_id, logging_enabled, action, security_group_rules, ruleid_2_remote_nsgroup_map): # This api is deprecated because of the irrelevant context arg versionutils.report_deprecated_feature( LOG, 'security.NsxLibFirewallSection.create_rules is deprecated. 
' 'Please use security.NsxLibFirewallSection.create_section_rules ' 'instead.') return self.create_section_rules( section_id, nsgroup_id, logging_enabled, action, security_group_rules, ruleid_2_remote_nsgroup_map) def set_rule_logging(self, section_id, logging): rules = self._process_rules_logging_for_update( section_id, logging) self.update(section_id, rules=rules) def _process_rules_logging_for_update(self, section_id, logging_enabled): rules = self.get_rules(section_id).get('results', []) update_rules = False for rule in rules: if rule['logged'] != logging_enabled: rule['logged'] = logging_enabled update_rules = True return rules if update_rules else None def init_default(self, name, description, nested_groups, log_sg_blocked_traffic): LOG.info("Initializing the default section named %s", name) fw_sections = self.list() for section in reversed(fw_sections): if section['display_name'] == name: LOG.info("Found existing default section %s", section['id']) break else: tags = self.build_v3_api_version_tag() section = self.create_empty( name, description, nested_groups, tags) LOG.info("Creating a new default section %s", section['id']) block_rule = self.get_rule_dict( 'Block All', action=consts.FW_ACTION_DROP, logged=log_sg_blocked_traffic) # TODO(roeyc): Add additional rules to allow IPV6 NDP. dhcp_client = self.get_nsservice( consts.L4_PORT_SET_NSSERVICE, l4_protocol=consts.UDP, source_ports=[67], destination_ports=[68]) dhcp_client_rule_in = self.get_rule_dict( 'DHCP Reply', direction=consts.IN, services=[dhcp_client]) dhcp_server = ( self.get_nsservice( consts.L4_PORT_SET_NSSERVICE, l4_protocol=consts.UDP, source_ports=[68], destination_ports=[67])) dhcp_client_rule_out = self.get_rule_dict( 'DHCP Request', direction=consts.OUT, services=[dhcp_server]) self.update(section['id'], name, section['description'], applied_tos=nested_groups, rules=[dhcp_client_rule_out, dhcp_client_rule_in, block_rule], force=True) return section['id'] class NsxLibIPSet(utils.NsxLibApiBase): @property def uri_segment(self): return 'ip-sets' @property def resource_type(self): return 'IPSet' def create(self, display_name, description=None, ip_addresses=None, tags=None): body = { 'display_name': display_name, 'description': description or '', 'ip_addresses': ip_addresses or [], 'tags': tags or [] } return self.client.create(self.get_path(), body) def update(self, ip_set_id, display_name=None, description=None, ip_addresses=None, tags_update=None, update_payload_cbk=None): # The update_payload_cbk function takes two arguments. # The first one is the result from the internal GET request. # The second one is a dict of user-provided attributes, # which can be changed inside the callback function and # used as the new payload for the following PUT request. # For example, users want to combine the new ip_addresses # passed to update() with the original ip_addresses retrieved # from the internal GET request instead of overriding the # original ip_addresses. 
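# A sketch of such a callback (illustrative only; ipset_api stands for an
# already-initialized NsxLibIPSet instance): it merges the ip_addresses
# passed to update() with the ones returned by the internal GET request
# instead of replacing them.
def merge_ip_addresses_cbk(get_result, payload_attrs):
    existing = get_result.get('ip_addresses', [])
    requested = payload_attrs.get('ip_addresses') or []
    payload_attrs['ip_addresses'] = list(set(existing) | set(requested))

ipset_api.update('ipset-uuid', ip_addresses=['192.0.2.10'],
                 update_payload_cbk=merge_ip_addresses_cbk)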
ip_set = {} if tags_update: ip_set['tags_update'] = tags_update if display_name is not None: ip_set['display_name'] = display_name if description is not None: ip_set['description'] = description if ip_addresses is not None: ip_set['ip_addresses'] = ip_addresses return self._update_resource(self.get_path(ip_set_id), ip_set, retry=True, update_payload_cbk=update_payload_cbk) def read(self, ip_set_id): return self.client.get('ip-sets/%s' % ip_set_id) def delete(self, ip_set_id): self._delete_with_retry(ip_set_id) def get_ipset_reference(self, ip_set_id): return {'target_id': ip_set_id, 'target_type': consts.IP_SET} vmware-nsxlib-15.0.6/vmware_nsxlib/v3/cluster_management.py0000664000175000017500000000161513623151571024071 0ustar zuulzuul00000000000000# Copyright 2019 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from vmware_nsxlib.v3 import utils BASE_SECTION = 'cluster' RESTORE_SECTION = BASE_SECTION + '/restore' class NsxLibClusterManagement(utils.NsxLibApiBase): def get_restore_status(self): resource = RESTORE_SECTION + '/status' return self.client.get(resource) vmware-nsxlib-15.0.6/vmware_nsxlib/version.py0000664000175000017500000000125413623151571021350 0ustar zuulzuul00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pbr.version version_info = pbr.version.VersionInfo('vmware-nsxlib') vmware-nsxlib-15.0.6/vmware_nsxlib/tests/0000775000175000017500000000000013623151652020451 5ustar zuulzuul00000000000000vmware-nsxlib-15.0.6/vmware_nsxlib/tests/base.py0000664000175000017500000000143213623151571021735 0ustar zuulzuul00000000000000# -*- coding: utf-8 -*- # Copyright 2010-2011 OpenStack Foundation # Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
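# Usage sketch for the IP set API defined above (assumption: ipset_api is an
# already-initialized NsxLibIPSet instance; the addresses and uuids are
# placeholders):
ipset = ipset_api.create('allowed-sources', description='example IP set',
                         ip_addresses=['192.0.2.1', '198.51.100.0/24'])
ipset_api.update(ipset['id'], ip_addresses=['192.0.2.1', '192.0.2.2'])
rule_ref = ipset_api.get_ipset_reference(ipset['id'])
ipset_api.delete(ipset['id'])
# Similarly, NsxLibClusterManagement.get_restore_status() simply returns the
# raw result of GET cluster/restore/status for the caller to inspect.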
from oslotest import base class TestCase(base.BaseTestCase): """Test case base class for all unit tests.""" vmware-nsxlib-15.0.6/vmware_nsxlib/tests/__init__.py0000664000175000017500000000000013623151571022550 0ustar zuulzuul00000000000000vmware-nsxlib-15.0.6/vmware_nsxlib/tests/unit/0000775000175000017500000000000013623151652021430 5ustar zuulzuul00000000000000vmware-nsxlib-15.0.6/vmware_nsxlib/tests/unit/__init__.py0000664000175000017500000000000013623151571023527 0ustar zuulzuul00000000000000vmware-nsxlib-15.0.6/vmware_nsxlib/tests/unit/v3/0000775000175000017500000000000013623151652021760 5ustar zuulzuul00000000000000vmware-nsxlib-15.0.6/vmware_nsxlib/tests/unit/v3/test_vpn_ipsec.py0000664000175000017500000003451513623151571025367 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_serialization import jsonutils from vmware_nsxlib.tests.unit.v3 import test_client from vmware_nsxlib.tests.unit.v3 import test_constants from vmware_nsxlib.tests.unit.v3 import test_resources from vmware_nsxlib.v3 import vpn_ipsec class TestIkeProfile(test_resources.BaseTestResource): def setUp(self): super(TestIkeProfile, self).setUp( vpn_ipsec.IkeProfile) def test_ike_profile_create(self): mocked_resource = self.get_mocked_resource() name = 'ike_profile' description = 'desc' enc_alg = vpn_ipsec.EncryptionAlgorithmTypes.ENCRYPTION_ALGORITHM_128 dig_alg = vpn_ipsec.DigestAlgorithmTypes.DIGEST_ALGORITHM_SHA1 ike_ver = vpn_ipsec.IkeVersionTypes.IKE_VERSION_V1 dh_group = vpn_ipsec.DHGroupTypes.DH_GROUP_14 lifetime = 100 mocked_resource.create(name, description=description, encryption_algorithm=enc_alg, digest_algorithm=dig_alg, ike_version=ike_ver, dh_group=dh_group, sa_life_time=lifetime) test_client.assert_json_call( 'post', mocked_resource, 'https://1.2.3.4/api/v1/%s' % mocked_resource.uri_segment, data=jsonutils.dumps({ 'display_name': name, 'description': description, 'encryption_algorithms': [enc_alg], 'digest_algorithms': [dig_alg], 'ike_version': ike_ver, 'dh_groups': [dh_group], 'sa_life_time': lifetime }, sort_keys=True), headers=self.default_headers()) class TestIPSecTunnelProfile(test_resources.BaseTestResource): def setUp(self): super(TestIPSecTunnelProfile, self).setUp( vpn_ipsec.IPSecTunnelProfile) def test_ipsec_profile_create(self): mocked_resource = self.get_mocked_resource() name = 'ipsec_profile' description = 'desc' enc_alg = vpn_ipsec.EncryptionAlgorithmTypes.ENCRYPTION_ALGORITHM_128 dig_alg = vpn_ipsec.DigestAlgorithmTypes.DIGEST_ALGORITHM_SHA1 dh_group = vpn_ipsec.DHGroupTypes.DH_GROUP_14 lifetime = 100 mocked_resource.create(name, description=description, encryption_algorithm=enc_alg, digest_algorithm=dig_alg, pfs=True, dh_group=dh_group, sa_life_time=lifetime) test_client.assert_json_call( 'post', mocked_resource, 'https://1.2.3.4/api/v1/%s' % mocked_resource.uri_segment, data=jsonutils.dumps({ 'display_name': name, 'description': description, 'encryption_algorithms': [enc_alg], 'digest_algorithms': [dig_alg], 
'enable_perfect_forward_secrecy': True, 'dh_groups': [dh_group], 'sa_life_time': lifetime }, sort_keys=True), headers=self.default_headers()) class TestIPSecDpdProfile(test_resources.BaseTestResource): def setUp(self): super(TestIPSecDpdProfile, self).setUp( vpn_ipsec.IPSecDpdProfile) def test_dpd_profile_create(self): mocked_resource = self.get_mocked_resource() name = 'dpd_profile' description = 'desc' timeout = 100 enabled = True mocked_resource.create(name, description=description, timeout=timeout, enabled=enabled) test_client.assert_json_call( 'post', mocked_resource, 'https://1.2.3.4/api/v1/%s' % mocked_resource.uri_segment, data=jsonutils.dumps({ 'display_name': name, 'description': description, 'dpd_probe_interval': timeout, 'enabled': enabled }, sort_keys=True), headers=self.default_headers()) def test_dpd_profile_update(self): fake_dpd = test_constants.FAKE_DPD.copy() new_timeout = 1000 new_name = 'dpd_profile_updated' new_desc = 'desc updated' uuid = test_constants.FAKE_DPD_ID mocked_resource = self.get_mocked_resource(response=fake_dpd) mocked_resource.update(uuid, timeout=new_timeout, name=new_name, description=new_desc) fake_dpd['dpd_probe_interval'] = new_timeout fake_dpd['display_name'] = new_name fake_dpd['description'] = new_desc test_client.assert_json_call( 'put', mocked_resource, 'https://1.2.3.4/api/v1/%s/%s' % (mocked_resource.uri_segment, uuid), data=jsonutils.dumps(fake_dpd, sort_keys=True), headers=self.default_headers()) class TestIPSecPeerEndpoint(test_resources.BaseTestResource): def setUp(self): super(TestIPSecPeerEndpoint, self).setUp( vpn_ipsec.IPSecPeerEndpoint) def test_peer_endpoint_create(self): mocked_resource = self.get_mocked_resource() name = 'peerep' description = 'desc' peer_address = peer_id = '1.1.1.1' authentication_mode = 'PSK' dpd_profile_id = 'uuid1' ike_profile_id = 'uuid2' ipsec_profile_id = 'uuid3' initiation_mode = 'INITIATOR' psk = 'secret' mocked_resource.create(name, peer_address, peer_id, description=description, authentication_mode=authentication_mode, dpd_profile_id=dpd_profile_id, ike_profile_id=ike_profile_id, ipsec_tunnel_profile_id=ipsec_profile_id, connection_initiation_mode=initiation_mode, psk=psk) test_client.assert_json_call( 'post', mocked_resource, 'https://1.2.3.4/api/v1/%s' % mocked_resource.uri_segment, data=jsonutils.dumps({ 'display_name': name, 'peer_address': peer_address, 'peer_id': peer_id, 'description': description, 'authentication_mode': authentication_mode, 'dpd_profile_id': dpd_profile_id, 'ike_profile_id': ike_profile_id, 'ipsec_tunnel_profile_id': ipsec_profile_id, 'connection_initiation_mode': initiation_mode, 'psk': psk }, sort_keys=True), headers=self.default_headers()) def test_peer_endpoint_update(self): fake_pep = test_constants.FAKE_PEP.copy() new_desc = 'updated' new_name = 'new' new_psk = 'psk12' uuid = test_constants.FAKE_PEP_ID mocked_resource = self.get_mocked_resource(response=fake_pep) mocked_resource.update(uuid, name=new_name, description=new_desc, psk=new_psk) fake_pep['description'] = new_desc fake_pep['display_name'] = new_name fake_pep['psk'] = new_psk test_client.assert_json_call( 'put', mocked_resource, 'https://1.2.3.4/api/v1/%s/%s' % (mocked_resource.uri_segment, uuid), data=jsonutils.dumps(fake_pep, sort_keys=True), headers=self.default_headers()) class TestLocalEndpoint(test_resources.BaseTestResource): def setUp(self): super(TestLocalEndpoint, self).setUp( vpn_ipsec.LocalEndpoint) def test_local_endpoint_create(self): mocked_resource = self.get_mocked_resource() name = 'localep' 
description = 'desc' local_address = local_id = '1.1.1.1' ipsec_vpn_service_id = 'uuid1' mocked_resource.create(name, local_address, ipsec_vpn_service_id, description=description, local_id=local_id) test_client.assert_json_call( 'post', mocked_resource, 'https://1.2.3.4/api/v1/%s' % mocked_resource.uri_segment, data=jsonutils.dumps({ 'display_name': name, 'local_address': local_address, 'local_id': local_id, 'description': description, 'ipsec_vpn_service_id': {'target_id': ipsec_vpn_service_id} }, sort_keys=True), headers=self.default_headers()) def test_local_endpoint_update(self): fake_pep = test_constants.FAKE_LEP.copy() new_desc = 'updated' new_name = 'new' new_addr = '2.2.2.2' uuid = test_constants.FAKE_LEP_ID mocked_resource = self.get_mocked_resource(response=fake_pep) mocked_resource.update(uuid, name=new_name, description=new_desc, local_address=new_addr, local_id=new_addr) fake_pep['description'] = new_desc fake_pep['display_name'] = new_name fake_pep['local_address'] = new_addr fake_pep['local_id'] = new_addr test_client.assert_json_call( 'put', mocked_resource, 'https://1.2.3.4/api/v1/%s/%s' % (mocked_resource.uri_segment, uuid), data=jsonutils.dumps(fake_pep, sort_keys=True), headers=self.default_headers()) class TestSession(test_resources.BaseTestResource): def setUp(self): super(TestSession, self).setUp( vpn_ipsec.Session) def test_session_create(self): mocked_resource = self.get_mocked_resource() name = 'session' description = 'desc' local_ep_id = 'uuid1' peer_ep_id = 'uuid2' policy_rules = [mocked_resource.get_rule_obj(['1.1.1.0/24'], ['2.2.2.0/24'])] mocked_resource.create(name, local_ep_id, peer_ep_id, policy_rules, description=description) test_client.assert_json_call( 'post', mocked_resource, 'https://1.2.3.4/api/v1/%s' % mocked_resource.uri_segment, data=jsonutils.dumps({ 'display_name': name, 'description': description, 'local_endpoint_id': local_ep_id, 'peer_endpoint_id': peer_ep_id, 'enabled': True, 'resource_type': mocked_resource.resource_type, 'policy_rules': policy_rules, }, sort_keys=True), headers=self.default_headers()) def test_session_update_with_rules(self): fake_sess = test_constants.FAKE_VPN_SESS.copy() mocked_resource = self.get_mocked_resource(response=fake_sess) uuid = test_constants.FAKE_VPN_SESS_ID new_name = 'session' new_desc = 'desc' cidr1 = '1.1.1.0/24' cidr2 = '2.2.2.0/24' policy_rules = [mocked_resource.get_rule_obj([cidr1], [cidr2])] mocked_resource.update(uuid, name=new_name, description=new_desc, policy_rules=policy_rules, enabled=False) fake_sess['description'] = new_desc fake_sess['display_name'] = new_name fake_sess['policy_rules'] = policy_rules fake_sess['enabled'] = False test_client.assert_json_call( 'put', mocked_resource, 'https://1.2.3.4/api/v1/%s/%s' % (mocked_resource.uri_segment, uuid), data=jsonutils.dumps(fake_sess, sort_keys=True), headers=self.default_headers()) def test_session_update_no_rules(self): fake_sess = test_constants.FAKE_VPN_SESS.copy() mocked_resource = self.get_mocked_resource(response=fake_sess) uuid = test_constants.FAKE_VPN_SESS_ID new_name = 'session' new_desc = 'desc' mocked_resource.update(uuid, name=new_name, description=new_desc, enabled=False) fake_sess['description'] = new_desc fake_sess['display_name'] = new_name fake_sess['enabled'] = False test_client.assert_json_call( 'put', mocked_resource, 'https://1.2.3.4/api/v1/%s/%s' % (mocked_resource.uri_segment, uuid), data=jsonutils.dumps(fake_sess, sort_keys=True), headers=self.default_headers()) def test_session_get_status(self): uuid = 
test_constants.FAKE_VPN_SESS_ID mocked_resource = self.get_mocked_resource() mocked_resource.get_status(uuid) test_client.assert_json_call( 'get', mocked_resource, 'https://1.2.3.4/api/v1/%s/%s/status?source=realtime' % ( mocked_resource.uri_segment, uuid), headers=self.default_headers()) class TestService(test_resources.BaseTestResource): def setUp(self): super(TestService, self).setUp( vpn_ipsec.Service) def test_service_create(self): mocked_resource = self.get_mocked_resource() router_id = 'abcd' enabled = True log_level = "DEBUG" name = 'service' mocked_resource.create(name, router_id, ike_log_level=log_level, enabled=enabled) test_client.assert_json_call( 'post', mocked_resource, 'https://1.2.3.4/api/v1/%s' % mocked_resource.uri_segment, data=jsonutils.dumps({ 'display_name': name, 'logical_router_id': router_id, 'ike_log_level': log_level, 'enabled': enabled }, sort_keys=True), headers=self.default_headers()) vmware-nsxlib-15.0.6/vmware_nsxlib/tests/unit/v3/test_trust_management.py0000664000175000017500000000311613623151571026747 0ustar zuulzuul00000000000000# Copyright 2019 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import mock from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase from vmware_nsxlib.tests.unit.v3 import test_constants as consts class TestNsxLibTrustManagement(nsxlib_testcase.NsxClientTestCase): def test_create_cert_list(self): fake_cert_list = consts.FAKE_CERT_LIST fake_pem = (fake_cert_list[0]['pem_encoded'] + fake_cert_list[1]['pem_encoded']) fake_private_key = 'fake_key' cert_api = self.nsxlib.trust_management body = { 'pem_encoded': fake_pem, 'private_key': fake_private_key, 'tags': consts.FAKE_TAGS } with mock.patch.object(self.nsxlib.client, 'create') as create: cert_api.create_cert_list( cert_pem=fake_pem, private_key=fake_private_key, tags=consts.FAKE_TAGS) create.assert_called_with( 'trust-management/certificates?action=import', body) vmware-nsxlib-15.0.6/vmware_nsxlib/tests/unit/v3/mocks.py0000664000175000017500000001723413623151571023455 0ustar zuulzuul00000000000000# Copyright (c) 2015 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
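# The trust management test above fixes the expected shape of a certificate
# import: the PEM blocks are concatenated into one string and sent in a single
# request together with the private key and tags. A usage sketch (assumption:
# trust_api is the nsxlib trust_management API object used by the test, and
# the *_pem variables are placeholders for real PEM text):
pem_bundle = leaf_cert_pem + intermediate_cert_pem
trust_api.create_cert_list(cert_pem=pem_bundle,
                           private_key=private_key_pem,
                           tags=tags)
# which results in
#   POST trust-management/certificates?action=import
# with body {'pem_encoded': pem_bundle, 'private_key': ..., 'tags': ...}.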
import requests import six.moves.urllib.parse as urlparse from oslo_serialization import jsonutils from oslo_utils import uuidutils from vmware_nsxlib.v3 import nsx_constants FAKE_NAME = "fake_name" DEFAULT_TIER0_ROUTER_UUID = "efad0078-9204-4b46-a2d8-d4dd31ed448f" NSX_BRIDGE_CLUSTER_NAME = 'default bridge cluster' FAKE_MANAGER = "fake_manager_ip" def make_fake_switch(switch_uuid=None, tz_uuid=None, name=FAKE_NAME): if not switch_uuid: switch_uuid = uuidutils.generate_uuid() if not tz_uuid: tz_uuid = uuidutils.generate_uuid() fake_switch = { "id": switch_uuid, "display_name": name, "resource_type": "LogicalSwitch", "address_bindings": [], "transport_zone_id": tz_uuid, "replication_mode": nsx_constants.MTEP, "admin_state": nsx_constants.ADMIN_STATE_UP, "vni": 50056, "switching_profile_ids": [ { "value": "64814784-7896-3901-9741-badeff705639", "key": "IpDiscoverySwitchingProfile" }, { "value": "fad98876-d7ff-11e4-b9d6-1681e6b88ec1", "key": "SpoofGuardSwitchingProfile" }, { "value": "93b4b7e8-f116-415d-a50c-3364611b5d09", "key": "PortMirroringSwitchingProfile" }, { "value": "fbc4fb17-83d9-4b53-a286-ccdf04301888", "key": "SwitchSecuritySwitchingProfile" }, { "value": "f313290b-eba8-4262-bd93-fab5026e9495", "key": "QosSwitchingProfile" } ], } return fake_switch def make_fake_dhcp_profile(): return {"id": uuidutils.generate_uuid(), "edge_cluster_id": uuidutils.generate_uuid(), "edge_cluster_member_indexes": [0, 1]} def make_fake_metadata_proxy(): return {"id": uuidutils.generate_uuid(), "metadata_server_url": "http://1.2.3.4", "secret": "my secret", "edge_cluster_id": uuidutils.generate_uuid(), "edge_cluster_member_indexes": [0, 1]} class MockRequestsResponse(object): def __init__(self, status_code, content=None): self.status_code = status_code self.content = content def json(self): return jsonutils.loads(self.content) class MockRequestSessionApi(object): def __init__(self): self._store = {} def _format_uri(self, uri): uri = urlparse.urlparse(uri).path while uri.endswith('/'): uri = uri[:-1] while uri.startswith('/'): uri = uri[1:] if not self._is_uuid_uri(uri): uri = "%s/" % uri return uri def _is_uuid_uri(self, uri): return uuidutils.is_uuid_like( urlparse.urlparse(uri).path.split('/')[-1]) def _query(self, search_key, copy=True): items = [] for uri, obj in self._store.items(): if uri.startswith(search_key): items.append(obj.copy() if copy else obj) return items def _build_response(self, url, content=None, status=requests.codes.ok, **kwargs): if isinstance(content, list): content = { 'result_count': len(content), 'results': content } if (content is not None and kwargs.get('headers', {}).get( 'Content-Type') == 'application/json'): content = jsonutils.dumps(content) return MockRequestsResponse(status, content=content) def _get_content(self, **kwargs): content = kwargs.get('data', None) if content and kwargs.get('headers', {}).get( 'Content-Type') == 'application/json': content = jsonutils.loads(content) return content def get(self, url, **kwargs): url = self._format_uri(url) if self._is_uuid_uri(url): item = self._store.get(url) code = requests.codes.ok if item else requests.codes.not_found return self._build_response( url, content=item, status=code, **kwargs) return self._build_response( url, content=self._query(url), status=requests.codes.ok, **kwargs) def _create(self, url, content, **kwargs): resource_id = content.get('id') if resource_id and self._store.get("%s%s" % (url, resource_id)): return self._build_response( url, content=None, status=requests.codes.bad, **kwargs) resource_id = 
resource_id or uuidutils.generate_uuid() content['id'] = resource_id self._store["%s%s" % (url, resource_id)] = content.copy() return content def post(self, url, **kwargs): parsed_url = urlparse.urlparse(url) url = self._format_uri(url) if self._is_uuid_uri(url): if self._store.get(url) is None: return self._build_response( url, content=None, status=requests.codes.bad, **kwargs) body = self._get_content(**kwargs) if body is None: return self._build_response( url, content=None, status=requests.codes.bad, **kwargs) response_content = None url_queries = urlparse.parse_qs(parsed_url.query) if 'create_multiple' in url_queries.get('action', []): response_content = {} for resource_name, resource_body in body.items(): for new_resource in resource_body: created_resource = self._create( url, new_resource, **kwargs) if response_content.get(resource_name, None) is None: response_content[resource_name] = [] response_content[resource_name].append(created_resource) else: response_content = self._create(url, body, **kwargs) if isinstance(response_content, MockRequestsResponse): return response_content return self._build_response( url, content=response_content, status=requests.codes.created, **kwargs) def put(self, url, **kwargs): url = self._format_uri(url) item = {} if self._is_uuid_uri(url): item = self._store.get(url, None) if item is None: return self._build_response( url, content=None, status=requests.codes.not_found, **kwargs) body = self._get_content(**kwargs) if body is None: return self._build_response( url, content=None, status=requests.codes.bad, **kwargs) item.update(body) self._store[url] = item return self._build_response( url, content=item, status=requests.codes.ok, **kwargs) def delete(self, url, **kwargs): url = self._format_uri(url) if not self._store.get(url): return self._build_response( url, content=None, status=requests.codes.not_found, **kwargs) del self._store[url] return self._build_response( url, content=None, status=requests.codes.ok, **kwargs) vmware-nsxlib-15.0.6/vmware_nsxlib/tests/unit/v3/test_load_balancer.py0000664000175000017500000007261013623151571026145 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
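# MockRequestSessionApi above emulates just enough of a requests session for
# the unit tests: POST stores a JSON body under a generated uuid, GET on the
# uuid URI returns it, and GET on the collection URI returns a results list.
# A self-contained usage sketch (the URL and payload are arbitrary examples):
from oslo_serialization import jsonutils

from vmware_nsxlib.tests.unit.v3 import mocks

session = mocks.MockRequestSessionApi()
headers = {'Content-Type': 'application/json'}
resp = session.post('https://1.2.3.4/api/v1/logical-ports',
                    data=jsonutils.dumps({'display_name': 'port1'}),
                    headers=headers)
created = resp.json()
resp = session.get('https://1.2.3.4/api/v1/logical-ports/%s' % created['id'],
                   headers=headers)
assert resp.json()['display_name'] == 'port1'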
# import mock from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase from vmware_nsxlib.tests.unit.v3 import test_constants as consts from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3 import load_balancer app_profile_types = load_balancer.ApplicationProfileTypes app_profiles = [app_profile_types.HTTP, app_profile_types.FAST_TCP, app_profile_types.FAST_UDP] per_profile_types = load_balancer.PersistenceProfileTypes per_profiles = [per_profile_types.COOKIE, per_profile_types.SOURCE_IP] monitor_types = load_balancer.MonitorTypes monitors = [monitor_types.HTTP, monitor_types.HTTPS, monitor_types.ICMP, monitor_types.PASSIVE, monitor_types.TCP, monitor_types.UDP] class TestApplicationProfile(nsxlib_testcase.NsxClientTestCase): def test_create_application_profiles(self): fake_profile = consts.FAKE_APPLICATION_PROFILE.copy() for profile_type in app_profiles: body = { 'display_name': fake_profile['display_name'], 'description': fake_profile['description'], 'resource_type': profile_type, 'tags': consts.FAKE_TAGS } with mock.patch.object(self.nsxlib.client, 'create') as create: self.nsxlib.load_balancer.application_profile.create( display_name=body['display_name'], description=body['description'], resource_type=body['resource_type'], tags=consts.FAKE_TAGS) create.assert_called_with('loadbalancer/application-profiles', body) def test_create_fast_tcp_profiles(self): fake_profile = consts.FAKE_APPLICATION_PROFILE.copy() body = { 'display_name': fake_profile['display_name'], 'description': fake_profile['description'], 'resource_type': app_profile_types.FAST_TCP, 'close_timeout': 8, 'ha_flow_mirroring_enabled': True, 'idle_timeout': 1800, 'tags': consts.FAKE_TAGS } with mock.patch.object(self.nsxlib.client, 'create') as create: self.nsxlib.load_balancer.application_profile.create( display_name=body['display_name'], description=body['description'], resource_type=body['resource_type'], close_timeout=body['close_timeout'], ha_flow_mirroring_enabled=body['ha_flow_mirroring_enabled'], idle_timeout=body['idle_timeout'], tags=consts.FAKE_TAGS) create.assert_called_with('loadbalancer/application-profiles', body) def test_create_fast_udp_profiles(self): fake_profile = consts.FAKE_APPLICATION_PROFILE.copy() body = { 'display_name': fake_profile['display_name'], 'description': fake_profile['description'], 'resource_type': app_profile_types.FAST_UDP, 'flow_mirroring_enabled': True, 'idle_timeout': 1800, 'tags': consts.FAKE_TAGS } with mock.patch.object(self.nsxlib.client, 'create') as create: self.nsxlib.load_balancer.application_profile.create( display_name=body['display_name'], description=body['description'], resource_type=body['resource_type'], flow_mirroring_enabled=body['flow_mirroring_enabled'], idle_timeout=body['idle_timeout'], tags=consts.FAKE_TAGS) create.assert_called_with('loadbalancer/application-profiles', body) def test_create_http_profiles(self): fake_profile = consts.FAKE_APPLICATION_PROFILE.copy() body = { 'display_name': fake_profile['display_name'], 'description': fake_profile['description'], 'resource_type': app_profile_types.HTTP, 'http_redirect_to': fake_profile['http_redirect_to'], 'http_redirect_to_https': fake_profile['http_redirect_to_https'], 'ntlm': fake_profile['ntlm'], 'request_body_size': fake_profile['request_body_size'], 'request_header_size': fake_profile['request_header_size'], 'response_header_size': fake_profile['response_header_size'], 'response_timeout': fake_profile['response_timeout'], 'x_forwarded_for': fake_profile['x_forwarded_for'], 
'idle_timeout': fake_profile['idle_timeout'], 'tags': consts.FAKE_TAGS } with mock.patch.object(self.nsxlib.client, 'create') as create: self.nsxlib.load_balancer.application_profile.create( display_name=body['display_name'], description=body['description'], resource_type=body['resource_type'], http_redirect_to=body['http_redirect_to'], http_redirect_to_https=body['http_redirect_to_https'], ntlm=body['ntlm'], request_body_size=body['request_body_size'], request_header_size=body['request_header_size'], response_header_size=body['response_header_size'], response_timeout=body['response_timeout'], x_forwarded_for=body['x_forwarded_for'], idle_timeout=body['idle_timeout'], tags=consts.FAKE_TAGS) create.assert_called_with('loadbalancer/application-profiles', body) def test_list_application_profiles(self): with mock.patch.object(self.nsxlib.client, 'list') as list_call: self.nsxlib.load_balancer.application_profile.list() list_call.assert_called_with( resource='loadbalancer/application-profiles') def test_get_application_profile(self): with mock.patch.object(self.nsxlib.client, 'get') as get: fake_profile = consts.FAKE_APPLICATION_PROFILE.copy() self.nsxlib.load_balancer.application_profile.get( fake_profile['id']) get.assert_called_with( 'loadbalancer/application-profiles/%s' % fake_profile['id']) def test_delete_application_profile(self): with mock.patch.object(self.nsxlib.client, 'delete') as delete: fake_profile = consts.FAKE_APPLICATION_PROFILE.copy() self.nsxlib.load_balancer.application_profile.delete( fake_profile['id']) delete.assert_called_with( 'loadbalancer/application-profiles/%s' % fake_profile['id']) class TestPersistenceProfile(nsxlib_testcase.NsxClientTestCase): def test_create_persistence_profiles(self): fake_profile = consts.FAKE_PERSISTENCE_PROFILE.copy() for profile_type in per_profiles: body = { 'display_name': fake_profile['display_name'], 'description': fake_profile['description'], 'resource_type': profile_type, 'tags': consts.FAKE_TAGS } with mock.patch.object(self.nsxlib.client, 'create') as create: self.nsxlib.load_balancer.persistence_profile.create( body['display_name'], body['description'], consts.FAKE_TAGS, body['resource_type']) create.assert_called_with('loadbalancer/persistence-profiles', body) def test_list_persistence_profiles(self): with mock.patch.object(self.nsxlib.client, 'list') as list_call: self.nsxlib.load_balancer.persistence_profile.list() list_call.assert_called_with( resource='loadbalancer/persistence-profiles') def test_get_persistence_profile(self): with mock.patch.object(self.nsxlib.client, 'get') as get: fake_profile = consts.FAKE_APPLICATION_PROFILE.copy() self.nsxlib.load_balancer.persistence_profile.get( fake_profile['id']) get.assert_called_with( 'loadbalancer/persistence-profiles/%s' % fake_profile['id']) def test_delete_persistence_profile(self): with mock.patch.object(self.nsxlib.client, 'delete') as delete: fake_profile = consts.FAKE_PERSISTENCE_PROFILE.copy() self.nsxlib.load_balancer.persistence_profile.delete( fake_profile['id']) delete.assert_called_with( 'loadbalancer/persistence-profiles/%s' % fake_profile['id']) class TestRule(nsxlib_testcase.NsxClientTestCase): def test_create_rule(self): fake_rule = consts.FAKE_RULE.copy() body = { 'display_name': fake_rule['display_name'], 'description': fake_rule['description'], 'resource_type': fake_rule['resource_type'], 'phase': fake_rule['phase'], 'match_strategy': fake_rule['match_strategy'], 'tags': consts.FAKE_TAGS } with mock.patch.object(self.nsxlib.client, 'create') as create: 
self.nsxlib.load_balancer.rule.create(**body) create.assert_called_with('loadbalancer/rules', body) def test_list_rules(self): with mock.patch.object(self.nsxlib.client, 'list') as list_call: self.nsxlib.load_balancer.rule.list() list_call.assert_called_with(resource='loadbalancer/rules') def test_get_rule(self): with mock.patch.object(self.nsxlib.client, 'get') as get: fake_rule = consts.FAKE_RULE.copy() self.nsxlib.load_balancer.rule.get(fake_rule['id']) get.assert_called_with('loadbalancer/rules/%s' % fake_rule['id']) def test_delete_rule(self): with mock.patch.object(self.nsxlib.client, 'delete') as delete: fake_rule = consts.FAKE_RULE.copy() self.nsxlib.load_balancer.rule.delete(fake_rule['id']) delete.assert_called_with( 'loadbalancer/rules/%s' % fake_rule['id']) class TestClientSslProfile(nsxlib_testcase.NsxClientTestCase): def test_create_client_ssl_profiles(self): fake_profile = consts.FAKE_CLIENT_SSL_PROFILE.copy() body = { 'display_name': fake_profile['display_name'], 'description': fake_profile['description'], 'tags': consts.FAKE_TAGS } with mock.patch.object(self.nsxlib.client, 'create') as create: self.nsxlib.load_balancer.client_ssl_profile.create( body['display_name'], body['description'], consts.FAKE_TAGS) create.assert_called_with('loadbalancer/client-ssl-profiles', body) def test_list_client_ssl_profiles(self): with mock.patch.object(self.nsxlib.client, 'list') as list_call: self.nsxlib.load_balancer.client_ssl_profile.list() list_call.assert_called_with( resource='loadbalancer/client-ssl-profiles') def test_get_client_ssl_profile(self): with mock.patch.object(self.nsxlib.client, 'get') as get: fake_profile = consts.FAKE_CLIENT_SSL_PROFILE.copy() self.nsxlib.load_balancer.client_ssl_profile.get( fake_profile['id']) get.assert_called_with( 'loadbalancer/client-ssl-profiles/%s' % fake_profile['id']) def test_delete_client_ssl_profile(self): with mock.patch.object(self.nsxlib.client, 'delete') as delete: fake_profile = consts.FAKE_CLIENT_SSL_PROFILE.copy() self.nsxlib.load_balancer.client_ssl_profile.delete( fake_profile['id']) delete.assert_called_with( 'loadbalancer/client-ssl-profiles/%s' % fake_profile['id']) class TestServerSslProfile(nsxlib_testcase.NsxClientTestCase): def test_create_server_client_ssl_profiles(self): fake_profile = consts.FAKE_SERVER_SSL_PROFILE.copy() body = { 'display_name': fake_profile['display_name'], 'description': fake_profile['description'], 'tags': consts.FAKE_TAGS } with mock.patch.object(self.nsxlib.client, 'create') as create: self.nsxlib.load_balancer.server_ssl_profile.create( body['display_name'], body['description'], consts.FAKE_TAGS) create.assert_called_with('loadbalancer/server-ssl-profiles', body) def test_list_server_ssl_profiles(self): with mock.patch.object(self.nsxlib.client, 'list') as list_call: self.nsxlib.load_balancer.server_ssl_profile.list() list_call.assert_called_with( resource='loadbalancer/server-ssl-profiles') def test_get_server_ssl_profile(self): with mock.patch.object(self.nsxlib.client, 'get') as get: fake_profile = consts.FAKE_SERVER_SSL_PROFILE.copy() self.nsxlib.load_balancer.server_ssl_profile.get( fake_profile['id']) get.assert_called_with( 'loadbalancer/server-ssl-profiles/%s' % fake_profile['id']) def test_delete_server_ssl_profile(self): with mock.patch.object(self.nsxlib.client, 'delete') as delete: fake_profile = consts.FAKE_SERVER_SSL_PROFILE.copy() self.nsxlib.load_balancer.server_ssl_profile.delete( fake_profile['id']) delete.assert_called_with( 'loadbalancer/server-ssl-profiles/%s' % 
fake_profile['id']) class TestMonitor(nsxlib_testcase.NsxClientTestCase): def test_create_monitors(self): fake_monitor = consts.FAKE_MONITOR.copy() for monitor_type in monitors: body = { 'display_name': fake_monitor['display_name'], 'description': fake_monitor['description'], 'resource_type': monitor_type, 'tags': consts.FAKE_TAGS } with mock.patch.object(self.nsxlib.client, 'create') as create: self.nsxlib.load_balancer.monitor.create( body['display_name'], body['description'], consts.FAKE_TAGS, body['resource_type']) create.assert_called_with('loadbalancer/monitors', body) def test_list_monitors(self): with mock.patch.object(self.nsxlib.client, 'list') as list_call: self.nsxlib.load_balancer.monitor.list() list_call.assert_called_with(resource='loadbalancer/monitors') def test_get_monitor(self): with mock.patch.object(self.nsxlib.client, 'get') as get: fake_monitor = consts.FAKE_MONITOR.copy() self.nsxlib.load_balancer.monitor.get(fake_monitor['id']) get.assert_called_with( 'loadbalancer/monitors/%s' % fake_monitor['id']) def test_delete_monitor(self): with mock.patch.object(self.nsxlib.client, 'delete') as delete: fake_monitor = consts.FAKE_MONITOR.copy() self.nsxlib.load_balancer.monitor.delete(fake_monitor['id']) delete.assert_called_with( 'loadbalancer/monitors/%s' % fake_monitor['id']) class TestPool(nsxlib_testcase.NsxClientTestCase): def test_create_pool(self): fake_pool = consts.FAKE_POOL.copy() body = { 'display_name': fake_pool['display_name'], 'description': fake_pool['description'], 'algorithm': fake_pool['algorithm'], 'tags': consts.FAKE_TAGS } with mock.patch.object(self.nsxlib.client, 'create') as create: self.nsxlib.load_balancer.pool.create( body['display_name'], body['description'], consts.FAKE_TAGS, algorithm=body['algorithm']) create.assert_called_with('loadbalancer/pools', body) def test_list_pools(self): with mock.patch.object(self.nsxlib.client, 'list') as list_call: self.nsxlib.load_balancer.pool.list() list_call.assert_called_with(resource='loadbalancer/pools') def test_get_pool(self): with mock.patch.object(self.nsxlib.client, 'get') as get: fake_profile = consts.FAKE_POOL.copy() self.nsxlib.load_balancer.pool.get(fake_profile['id']) get.assert_called_with( 'loadbalancer/pools/%s' % fake_profile['id']) def test_delete_pool(self): with mock.patch.object(self.nsxlib.client, 'delete') as delete: fake_profile = consts.FAKE_POOL.copy() self.nsxlib.load_balancer.pool.delete(fake_profile['id']) delete.assert_called_with( 'loadbalancer/pools/%s' % fake_profile['id']) def test_remove_monitor_from_pool(self): fake_pool = consts.FAKE_POOL.copy() fake_pool['active_monitor_ids'] = [consts.FAKE_MONITOR_UUID] body = {'display_name': fake_pool['display_name'], 'description': fake_pool['description'], 'id': fake_pool['id'], 'algorithm': fake_pool['algorithm'], 'active_monitor_ids': []} with mock.patch.object(self.nsxlib.client, 'get', return_value=fake_pool): with mock.patch.object(self.nsxlib.client, 'update') as update: self.nsxlib.load_balancer.pool.remove_monitor_from_pool( fake_pool['id'], consts.FAKE_MONITOR_UUID) resource = 'loadbalancer/pools/%s' % fake_pool['id'] update.assert_called_with(resource, body) def test_remove_non_exist_monitor_from_pool(self): fake_pool = consts.FAKE_POOL.copy() fake_pool['active_monitor_ids'] = [consts.FAKE_MONITOR_UUID] with mock.patch.object(self.nsxlib.client, 'get', return_value=fake_pool): self.assertRaises( nsxlib_exc.ResourceNotFound, self.nsxlib.load_balancer.pool.remove_monitor_from_pool, fake_pool['id'], 'xxx-yyy') def 
test_add_monitor_to_pool(self): fake_pool = consts.FAKE_POOL.copy() body = {'display_name': fake_pool['display_name'], 'description': fake_pool['description'], 'id': fake_pool['id'], 'algorithm': fake_pool['algorithm'], 'active_monitor_ids': [consts.FAKE_MONITOR_UUID]} with mock.patch.object(self.nsxlib.client, 'get', return_value=fake_pool): with mock.patch.object(self.nsxlib.client, 'update') as update: self.nsxlib.load_balancer.pool.add_monitor_to_pool( fake_pool['id'], consts.FAKE_MONITOR_UUID) resource = 'loadbalancer/pools/%s' % fake_pool['id'] update.assert_called_with(resource, body) class TestVirtualServer(nsxlib_testcase.NsxClientTestCase): def test_create_virtual_server(self): fake_virtual_server = consts.FAKE_VIRTUAL_SERVER.copy() body = { 'display_name': fake_virtual_server['display_name'], 'description': fake_virtual_server['description'], 'ip_protocol': fake_virtual_server['ip_protocol'], 'port': fake_virtual_server['port'], 'enabled': fake_virtual_server['enabled'], 'tags': consts.FAKE_TAGS } with mock.patch.object(self.nsxlib.client, 'create') as create: self.nsxlib.load_balancer.virtual_server.create( body['display_name'], body['description'], consts.FAKE_TAGS, ip_protocol=body['ip_protocol'], port=body['port'], enabled=body['enabled']) create.assert_called_with('loadbalancer/virtual-servers', body) def test_list_virtual_servers(self): with mock.patch.object(self.nsxlib.client, 'list') as list_call: self.nsxlib.load_balancer.virtual_server.list() list_call.assert_called_with( resource='loadbalancer/virtual-servers') def test_get_virtual_server(self): with mock.patch.object(self.nsxlib.client, 'get') as get: fake_virtual_server = consts.FAKE_VIRTUAL_SERVER.copy() self.nsxlib.load_balancer.virtual_server.get( fake_virtual_server['id']) get.assert_called_with( 'loadbalancer/virtual-servers/%s' % fake_virtual_server['id']) def test_delete_virtual_server(self): with mock.patch.object(self.nsxlib.client, 'delete') as delete: fake_virtual_server = consts.FAKE_VIRTUAL_SERVER.copy() self.nsxlib.load_balancer.virtual_server.delete( fake_virtual_server['id']) delete.assert_called_with( 'loadbalancer/virtual-servers/%s' % fake_virtual_server['id']) def test_add_rule(self): fake_virtual_server = consts.FAKE_VIRTUAL_SERVER.copy() body = { 'display_name': fake_virtual_server['display_name'], 'description': fake_virtual_server['description'], 'id': fake_virtual_server['id'], 'enabled': fake_virtual_server['enabled'], 'port': fake_virtual_server['port'], 'ip_protocol': fake_virtual_server['ip_protocol'], 'rule_ids': [consts.FAKE_RULE_UUID] } with mock.patch.object(self.nsxlib.client, 'get') as mock_get, \ mock.patch.object(self.nsxlib.client, 'update') as mock_update: mock_get.return_value = fake_virtual_server self.nsxlib.load_balancer.virtual_server.add_rule( fake_virtual_server['id'], consts.FAKE_RULE_UUID) mock_update.assert_called_with( 'loadbalancer/virtual-servers/%s' % fake_virtual_server['id'], body) def test_remove_rule(self): fake_virtual_server = consts.FAKE_VIRTUAL_SERVER.copy() fake_virtual_server['rule_ids'] = [consts.FAKE_RULE_UUID] body = { 'display_name': fake_virtual_server['display_name'], 'description': fake_virtual_server['description'], 'id': fake_virtual_server['id'], 'enabled': fake_virtual_server['enabled'], 'port': fake_virtual_server['port'], 'ip_protocol': fake_virtual_server['ip_protocol'], 'rule_ids': [] } with mock.patch.object(self.nsxlib.client, 'get') as mock_get, \ mock.patch.object(self.nsxlib.client, 'update') as mock_update: mock_get.return_value = 
fake_virtual_server self.nsxlib.load_balancer.virtual_server.remove_rule( fake_virtual_server['id'], consts.FAKE_RULE_UUID) mock_update.assert_called_with( 'loadbalancer/virtual-servers/%s' % fake_virtual_server['id'], body) def test_add_client_ssl_profile_binding(self): fake_virtual_server = consts.FAKE_VIRTUAL_SERVER.copy() body = { 'display_name': fake_virtual_server['display_name'], 'description': fake_virtual_server['description'], 'id': fake_virtual_server['id'], 'enabled': fake_virtual_server['enabled'], 'port': fake_virtual_server['port'], 'ip_protocol': fake_virtual_server['ip_protocol'], 'client_ssl_profile_binding': { 'ssl_profile_id': consts.FAKE_CLIENT_SSL_PROFILE_UUID, 'default_certificate_id': consts.FAKE_DEFAULT_CERTIFICATE_ID, 'client_auth': 'IGNORE', 'certificate_chain_depth': 3 } } with mock.patch.object(self.nsxlib.client, 'get') as mock_get, \ mock.patch.object(self.nsxlib.client, 'update') as mock_update: mock_get.return_value = fake_virtual_server vs_client = self.nsxlib.load_balancer.virtual_server vs_client.add_client_ssl_profile_binding( fake_virtual_server['id'], consts.FAKE_CLIENT_SSL_PROFILE_UUID, consts.FAKE_DEFAULT_CERTIFICATE_ID, client_auth='IGNORE', certificate_chain_depth=3, xyz='xyz' ) mock_update.assert_called_with( 'loadbalancer/virtual-servers/%s' % fake_virtual_server['id'], body) def test_add_server_ssl_profile_binding(self): fake_virtual_server = consts.FAKE_VIRTUAL_SERVER.copy() body = { 'display_name': fake_virtual_server['display_name'], 'description': fake_virtual_server['description'], 'id': fake_virtual_server['id'], 'enabled': fake_virtual_server['enabled'], 'port': fake_virtual_server['port'], 'ip_protocol': fake_virtual_server['ip_protocol'], 'server_ssl_profile_binding': { 'ssl_profile_id': consts.FAKE_SERVER_SSL_PROFILE_UUID, 'server_auth': 'IGNORE', 'certificate_chain_depth': 3 } } with mock.patch.object(self.nsxlib.client, 'get') as mock_get, \ mock.patch.object(self.nsxlib.client, 'update') as mock_update: mock_get.return_value = fake_virtual_server vs_client = self.nsxlib.load_balancer.virtual_server vs_client.add_server_ssl_profile_binding( fake_virtual_server['id'], consts.FAKE_SERVER_SSL_PROFILE_UUID, server_auth='IGNORE', certificate_chain_depth=3, xyz='xyz') mock_update.assert_called_with( 'loadbalancer/virtual-servers/%s' % fake_virtual_server['id'], body) class TestService(nsxlib_testcase.NsxClientTestCase): def test_create_service(self): fake_service = consts.FAKE_SERVICE.copy() body = { 'display_name': fake_service['display_name'], 'description': fake_service['description'], 'enabled': fake_service['enabled'], 'attachment': fake_service['attachment'], 'relax_scale_validation': fake_service['relax_scale_validation'], 'tags': consts.FAKE_TAGS } with mock.patch.object(self.nsxlib.client, 'create') as create, \ mock.patch.object(self.nsxlib, 'feature_supported') as support: support.return_value = True self.nsxlib.load_balancer.service.create( body['display_name'], body['description'], consts.FAKE_TAGS, enabled=body['enabled'], attachment=body['attachment'], relax_scale_validation=body['relax_scale_validation']) create.assert_called_with('loadbalancer/services', body) def test_list_services(self): with mock.patch.object(self.nsxlib.client, 'list') as list_call: self.nsxlib.load_balancer.service.list() list_call.assert_called_with(resource='loadbalancer/services') def test_get_service(self): with mock.patch.object(self.nsxlib.client, 'get') as get: fake_service = consts.FAKE_SERVICE.copy() 
self.nsxlib.load_balancer.service.get(fake_service['id']) get.assert_called_with( 'loadbalancer/services/%s' % fake_service['id']) def test_get_stats(self): with mock.patch.object(self.nsxlib.client, 'get') as get: fake_service = consts.FAKE_SERVICE.copy() self.nsxlib.load_balancer.service.get_stats(fake_service['id']) get.assert_called_with( 'loadbalancer/services/%s/statistics?source=realtime' % fake_service['id']) def test_get_status(self): with mock.patch.object(self.nsxlib.client, 'get') as get: fake_service = consts.FAKE_SERVICE.copy() self.nsxlib.load_balancer.service.get_status(fake_service['id']) get.assert_called_with( 'loadbalancer/services/%s/status' % fake_service['id']) def test_get_virtual_servers_status(self): with mock.patch.object(self.nsxlib.client, 'get') as get: fake_service = consts.FAKE_SERVICE.copy() self.nsxlib.load_balancer.service.get_virtual_servers_status( fake_service['id']) get.assert_called_with( 'loadbalancer/services/%s/virtual-servers/status' % fake_service['id']) def test_delete_service(self): with mock.patch.object(self.nsxlib.client, 'delete') as delete: fake_service = consts.FAKE_SERVICE.copy() self.nsxlib.load_balancer.service.delete(fake_service['id']) delete.assert_called_with( 'loadbalancer/services/%s' % fake_service['id']) def test_get_usage(self): with mock.patch.object(self.nsxlib.client, 'get') as get: fake_service = consts.FAKE_SERVICE.copy() self.nsxlib.load_balancer.service.get_usage(fake_service['id']) get.assert_called_with( 'loadbalancer/services/%s/usage' % fake_service['id']) vmware-nsxlib-15.0.6/vmware_nsxlib/tests/unit/v3/test_client.py0000664000175000017500000003312213623151571024650 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
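# Editorial sketch (not part of the original suite): the load-balancer tests
# above all follow one pattern: patch the low-level verb on nsxlib.client,
# call the high-level resource API, then assert the REST path it composes.
# Reduced to its essentials, with a made-up virtual-server id used purely
# for illustration, the pattern is:

import mock


def _editorial_example_virtual_server_get(nsxlib):
    """Assumes ``nsxlib`` is an already initialized vmware_nsxlib.v3.NsxLib."""
    with mock.patch.object(nsxlib.client, 'get') as get:
        # no HTTP request is issued; only the composed resource path is checked
        nsxlib.load_balancer.virtual_server.get('fake-vs-id')
        get.assert_called_with('loadbalancer/virtual-servers/fake-vs-id')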
# import copy from oslo_log import log from oslo_serialization import jsonutils import requests from vmware_nsxlib.tests.unit.v3 import mocks from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase from vmware_nsxlib.v3 import client from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3 import utils LOG = log.getLogger(__name__) DFT_ACCEPT_HEADERS = { 'Accept': '*/*', 'Cookie': 'JSESSIONID=%s;' % nsxlib_testcase.JSESSIONID } JSON_DFT_ACCEPT_HEADERS = { 'Accept': 'application/json', 'Content-Type': 'application/json', 'Cookie': 'JSESSIONID=%s;' % nsxlib_testcase.JSESSIONID } PARTIAL_UPDATE_HEADERS = { 'Accept': 'application/json', 'Content-Type': 'application/json', 'Cookie': 'JSESSIONID=%s;' % nsxlib_testcase.JSESSIONID, 'nsx-enable-partial-patch': 'true' } def _headers(**kwargs): headers = copy.copy(DFT_ACCEPT_HEADERS) headers.update(kwargs) return headers def assert_call(verb, client_or_resource, url, verify=nsxlib_testcase.NSX_CERT, data=None, headers=DFT_ACCEPT_HEADERS, timeout=(nsxlib_testcase.NSX_HTTP_TIMEOUT, nsxlib_testcase.NSX_HTTP_READ_TIMEOUT), single_call=True): nsx_client = client_or_resource if getattr(nsx_client, 'client', None) is not None: nsx_client = nsx_client.client cluster = nsx_client._conn if single_call: cluster.assert_called_once( verb, **{'url': url, 'verify': verify, 'body': data, 'headers': headers, 'cert': None, 'timeout': timeout}) else: cluster.assert_any_call( verb, **{'url': url, 'verify': verify, 'body': data, 'headers': headers, 'cert': None, 'timeout': timeout}) def mock_calls_count(verb, client_or_resource): nsx_client = client_or_resource if getattr(nsx_client, 'client', None) is not None: nsx_client = nsx_client.client cluster = nsx_client._conn return cluster.call_count(verb) def assert_json_call(verb, client_or_resource, url, verify=nsxlib_testcase.NSX_CERT, data=None, headers=JSON_DFT_ACCEPT_HEADERS, single_call=True): return assert_call(verb, client_or_resource, url, verify=verify, data=data, headers=headers, single_call=single_call) class NsxV3RESTClientTestCase(nsxlib_testcase.NsxClientTestCase): def test_client_url_prefix(self): api = self.new_mocked_client(client.RESTClient, url_prefix='/cloud/api') api.list() assert_call( 'get', api, 'https://1.2.3.4/cloud/api') api = self.new_mocked_client(client.RESTClient, url_prefix='/cloud/api') api.url_list('v1/ports') assert_call( 'get', api, 'https://1.2.3.4/cloud/api/v1/ports') def test_client_headers(self): default_headers = {'Content-Type': 'application/golang'} api = self.new_mocked_client( client.RESTClient, default_headers=default_headers, url_prefix='/v1/api') api.list() assert_call( 'get', api, 'https://1.2.3.4/v1/api', headers=_headers(**default_headers)) api = self.new_mocked_client( client.RESTClient, default_headers=default_headers, url_prefix='/v1/api') method_headers = {'X-API-Key': 'strong-crypt'} api.url_list('ports/33', headers=method_headers) method_headers.update(default_headers) assert_call( 'get', api, 'https://1.2.3.4/v1/api/ports/33', headers=_headers(**method_headers)) def test_client_for(self): api = self.new_mocked_client(client.RESTClient, url_prefix='api/v1/') sub_api = api.new_client_for('switch/ports') sub_api.get('11a2b') assert_call( 'get', sub_api, 'https://1.2.3.4/api/v1/switch/ports/11a2b') def test_client_list(self): api = self.new_mocked_client(client.RESTClient, url_prefix='api/v1/ports') api.list() assert_call( 'get', api, 'https://1.2.3.4/api/v1/ports') def test_client_get(self): api = self.new_mocked_client(client.RESTClient, 
url_prefix='api/v1/ports') api.get('unique-id') assert_call( 'get', api, 'https://1.2.3.4/api/v1/ports/unique-id') def test_client_delete(self): api = self.new_mocked_client(client.RESTClient, url_prefix='api/v1/ports') api.delete('unique-id') assert_call( 'delete', api, 'https://1.2.3.4/api/v1/ports/unique-id') def test_client_update(self): api = self.new_mocked_client(client.RESTClient, url_prefix='api/v1/ports') api.update('unique-id', jsonutils.dumps({'name': 'a-new-name'})) assert_call( 'put', api, 'https://1.2.3.4/api/v1/ports/unique-id', data=jsonutils.dumps({'name': 'a-new-name'})) def test_client_create(self): api = self.new_mocked_client(client.RESTClient, url_prefix='api/v1/ports') api.create(body=jsonutils.dumps({'resource-name': 'port1'})) assert_call( 'post', api, 'https://1.2.3.4/api/v1/ports', data=jsonutils.dumps({'resource-name': 'port1'})) def test_client_url_list(self): api = self.new_mocked_client(client.RESTClient, url_prefix='api/v1/ports') json_headers = {'Content-Type': 'application/json'} api.url_list('/connections', json_headers) assert_call( 'get', api, 'https://1.2.3.4/api/v1/ports/connections', headers=_headers(**json_headers)) def test_client_url_get(self): api = self.new_mocked_client(client.RESTClient, url_prefix='api/v1/ports') api.url_get('connections/1') assert_call( 'get', api, 'https://1.2.3.4/api/v1/ports/connections/1') def test_client_url_delete(self): api = self.new_mocked_client(client.RESTClient, url_prefix='api/v1/ports') api.url_delete('1') assert_call( 'delete', api, 'https://1.2.3.4/api/v1/ports/1') def test_client_url_put(self): api = self.new_mocked_client(client.RESTClient, url_prefix='api/v1/ports') api.url_put('connections/1', jsonutils.dumps({'name': 'conn1'})) assert_call( 'put', api, 'https://1.2.3.4/api/v1/ports/connections/1', data=jsonutils.dumps({'name': 'conn1'})) def test_client_url_post(self): api = self.new_mocked_client(client.RESTClient, url_prefix='api/v1/ports') api.url_post('1/connections', jsonutils.dumps({'name': 'conn1'})) assert_call( 'post', api, 'https://1.2.3.4/api/v1/ports/1/connections', data=jsonutils.dumps({'name': 'conn1'})) def test_client_validate_result(self): def _verb_response_code(http_verb, status_code, error_code=None): content = None if error_code: content = jsonutils.dumps({'httpStatus': 'dummy', 'error_code': error_code, 'module_name': 'dummy', 'error_message': 'bad', 'related_errors': [{ 'error_message': 'bla', 'error_code': 'code'}]}) response = mocks.MockRequestsResponse( status_code, content) client_api = self.new_mocked_client( client.RESTClient, mock_validate=False, session_response=response) client_call = getattr(client_api, "url_%s" % http_verb) client_call('', None) for verb in ['get', 'post', 'put', 'delete']: for code in client.RESTClient._VERB_RESP_CODES.get(verb): _verb_response_code(verb, code) with self.assertRaises(nsxlib_exc.ManagerError) as e: _verb_response_code(verb, requests.codes.INTERNAL_SERVER_ERROR) self.assertEqual(e.exception.status_code, requests.codes.INTERNAL_SERVER_ERROR) with self.assertRaises(nsxlib_exc.ResourceNotFound) as e: _verb_response_code(verb, requests.codes.NOT_FOUND) self.assertEqual(e.exception.status_code, requests.codes.NOT_FOUND) with self.assertRaises(nsxlib_exc.BackendResourceNotFound) as e: _verb_response_code(verb, requests.codes.NOT_FOUND, 202) self.assertEqual(e.exception.status_code, requests.codes.NOT_FOUND) def test_inject_headers_callback(self): self.injected = None def inject_header(): self.injected = True return {} 
utils.set_inject_headers_callback(inject_header) api = self.new_mocked_client( client.RESTClient, url_prefix='/v1/api') api.list() injected_headers = {} assert_call( 'get', api, 'https://1.2.3.4/v1/api', headers=_headers(**injected_headers)) api = self.new_mocked_client( client.RESTClient, url_prefix='/v1/api') utils.set_inject_headers_callback(None) self.assertIsNotNone(self.injected) def test_http_error_to_exception(self): exc = client.http_error_to_exception(500, 607) self.assertEqual(exc, nsxlib_exc.APITransactionAborted) class NsxV3JSONClientTestCase(nsxlib_testcase.NsxClientTestCase): def test_json_request(self): resp = mocks.MockRequestsResponse( 200, jsonutils.dumps({'result': {'ok': 200}})) api = self.new_mocked_client(client.JSONRESTClient, session_response=resp, url_prefix='api/v2/nat') resp = api.create(body={'name': 'mgmt-egress'}) assert_json_call( 'post', api, 'https://1.2.3.4/api/v2/nat', data=jsonutils.dumps({'name': 'mgmt-egress'})) self.assertEqual(resp, {'result': {'ok': 200}}) def test_mask_password(self): pwds = ('my!pwd0#', 'some0therlong$pwd') body = {'name_pwd': 'name1', 'password': pwds[0], 'some_list': {'name_password': 'name2', 'password': pwds[1]}} cl = client.RESTClient(None) json_body = jsonutils.dumps(body) masked_body = cl._mask_password(json_body) for pwd in pwds: json_body = json_body.replace('"' + pwd + '"', '"********"') self.assertEqual(json_body, masked_body) class NsxV3APIClientTestCase(nsxlib_testcase.NsxClientTestCase): def test_api_call(self): api = self.new_mocked_client(client.NSX3Client) api.get('ports') assert_json_call( 'get', api, 'https://1.2.3.4/api/v1/ports') def test_raise_error(self): api = self.new_mocked_client(client.NSX3Client) with self.assertRaises(nsxlib_exc.ManagerError) as e: api._raise_error(requests.codes.INTERNAL_SERVER_ERROR, 'GET', '') self.assertEqual(e.exception.status_code, requests.codes.INTERNAL_SERVER_ERROR) # NOTE(boden): remove this when tmp brigding removed class NsxV3APIClientBridgeTestCase(nsxlib_testcase.NsxClientTestCase): def test_get_resource(self): api = self.new_mocked_client(client.NSX3Client) api.get('ports') assert_json_call( 'get', api, 'https://1.2.3.4/api/v1/ports') def test_create_resource(self): api = self.new_mocked_client(client.NSX3Client) api.create('ports', {'resource-name': 'port1'}) assert_json_call( 'post', api, 'https://1.2.3.4/api/v1/ports', data=jsonutils.dumps({'resource-name': 'port1'})) def test_update_resource(self): api = self.new_mocked_client(client.NSX3Client) api.update('ports/1', {'name': 'a-new-name'}) assert_json_call( 'put', api, 'https://1.2.3.4/api/v1/ports/1', data=jsonutils.dumps({'name': 'a-new-name'})) def test_delete_resource(self): api = self.new_mocked_client(client.NSX3Client) api.delete('ports/11') assert_json_call( 'delete', api, 'https://1.2.3.4/api/v1/ports/11') vmware-nsxlib-15.0.6/vmware_nsxlib/tests/unit/v3/test_cert.py0000664000175000017500000003171613623151571024336 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # import os import mock from OpenSSL import crypto from oslo_serialization import jsonutils from vmware_nsxlib.tests.unit.v3 import mocks from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase from vmware_nsxlib.tests.unit.v3 import test_client from vmware_nsxlib.tests.unit.v3 import test_constants as const from vmware_nsxlib.v3 import client from vmware_nsxlib.v3 import client_cert from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3 import trust_management as tm class DummyStorageDriver(dict): """Storage driver simulation - just a dictionary""" def store_cert(self, project_id, certificate, private_key): self[project_id] = {} self[project_id]['cert'] = certificate self[project_id]['key'] = private_key def get_cert(self, project_id): if project_id not in self: return (None, None) return (self[project_id]['cert'], self[project_id]['key']) def delete_cert(self, project_id): del(self[project_id]) def is_empty(self, project_id): return project_id not in self class NsxV3ClientCertificateTestCase(nsxlib_testcase.NsxClientTestCase): identity = 'drumknott' cert_id = "00000000-1111-2222-3333-444444444444" identity_id = "55555555-6666-7777-8888-999999999999" node_id = "meh" def _get_mocked_response(self, status_code, results): return mocks.MockRequestsResponse( status_code, jsonutils.dumps({'results': results})) def _get_mocked_error_response(self, status_code, error_code): return mocks.MockRequestsResponse( status_code, jsonutils.dumps({'httpStatus': 'go away', 'error_code': error_code, 'module_name': 'never mind', 'error_message': 'bad luck'})) def _get_mocked_trust(self, action, cert_pem): fake_responses = [] if 'create' in action: # import cert and return its id results = [{'id': self.cert_id}] fake_responses.append(self._get_mocked_response(201, results)) # and then bind this id to principal identity fake_responses.append(self._get_mocked_response(201, [])) if 'delete' in action: # get certs list, including same cert imported twice edge case results = [{'resource_type': 'Certificate', 'id': 'dont care', 'pem_encoded': 'some junk'}, {'resource_type': 'Certificate', 'id': 'some_other_cert_id', 'pem_encoded': cert_pem}, {'resource_type': 'Certificate', 'id': self.cert_id, 'pem_encoded': cert_pem}] fake_responses.append(self._get_mocked_response(200, results)) # get principal identities list results = [{'resource_type': 'Principal Identity', 'id': 'dont care', 'name': 'willikins', 'certificate_id': 'some other id'}, {'resource_type': 'Principal Identity', 'id': self.identity_id, 'name': self.identity, 'certificate_id': self.cert_id}] fake_responses.append(self._get_mocked_response(200, results)) # delete certificate fake_responses.append(self._get_mocked_response(204, [])) # delete identity fake_responses.append(self._get_mocked_response(204, [])) mock_client = self.new_mocked_client( client.JSONRESTClient, url_prefix='api/v1', session_response=fake_responses) return tm.NsxLibTrustManagement(mock_client, {}) def _verify_backend_create(self, mocked_trust, cert_pem): """Verify API calls to create cert and identity on backend""" # verify API call to import cert on backend base_uri = 'https://1.2.3.4/api/v1/trust-management' uri = base_uri + '/certificates?action=import' expected_body = {'pem_encoded': cert_pem} test_client.assert_json_call('post', mocked_trust.client, uri, single_call=False, data=jsonutils.dumps(expected_body)) # verify API call to bind cert to identity on 
backend uri = base_uri + '/principal-identities' expected_body = {'name': self.identity, 'node_id': self.node_id, 'permission_group': 'read_write_api_users', 'certificate_id': self.cert_id, 'is_protected': True} test_client.assert_json_call('post', mocked_trust.client, uri, single_call=False, data=jsonutils.dumps(expected_body, sort_keys=True)) def _verify_backend_delete(self, mocked_trust): """Verify API calls to fetch and delete cert and identity""" # verify API call to query identities in order to get cert id base_uri = 'https://1.2.3.4/api/v1/trust-management' uri = base_uri + '/principal-identities' test_client.assert_json_call('get', mocked_trust.client, uri, single_call=False) # verify API call to delete openstack principal identity uri = uri + '/' + self.identity_id test_client.assert_json_call('delete', mocked_trust.client, uri, single_call=False) # verify API call to delete certificate uri = base_uri + '/certificates/' + self.cert_id test_client.assert_json_call('delete', mocked_trust.client, uri, single_call=False) def test_generate_cert(self): """Test startup without certificate + certificate generation""" storage_driver = DummyStorageDriver() # Prepare fake trust management for "cert create" requests cert_pem, key_pem = storage_driver.get_cert(self.identity) mocked_trust = self._get_mocked_trust('create', cert_pem) cert = client_cert.ClientCertificateManager(self.identity, mocked_trust, storage_driver) self.assertFalse(cert.exists()) cert.generate(subject={}, key_size=2048, valid_for_days=333, node_id=self.node_id) # verify client cert was generated and makes sense self.assertTrue(cert.exists()) self.assertEqual(332, cert.expires_in_days()) cert_pem, key_pem = cert.get_pem() # verify cert ans PK were stored in storage stored_cert, stored_key = storage_driver.get_cert(self.identity) self.assertEqual(cert_pem, stored_cert) self.assertEqual(key_pem, stored_key) # verify backend API calls self._verify_backend_create(mocked_trust, cert_pem) # try to generate cert again and fail self.assertRaises(nsxlib_exc.ObjectAlreadyExists, cert.generate, {}) def _prepare_storage_with_existing_cert(self, key_size, days, alg, subj): # prepare storage driver with existing cert and key # this test simulates system startup cert, key = client_cert.generate_self_signed_cert_pair(key_size, days, alg, subj) storage_driver = DummyStorageDriver() cert_pem = crypto.dump_certificate(crypto.FILETYPE_PEM, cert) key_pem = crypto.dump_privatekey(crypto.FILETYPE_PEM, key) storage_driver.store_cert(self.identity, cert_pem, key_pem) return storage_driver def test_load_and_delete_existing_cert(self): """Test startup with existing certificate + certificate deletion""" storage_driver = self._prepare_storage_with_existing_cert(4096, 3650, 'sha256', {}) # get mocked backend driver for trust management, # prepared for get request, that preceeds delete operation cert_pem, key_pem = storage_driver.get_cert(self.identity) mocked_trust = self._get_mocked_trust('delete', cert_pem) cert = client_cert.ClientCertificateManager(self.identity, mocked_trust, storage_driver) self.assertTrue(cert.exists()) cert.delete() self.assertFalse(cert.exists()) self.assertTrue(storage_driver.is_empty(self.identity)) self._verify_backend_delete(mocked_trust) def _test_import_and_delete_cert(self, with_pkey=True): filename = '/tmp/test.pem' # this driver simulates storage==none scenario noop_driver = DummyStorageDriver() cert, key = client_cert.generate_self_signed_cert_pair(4096, 20, 'sha256', {}) cert_pem = 
crypto.dump_certificate(crypto.FILETYPE_PEM, cert) key_pem = crypto.dump_privatekey(crypto.FILETYPE_PEM, key) with open(filename, 'wb') as f: f.write(cert_pem) if with_pkey: f.write(key_pem) mocked_trust = self._get_mocked_trust('create_delete', cert_pem) cert = client_cert.ClientCertificateManager(self.identity, mocked_trust, noop_driver) cert.import_pem(filename, self.node_id) self._verify_backend_create(mocked_trust, cert_pem) cert.delete_pem(filename) self._verify_backend_delete(mocked_trust) os.remove(filename) def test_import_and_delete_cert_pkey(self): self._test_import_and_delete_cert(True) def test_import_and_delete_cert_only(self): self._test_import_and_delete_cert(False) def test_get_certificate_details(self): """Test retrieving cert details for existing cert""" key_size = 2048 days = 999 alg = 'sha256' subj = {'country': 'CA', 'organization': 'squirrel rights', 'hostname': 'www.squirrels.ca', 'unit': 'nuts', 'state': 'BC'} storage_driver = self._prepare_storage_with_existing_cert(key_size, days, alg, subj) with client_cert.ClientCertificateManager(self.identity, None, storage_driver) as cert: self.assertTrue(cert.exists()) self.assertEqual(days - 1, cert.expires_in_days()) self.assertEqual(key_size, cert.get_key_size()) cert_subj = cert.get_subject() self.assertEqual(subj, cert_subj) def test_bad_certificate_values(self): bad_cert_values = [{'key_size': 1024, 'valid_for_days': 10, 'signature_alg': 'sha256', 'subject': {}}, {'key_size': 4096, 'valid_for_days': 100, 'signature_alg': 'sha224', 'subject': {}}] for args in bad_cert_values: self.assertRaises(nsxlib_exc.NsxLibInvalidInput, client_cert.generate_self_signed_cert_pair, **args) def test_find_cert_with_pem(self): with mock.patch.object(self.nsxlib.trust_management, 'get_certs' ) as mock_get_certs: mock_get_certs.return_value = const.FAKE_CERT_LIST cert_ids = self.nsxlib.trust_management.find_cert_with_pem( const.FAKE_CERT_PEM) self.assertEqual(const.FAKE_CERT_LIST[1]['id'], cert_ids[0]) vmware-nsxlib-15.0.6/vmware_nsxlib/tests/unit/v3/nsxlib_testcase.py0000664000175000017500000003611513623151571025532 0ustar zuulzuul00000000000000# Copyright (c) 2015 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
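# Editorial sketch (illustration only, inferred from the expectations encoded
# in test_cert.py above): client-certificate registration is a two-step flow
# against the NSX trust-management API: import the PEM, then bind the returned
# certificate id to a principal identity. Assuming ``json_client`` is an
# already constructed vmware_nsxlib.v3.client.JSONRESTClient rooted at
# 'api/v1', that flow is roughly:


def _editorial_register_client_cert(json_client, cert_pem, identity, node_id):
    # step 1: import the certificate; the backend answers with its new id
    resp = json_client.url_post(
        'trust-management/certificates?action=import',
        {'pem_encoded': cert_pem})
    cert_id = resp['results'][0]['id']
    # step 2: bind that certificate to a principal identity
    json_client.url_post(
        'trust-management/principal-identities',
        {'name': identity,
         'node_id': node_id,
         'permission_group': 'read_write_api_users',
         'certificate_id': cert_id,
         'is_protected': True})
    return cert_id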
# import copy import unittest import mock from oslo_serialization import jsonutils from oslo_utils import uuidutils from requests import exceptions as requests_exceptions from requests import models from vmware_nsxlib import v3 from vmware_nsxlib.v3 import client as nsx_client from vmware_nsxlib.v3 import client_cert from vmware_nsxlib.v3 import cluster as nsx_cluster from vmware_nsxlib.v3 import config from vmware_nsxlib.v3 import utils NSX_USER = 'admin' NSX_PASSWORD = 'default' NSX_MANAGER = '1.2.3.4' NSX_INSECURE = False NSX_CERT = '/opt/stack/certs/nsx.pem' CLIENT_CERT = '/opt/stack/certs/client.pem' NSX_HTTP_RETRIES = 10 NSX_HTTP_TIMEOUT = 10 NSX_HTTP_READ_TIMEOUT = 180 NSX_CONCURENT_CONN = 10 NSX_CONN_IDLE_TIME = 10 NSX_MAX_ATTEMPTS = 10 PLUGIN_SCOPE = "plugin scope" PLUGIN_TAG = "plugin tag" PLUGIN_VER = "plugin ver" DNS_NAMESERVERS = ['1.1.1.1'] DNS_DOMAIN = 'openstacklocal' JSESSIONID = 'my_sess_id' def _mock_nsxlib(): def _return_id_key(*args, **kwargs): return {'id': uuidutils.generate_uuid()} def _mock_add_rules_in_section(*args): # NOTE(arosen): the code in the neutron plugin expects the # neutron rule id as the display_name. rules = args[0] return { 'rules': [ {'display_name': rule['display_name'], 'id': uuidutils.generate_uuid()} for rule in rules ]} def _mock_limits(*args): return utils.TagLimits(20, 40, 15) mocking = [] mocking.append(mock.patch( "vmware_nsxlib.v3.cluster.NSXRequestsHTTPProvider" ".validate_connection")) mocking.append(mock.patch( "vmware_nsxlib.v3.security.NsxLibNsGroup.create", side_effect=_return_id_key)) mocking.append(mock.patch( "vmware_nsxlib.v3.security.NsxLibFirewallSection.create_empty", side_effect=_return_id_key)) mocking.append(mock.patch( "vmware_nsxlib.v3.security.NsxLibFirewallSection.init_default", return_value=uuidutils.generate_uuid())) mocking.append(mock.patch( "vmware_nsxlib.v3.security.NsxLibNsGroup.list")) mocking.append(mock.patch( "vmware_nsxlib.v3.security.NsxLibFirewallSection.add_rules", side_effect=_mock_add_rules_in_section)) mocking.append(mock.patch( ("vmware_nsxlib.v3.core_resources." 
"NsxLibTransportZone.get_id_by_name_or_id"), return_value=uuidutils.generate_uuid())) mocking.append(mock.patch( "vmware_nsxlib.v3.NsxLib.get_tag_limits", side_effect=_mock_limits)) for m in mocking: m.start() return mocking def get_default_nsxlib_config(allow_passthrough=True): return config.NsxLibConfig( username=NSX_USER, password=NSX_PASSWORD, retries=NSX_HTTP_RETRIES, insecure=NSX_INSECURE, token_provider=None, ca_file=NSX_CERT, concurrent_connections=NSX_CONCURENT_CONN, http_timeout=NSX_HTTP_TIMEOUT, http_read_timeout=NSX_HTTP_READ_TIMEOUT, conn_idle_timeout=NSX_CONN_IDLE_TIME, http_provider=None, nsx_api_managers=[], plugin_scope=PLUGIN_SCOPE, plugin_tag=PLUGIN_TAG, plugin_ver=PLUGIN_VER, dns_nameservers=DNS_NAMESERVERS, dns_domain=DNS_DOMAIN, allow_passthrough=allow_passthrough ) def get_nsxlib_config_with_client_cert(): return config.NsxLibConfig( client_cert_provider=client_cert.ClientCertProvider(CLIENT_CERT), retries=NSX_HTTP_RETRIES, insecure=NSX_INSECURE, ca_file=NSX_CERT, token_provider=None, concurrent_connections=NSX_CONCURENT_CONN, http_timeout=NSX_HTTP_TIMEOUT, http_read_timeout=NSX_HTTP_READ_TIMEOUT, conn_idle_timeout=NSX_CONN_IDLE_TIME, http_provider=None, nsx_api_managers=[], plugin_scope=PLUGIN_SCOPE, plugin_tag=PLUGIN_TAG, plugin_ver=PLUGIN_VER) class NsxLibTestCase(unittest.TestCase): def use_client_cert_auth(self): return False def setUp(self, *args, **kwargs): super(NsxLibTestCase, self).setUp() self.mocking = _mock_nsxlib() if self.use_client_cert_auth(): nsxlib_config = get_nsxlib_config_with_client_cert() else: nsxlib_config = get_default_nsxlib_config() self.nsxlib = v3.NsxLib(nsxlib_config) # print diffs when assert comparisons fail self.maxDiff = None def tearDown(self, *args, **kwargs): # stop the mocks for m in self.mocking: m.stop() super(NsxLibTestCase, self).tearDown() class MemoryMockAPIProvider(nsx_cluster.AbstractHTTPProvider): """Acts as a HTTP provider for mocking which is backed by a MockRequestSessionApi. 
""" def __init__(self, mock_session_api): self._store = mock_session_api @property def provider_id(self): return "Memory mock API" def validate_connection(self, cluster_api, endpoint, conn): return def new_connection(self, cluster_api, provider): # all callers use the same backing return self._store def is_connection_exception(self, exception): return isinstance(exception, requests_exceptions.ConnectionError) def is_timeout_exception(self, exception): return isinstance(exception, requests_exceptions.Timeout) class NsxClientTestCase(NsxLibTestCase): class MockNSXClusteredAPI(nsx_cluster.NSXClusteredAPI): def __init__( self, session_response=None, username=None, password=None, retries=None, insecure=None, ca_file=None, concurrent_connections=None, http_timeout=None, http_read_timeout=None, conn_idle_timeout=None, nsx_api_managers=None, max_attempts=None): nsxlib_config = config.NsxLibConfig( username=username or NSX_USER, password=password or NSX_PASSWORD, retries=retries or NSX_HTTP_RETRIES, insecure=insecure if insecure is not None else NSX_INSECURE, token_provider=None, ca_file=ca_file or NSX_CERT, concurrent_connections=(concurrent_connections or NSX_CONCURENT_CONN), http_timeout=http_timeout or NSX_HTTP_TIMEOUT, http_read_timeout=http_read_timeout or NSX_HTTP_READ_TIMEOUT, conn_idle_timeout=conn_idle_timeout or NSX_CONN_IDLE_TIME, max_attempts=max_attempts or NSX_MAX_ATTEMPTS, http_provider=NsxClientTestCase.MockHTTPProvider( session_response=session_response), nsx_api_managers=nsx_api_managers or [NSX_MANAGER], plugin_scope=PLUGIN_SCOPE, plugin_tag=PLUGIN_TAG, plugin_ver=PLUGIN_VER) super(NsxClientTestCase.MockNSXClusteredAPI, self).__init__( nsxlib_config) self._record = mock.Mock() def record_call(self, request, **kwargs): verb = request.method.lower() # filter out requests specific attributes checked_kwargs = copy.copy(kwargs) del checked_kwargs['proxies'] del checked_kwargs['stream'] if 'allow_redirects' in checked_kwargs: del checked_kwargs['allow_redirects'] for attr in ['url', 'body']: checked_kwargs[attr] = getattr(request, attr, None) # remove headers we don't need to verify checked_kwargs['headers'] = copy.copy(request.headers) for header in ['Accept-Encoding', 'User-Agent', 'Connection', 'Authorization', 'Content-Length']: if header in checked_kwargs['headers']: del checked_kwargs['headers'][header] checked_kwargs['headers'] = request.headers # record the call in the mock object method = getattr(self._record, verb) method(**checked_kwargs) def assert_called_once(self, verb, **kwargs): mock_call = getattr(self._record, verb.lower()) mock_call.assert_called_once_with(**kwargs) def assert_any_call(self, verb, **kwargs): mock_call = getattr(self._record, verb.lower()) mock_call.assert_any_call(**kwargs) def call_count(self, verb): mock_call = getattr(self._record, verb.lower()) return mock_call.call_count @property def recorded_calls(self): return self._record class MockHTTPProvider(nsx_cluster.NSXRequestsHTTPProvider): def __init__(self, session_response=None): super(NsxClientTestCase.MockHTTPProvider, self).__init__() if isinstance(session_response, list): self._session_responses = session_response elif session_response: self._session_responses = [session_response] else: self._session_responses = None def new_connection(self, cluster_api, provider): # wrapper the session so we can intercept and record calls session = super(NsxClientTestCase.MockHTTPProvider, self).new_connection(cluster_api, provider) mock_adapter = mock.Mock() session_send = session.send def 
_adapter_send(request, **kwargs): # record calls at the requests HTTP adapter level mock_response = mock.Mock() mock_response.history = None mock_response.headers = {'location': ''} # needed to bypass requests internal checks for mock mock_response.raw._original_response = {} # record the request for later verification cluster_api.record_call(request, **kwargs) return mock_response def _session_send(request, **kwargs): # calls at the Session level if self._session_responses: # pop first response current_response = self._session_responses[0] del self._session_responses[0] # consumer has setup a response for the session cluster_api.record_call(request, **kwargs) return (current_response() if hasattr(current_response, '__call__') else current_response) # bypass requests redirect handling for mock kwargs['allow_redirects'] = False # session send will end up calling adapter send return session_send(request, **kwargs) mock_adapter.send = _adapter_send session.send = _session_send def _mock_adapter(*args, **kwargs): # use our mock adapter rather than requests adapter return mock_adapter session.get_adapter = _mock_adapter return session def validate_connection(self, cluster_api, endpoint, conn): assert conn is not None def mock_nsx_clustered_api(self, session_response=None, **kwargs): orig_request = nsx_cluster.TimeoutSession.request def mocked_request(*args, **kwargs): if args[2].endswith('api/session/create'): response = models.Response() response.status_code = 200 response.headers = { 'Set-Cookie': 'JSESSIONID=%s;junk' % JSESSIONID} return response return orig_request(*args, **kwargs) with mock.patch.object(nsx_cluster.TimeoutSession, 'request', new=mocked_request): cluster = NsxClientTestCase.MockNSXClusteredAPI( session_response=session_response, **kwargs) return cluster @staticmethod def default_headers(): return {'Content-Type': 'application/json', 'Accept': 'application/json', 'Cookie': 'JSESSIONID=%s;' % JSESSIONID} def mocked_resource(self, resource_class, mock_validate=True, session_response=None): mocked = resource_class(nsx_client.NSX3Client( self.mock_nsx_clustered_api(session_response=session_response), nsx_api_managers=[NSX_MANAGER], max_attempts=NSX_MAX_ATTEMPTS), nsxlib_config=get_default_nsxlib_config(), nsxlib=self.nsxlib) if mock_validate: mock.patch.object(mocked.client, '_validate_result').start() return mocked def new_mocked_client(self, client_class, mock_validate=True, session_response=None, mock_cluster=None, **kwargs): client = client_class(mock_cluster or self.mock_nsx_clustered_api( session_response=session_response), **kwargs) if mock_validate: mock.patch.object(client, '_validate_result').start() new_client_for = client.new_client_for def _new_client_for(*args, **kwargs): sub_client = new_client_for(*args, **kwargs) if mock_validate: mock.patch.object(sub_client, '_validate_result').start() return sub_client client.new_client_for = _new_client_for return client def new_mocked_cluster(self, conf_managers, validate_conn_func, concurrent_connections=None): mock_provider = mock.Mock() mock_provider.default_scheme = 'https' mock_provider.validate_connection = validate_conn_func nsxlib_config = get_default_nsxlib_config() if concurrent_connections: nsxlib_config.concurrent_connections = concurrent_connections nsxlib_config.http_provider = mock_provider nsxlib_config.nsx_api_managers = conf_managers return nsx_cluster.NSXClusteredAPI(nsxlib_config) def assert_json_call(self, method, client, url, headers=None, timeout=(NSX_HTTP_TIMEOUT, NSX_HTTP_READ_TIMEOUT), data=None): 
cluster = client._conn if data: data = jsonutils.dumps(data, sort_keys=True) if not headers: headers = self.default_headers() cluster.assert_called_once( method, **{'url': url, 'verify': NSX_CERT, 'body': data, 'headers': headers, 'cert': None, 'timeout': timeout}) vmware-nsxlib-15.0.6/vmware_nsxlib/tests/unit/v3/test_constants.py0000664000175000017500000005601013623151571025407 0ustar zuulzuul00000000000000# Copyright (c) 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_utils import uuidutils from vmware_nsxlib.v3 import nsx_constants FAKE_NAME = "fake_name" FAKE_SWITCH_UUID = uuidutils.generate_uuid() FAKE_IP_SET_UUID = uuidutils.generate_uuid() FAKE_PORT_UUID = uuidutils.generate_uuid() FAKE_PORT = { "id": FAKE_PORT_UUID, "display_name": FAKE_NAME, "resource_type": "LogicalPort", "address_bindings": [], "logical_switch_id": FAKE_SWITCH_UUID, "admin_state": "UP", "attachment": { "id": "9ca8d413-f7bf-4276-b4c9-62f42516bdb2", "attachment_type": "VIF" }, "switching_profile_ids": [ { "value": "64814784-7896-3901-9741-badeff705639", "key": "IpDiscoverySwitchingProfile" }, { "value": "fad98876-d7ff-11e4-b9d6-1681e6b88ec1", "key": "SpoofGuardSwitchingProfile" }, { "value": "93b4b7e8-f116-415d-a50c-3364611b5d09", "key": "PortMirroringSwitchingProfile" }, { "value": "fbc4fb17-83d9-4b53-a286-ccdf04301888", "key": "SwitchSecuritySwitchingProfile" }, { "value": "f313290b-eba8-4262-bd93-fab5026e9495", "key": "QosSwitchingProfile" } ] } FAKE_CONTAINER_PORT = { "id": FAKE_PORT_UUID, "display_name": FAKE_NAME, "resource_type": "LogicalPort", "address_bindings": [ { "ip_address": "192.168.1.110", "mac_address": "aa:bb:cc:dd:ee:ff" } ], "logical_switch_id": FAKE_SWITCH_UUID, "admin_state": "UP", "attachment": { "id": "9ca8d413-f7bf-4276-b4c9-62f42516bdb2", "attachment_type": "VIF", "context": { "vlan_tag": 122, "container_host_vif_id": "c6f817a0-4e36-421e-98a6-8a2faed880bc", "resource_type": "VifAttachmentContext", "app_id": "container-1", "vif_type": "CHILD", "allocate_addresses": "Both", } }, "switching_profile_ids": [ { "value": "64814784-7896-3901-9741-badeff705639", "key": "IpDiscoverySwitchingProfile" }, { "value": "fad98876-d7ff-11e4-b9d6-1681e6b88ec1", "key": "SpoofGuardSwitchingProfile" }, { "value": "93b4b7e8-f116-415d-a50c-3364611b5d09", "key": "PortMirroringSwitchingProfile" }, { "value": "fbc4fb17-83d9-4b53-a286-ccdf04301888", "key": "SwitchSecuritySwitchingProfile" }, { "value": "f313290b-eba8-4262-bd93-fab5026e9495", "key": "QosSwitchingProfile" } ], "extra_configs": [ { "config_pair": { "value": "value1", "key": "key1" } }, { "config_pair": { "value": "value2", "key": "key2" } } ] } FAKE_ROUTER_UUID = uuidutils.generate_uuid() FAKE_ROUTER_FW_SEC_UUID = uuidutils.generate_uuid() FAKE_ROUTER = { "resource_type": "LogicalRouter", "revision": 0, "id": FAKE_ROUTER_UUID, "display_name": FAKE_NAME, "firewall_sections": [{ "is_valid": True, "target_type": "FirewallSection", "target_id": FAKE_ROUTER_FW_SEC_UUID }], "advanced_config": { "external_transit_networks": ["100.64.0.0/10"], }, 
} FAKE_ROUTER_PORT_UUID = uuidutils.generate_uuid() FAKE_ROUTER_PORT = { "resource_type": nsx_constants.LROUTERPORT_UPLINK, "revision": 0, "id": FAKE_ROUTER_PORT_UUID, "display_name": FAKE_NAME, "logical_router_id": FAKE_ROUTER_UUID, "subnets": [{'ip_addresses': ['172.20.1.60'], 'prefix_length': 24}] } FAKE_ROUTER_LINKT1_PORT_UUID = uuidutils.generate_uuid() FAKE_ROUTER_LINKT1_PORT = { "resource_type": nsx_constants.LROUTERPORT_LINKONTIER1, "revision": 0, "id": FAKE_ROUTER_LINKT1_PORT_UUID, "display_name": FAKE_NAME, "logical_router_id": FAKE_ROUTER_UUID, "linked_logical_router_port_id": {'target_id': uuidutils.generate_uuid()} } FAKE_QOS_PROFILE = { "resource_type": "QosSwitchingProfile", "id": uuidutils.generate_uuid(), "display_name": FAKE_NAME, "system_defined": False, "dscp": { "priority": 25, "mode": "UNTRUSTED" }, "tags": [], "description": FAKE_NAME, "class_of_service": 0, "shaper_configuration": [ { "resource_type": "IngressRateShaper", "enabled": False, "peak_bandwidth_mbps": 0, "burst_size_bytes": 0, "average_bandwidth_mbps": 0 }, { "resource_type": "IngressBroadcastRateShaper", "enabled": False, "peak_bandwidth_kbps": 0, "average_bandwidth_kbps": 0, "burst_size_bytes": 0 }, { "resource_type": "EgressRateShaper", "enabled": False, "peak_bandwidth_mbps": 0, "burst_size_bytes": 0, "average_bandwidth_mbps": 0 } ], "_last_modified_user": "admin", "_last_modified_time": 1438383180608, "_create_time": 1438383180608, "_create_user": "admin", "_revision": 0 } FAKE_IP_POOL_UUID = uuidutils.generate_uuid() FAKE_IP_POOL = { "_revision": 0, "id": FAKE_IP_POOL_UUID, "display_name": "IPPool-IPV6-1", "description": "IPPool-IPV6-1 Description", "subnets": [{ "dns_nameservers": [ "2002:a70:cbfa:1:1:1:1:1" ], "allocation_ranges": [{ "start": "2002:a70:cbfa:0:0:0:0:1", "end": "2002:a70:cbfa:0:0:0:0:5" }], "gateway_ip": "2002:a80:cbfa:0:0:0:0:255", "cidr": "2002:a70:cbfa:0:0:0:0:0/24" }], } FAKE_IP_SET = { "id": FAKE_IP_SET_UUID, "display_name": FAKE_NAME, "resource_type": "IPSet", "ip_addresses": [ "192.168.1.1-192.168.1.6", "192.168.1.8", "192.168.4.8/24"] } FAKE_APPLICATION_PROFILE_UUID = uuidutils.generate_uuid() FAKE_APPLICATION_PROFILE = { "resource_type": "LbHttpProfile", "description": "my http profile", "id": FAKE_APPLICATION_PROFILE_UUID, "display_name": "httpprofile1", "ntlm": False, "request_body_size": 65536, "request_header_size": 1024, "response_header_size": 4096, "response_timeout": 60, "http_redirect_to": "redirect_url", "http_redirect_to_https": False, "idle_timeout": 1800, "x_forwarded_for": "INSERT", "_create_user": "admin", "_create_time": 1493834124218, "_last_modified_user": "admin", "_last_modified_time": 1493834124218, "_system_owned": False, "_revision": 0 } FAKE_PERSISTENCE_PROFILE_UUID = uuidutils.generate_uuid() FAKE_PERSISTENCE_PROFILE = { "resource_type": "LbCookiePersistenceProfile", "description": "cookie persistence", "id": FAKE_PERSISTENCE_PROFILE_UUID, "display_name": "cookiePersistence", "cookie_mode": "INSERT", "cookie_garble": True, "cookie_fallback": True, "cookie_name": "ABC", "_create_user": "admin", "_create_time": 1493837413804, "_last_modified_user": "admin", "_last_modified_time": 1493837413804, "_system_owned": False, "_revision": 0 } FAKE_RULE_UUID = uuidutils.generate_uuid() FAKE_RULE = { "resource_type": "LbRule", "description": "LbRule to route login requests to dedicated pool", "id": FAKE_RULE_UUID, "display_name": "LoginRouteRule", "phase": "HTTP_FORWARDING", "match_strategy": "ALL", "match_conditions": [ { "type": "LbHttpRequestUriCondition", 
"uri": "/login" } ], "actions": [ { "type": "LbSelectPoolAction", "pool_id": "54411c58-046c-4236-8ff1-e1e1aad3e873" } ] } FAKE_CLIENT_SSL_PROFILE_UUID = uuidutils.generate_uuid() FAKE_CLIENT_SSL_PROFILE = { "display_name": "clientSslProfile1", "description": "client ssl profile", "id": FAKE_CLIENT_SSL_PROFILE_UUID, "prefer_server_ciphers": False, "session_cache_enabled": False, "session_cache_timeout": 300 } FAKE_SERVER_SSL_PROFILE_UUID = uuidutils.generate_uuid() FAKE_SERVER_SSL_PROFILE = { "display_name": "serverSslProfile1", "description": "server ssl profile", "id": FAKE_SERVER_SSL_PROFILE_UUID, "session_cache_enabled": False } FAKE_MONITOR_UUID = uuidutils.generate_uuid() FAKE_MONITOR = { "display_name": "httpmonitor1", "description": "my http monitor", "id": FAKE_MONITOR_UUID, "resource_type": "LbHttpMonitor", "interval": 5, "rise_count": 3, "fall_count": 3, "timeout": 15, "request_url": "/", "request_method": "GET", "monitor_port": "80" } FAKE_POOL_UUID = uuidutils.generate_uuid() FAKE_POOL = { "display_name": "httppool1", "description": "my http pool", "id": FAKE_POOL_UUID, "algorithm": "ROUND_ROBIN", } FAKE_VIRTUAL_SERVER_UUID = uuidutils.generate_uuid() FAKE_VIRTUAL_SERVER = { "display_name": "httpvirtualserver1", "description": "my http virtual server", "id": FAKE_VIRTUAL_SERVER_UUID, "enabled": True, "port": "80", "ip_protocol": "TCP", } FAKE_SERVICE_UUID = uuidutils.generate_uuid() FAKE_SERVICE = { "display_name": "my LB web service1", "description": "my LB web service", "id": FAKE_SERVICE_UUID, "enabled": True, "attachment": { "target_id": FAKE_ROUTER_UUID, "target_type": "LogicalRouter" }, "relax_scale_validation": False } FAKE_TZ_UUID = uuidutils.generate_uuid() FAKE_TZ = { "resource_type": "TransportZone", "revision": 0, "id": FAKE_TZ_UUID, "display_name": FAKE_NAME, "transport_type": "OVERLAY", "host_switch_mode": "STANDARD" } FAKE_TN_UUID = uuidutils.generate_uuid() FAKE_TZ_EP_UUID = uuidutils.generate_uuid() FAKE_TZ_EP_UUID2 = uuidutils.generate_uuid() FAKE_TN = { "resource_type": "TransportNode", "revision": 0, "id": FAKE_TZ_UUID, "display_name": FAKE_NAME, "transport_zone_endpoints": [{"transport_zone_id": FAKE_TZ_UUID}], "host_switch_spec": { "host_switches": [ { 'transport_zone_endpoints': [ { 'transport_zone_id': FAKE_TZ_EP_UUID } ] }, { 'transport_zone_endpoints': [ { 'transport_zone_id': FAKE_TZ_EP_UUID2 } ] } ] } } FAKE_MD_UUID = uuidutils.generate_uuid() FAKE_URL = "http://7.7.7.70:3500/abc" FAKE_MD = { "resource_type": "MetadataProxy", "revision": 0, "id": FAKE_MD_UUID, "metadata_server_url": FAKE_URL } FAKE_RELAY_UUID = uuidutils.generate_uuid() FAKE_RELAY_SERVER = "6.6.6.6" FAKE_RELAY_PROFILE = { "id": FAKE_RELAY_UUID, "display_name": "dummy", "server_addresses": [FAKE_RELAY_SERVER], "resource_type": "DhcpRelayProfile" } FAKE_RELAY_SERVICE_UUID = uuidutils.generate_uuid() FAKE_RELAY_SERVICE = { "id": FAKE_RELAY_SERVICE_UUID, "display_name": "dummy", "dhcp_relay_profile_id": FAKE_RELAY_UUID, "resource_type": "DhcpRelayService" } FAKE_DEFAULT_CERTIFICATE_ID = uuidutils.generate_uuid() FAKE_CERT_LIST = [ {'pem_encoded': '-----BEGINCERTIFICATE-----\n' 'MIIDmzCCAoOgAwIBAgIGAV8Rg5RhMA0GCSqGSIb3DQEBCwUAMHoxJzA' 'lBgNVBAMM\nHlZNd2FyZSBOU1hBUEkgVHJ1c3QgTWFuYWdlbWVudDET' 'MBEGA1UECgwKVk13YXJl\nIEluYzEMMAoGA1UECwwDTlNYMQswCQYDV' 'QQGEwJVUzELMAkGA1UECAwCQ0ExEjAQ\nBgNVBAcMCVBhbG8gQWx0bz' 'AeFw0xNzEwMTIxNjU1NTZaFw0yNzEwMTAxNjU1NTZa\nMHoxJzAlBgN' 'VBAMMHlZNd2FyZSBOU1hBUEkgVHJ1c3QgTWFuYWdlbWVudDETMBEG\n' 'A1UECgwKVk13YXJlIEluYzEMMAoGA1UECwwDTlNYMQswCQYDVQQGEwJ' 
'VUzELMAkG\nA1UECAwCQ0ExEjAQBgNVBAcMCVBhbG8gQWx0bzCCASIw' 'DQYJKoZIhvcNAQEBBQAD\nggEPADCCAQoCggEBAJuRUtmJLamkJyW3X' 'qpilC7o0dxp3l5vlWWCjnbz3cl+/5Fd\nnpd8dTco9UMeSv5bPBGvLm' 'qSPBZwTYCO3JAowF7aS3qPPWo8tNYWqlMfrZqo5Phc\nGRwtTkfK+GO' '2VN6EG7kTewjrNMW7EAA/68fsNk0QeYIkDJw4ozaX6MhyNDjR+20M\n' '0urN5DEt0ucNZfuQ0pfwYwZoAULHJJODRgUzQG7OT0u64m4ugjQ0uxD' '268aV2IFU\ntSln5HAw2IHXsSn+TVCxInDb+3Uj5E0gjANk5xH7yumi' 'mFXC5DGVvdi1vHdQwZzi\nEklX2Gj2+qEiLul9Jr6BjMM+cor3ediuL' 'KfC05kCAwEAAaMnMCUwDgYDVR0PAQH/\nBAQDAgeAMBMGA1UdJQQMMA' 'oGCCsGAQUFBwMCMA0GCSqGSIb3DQEBCwUAA4IBAQBb\nk498dN3Wid9' '0NIfEJOtTuPtMBSLbCuXgeAqmxGgAB1mYyXCSk50AzkzDZqdt7J9Z\n' 'm3LMe1mfyzfD5zboGiSbb6OrMac3RO9B3nFl2h2pkJtZQAqQDxrighQ' 'qodlbLCum\nw3juA9AIx+YveAOP8mwldo6XJX4ogIXiTol6m1EkOmJ/' '6YnFiVN/BloBhSbbv2zJ\nhk9LKwCjZ23hkWj74zQY94iknhcS3VxEt' 'FlEyk1VrRGkmFfn618JCOCt+8Zuw1M3\nlkn4tA81IVjbj/uWaRIDY1' 'gSfltVX14vNy5fbtCHlQiJgI/A4I4z8UNaktkLO/ie\ntiAwSni6x7S' 'ZWsf3Sy/P\n-----END CERTIFICATE-----\n', 'id': 'c863428e-bfce-4a93-9341-6c9b9ec07657', 'resource_type': 'certificate_self_signed'}, {'pem_encoded': '-----BEGIN CERTIFICATE-----\n' 'MIIEgzCCAmsCCQCmkvlHE5M1KTANBgkqhkiG9w0BAQsFADB0MQswCQY' 'DVQQGEwJV\nUzETMBEGA1UECAwKQ2FsaWZvcm5pYTESMBAGA1UEBwwJ' 'UGFsbyBBbHRvMQ8wDQYD\nVQQKDAZWTXdhcmUxDTALBgNVBAsMBE5TQ' 'lUxHDAaBgNVBAMME1ZNd2FyZSBOU0JV\nIFJvb3QgQ0EwHhcNMTcxMD' 'EyMjI0NzU0WhcNMTgxMDA3MjI0NzU0WjCBkjELMAkG\nA1UEBhMCVVM' 'xEzARBgNVBAgMCkNhbGlmb3JuaWExEjAQBgNVBAcMCVBhbG8gQWx0\n' 'bzEPMA0GA1UECgwGVk13YXJlMQ0wCwYDVQQLDAROU0JVMRgwFgYDVQQ' 'DDA93d3cu\nZXhhbXBsZS5jb20xIDAeBgkqhkiG9w0BCQEWEWFkbWlu' 'QGV4YW1wbGUuY29tMIIB\nIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBC' 'gKCAQEA7F2TheIEy9g9CwVMlxlTuZqQ\n6QbJdymQw9RQwR0O09wsbS' 'jx4XJtzwDjCX7aZ1ON7eZBXXNkQx6nWlkYrS7zmR4T\npWmLiIYQWpV' 'H6oIzgEEaeabFOqfs5b0zbYZN868fcFsPVGGgizfKO6I+gJwp5sii\n' 'IQvBa9hCKlXRwbGYYeywThfMf4plxzj/YDIIBkM+4qck58sr7Nhjb5J' 'FD60LrOJK\nSdqzCSinsYlx5eZ4f5GjpMc7euAsS5UVdZFV13CysK83' '6h/KHYyz/LXTjGpGbDd7\n2wPSUZRkjY58I5FU0hVeH3zMoaVJBfXmj' 'X8TVjR2Jk+NcNr5Azmgn3BC8pTqowID\nAQABMA0GCSqGSIb3DQEBCw' 'UAA4ICAQBtGBazJXwQVtIqBeyzmoQDWNctBc5VSTEq\nGT3dAyy0LYJ' 'Tm+4aaCVAY4uiS6HTzb4MQR+EtGxN/1fLyFgs/V3oQ+bRh+aWS85u\n' 'J4sZL87EtO7VlXLt8mAjqrAAJwwywMhbw+PlGVjhJgp8vAjpbDiccmb' 'QRN/noSSF\nTCqUDFtsP4yyf+b8xbipVGvmTLrqTX1Dt9iQKKKD8QYi' 'GG0Bt2t38YVc8hEQg3TC\n8xjs1OcyYN+oCRHj+Nunib9fH8OGMjn3j' 'OpVAJGADpwmTc0rbwkTFtTUweT5HSCD\nrzLZNI0DwjLeR8mDZRMpjN' 'tYaCSERbpzhEUFWEIXuVT3GdrgsPGcNZi520cyeUyz\nTC9ixXgkiy4' 'yS8zqca0v2mryrf9MxhYKu2nek+0GB4WodHO904Tlbcdz9wHnCi4f\n' '6VdS7/lKncvj8yJrqE7yQtzLlNGjBUJNajp/jchzlHpsYLCiuIX7fyh' '6Z+cQVwjJ\nSWkf7yuOO+jEw45A0Jxtyl3aLf5aoptmzLOKLFznscSg' 'tkFvtdh4O/APxORxgPKc\n1WiQCpUecsmxc4qMRulh31tVBFi6uIsKY' 'vrUkP5JaxIxV/nKGBDJyzKbAZWLqdnm\nNd3coEUMwd16vr57QJatJb' 'To/wVMMbvW3vqVy0AuXReHCPVTDF5+vnsMGXK/IV7w\nLzulLswFmA=' '=\n-----END CERTIFICATE-----\n', 'id': 'e4b0ab75-ce14-456e-8f5f-071303dd6275', 'resource_type': 'certificate_signed'} ] FAKE_CERT_PEM = ( "-----BEGIN CERTIFICATE-----\n" "MIIEgzCCAmsCCQCmkvlHE5M1KTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV\n" "UzETMBEGA1UECAwKQ2FsaWZvcm5pYTESMBAGA1UEBwwJUGFsbyBBbHRvMQ8wDQYD\n" "VQQKDAZWTXdhcmUxDTALBgNVBAsMBE5TQlUxHDAaBgNVBAMME1ZNd2FyZSBOU0JV\n" "IFJvb3QgQ0EwHhcNMTcxMDEyMjI0NzU0WhcNMTgxMDA3MjI0NzU0WjCBkjELMAkG\n" "A1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExEjAQBgNVBAcMCVBhbG8gQWx0\n" "bzEPMA0GA1UECgwGVk13YXJlMQ0wCwYDVQQLDAROU0JVMRgwFgYDVQQDDA93d3cu\n" "ZXhhbXBsZS5jb20xIDAeBgkqhkiG9w0BCQEWEWFkbWluQGV4YW1wbGUuY29tMIIB\n" "IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA7F2TheIEy9g9CwVMlxlTuZqQ\n" 
"6QbJdymQw9RQwR0O09wsbSjx4XJtzwDjCX7aZ1ON7eZBXXNkQx6nWlkYrS7zmR4T\n" "pWmLiIYQWpVH6oIzgEEaeabFOqfs5b0zbYZN868fcFsPVGGgizfKO6I+gJwp5sii\n" "IQvBa9hCKlXRwbGYYeywThfMf4plxzj/YDIIBkM+4qck58sr7Nhjb5JFD60LrOJK\n" "SdqzCSinsYlx5eZ4f5GjpMc7euAsS5UVdZFV13CysK836h/KHYyz/LXTjGpGbDd7\n" "2wPSUZRkjY58I5FU0hVeH3zMoaVJBfXmjX8TVjR2Jk+NcNr5Azmgn3BC8pTqowID\n" "AQABMA0GCSqGSIb3DQEBCwUAA4ICAQBtGBazJXwQVtIqBeyzmoQDWNctBc5VSTEq\n" "GT3dAyy0LYJTm+4aaCVAY4uiS6HTzb4MQR+EtGxN/1fLyFgs/V3oQ+bRh+aWS85u\n" "J4sZL87EtO7VlXLt8mAjqrAAJwwywMhbw+PlGVjhJgp8vAjpbDiccmbQRN/noSSF\n" "TCqUDFtsP4yyf+b8xbipVGvmTLrqTX1Dt9iQKKKD8QYiGG0Bt2t38YVc8hEQg3TC\n" "8xjs1OcyYN+oCRHj+Nunib9fH8OGMjn3jOpVAJGADpwmTc0rbwkTFtTUweT5HSCD\n" "rzLZNI0DwjLeR8mDZRMpjNtYaCSERbpzhEUFWEIXuVT3GdrgsPGcNZi520cyeUyz\n" "TC9ixXgkiy4yS8zqca0v2mryrf9MxhYKu2nek+0GB4WodHO904Tlbcdz9wHnCi4f\n" "6VdS7/lKncvj8yJrqE7yQtzLlNGjBUJNajp/jchzlHpsYLCiuIX7fyh6Z+cQVwjJ\n" "SWkf7yuOO+jEw45A0Jxtyl3aLf5aoptmzLOKLFznscSgtkFvtdh4O/APxORxgPKc\n" "1WiQCpUecsmxc4qMRulh31tVBFi6uIsKYvrUkP5JaxIxV/nKGBDJyzKbAZWLqdnm\n" "Nd3coEUMwd16vr57QJatJbTo/wVMMbvW3vqVy0AuXReHCPVTDF5+vnsMGXK/IV7w\n" "LzulLswFmA==\n" "-----END CERTIFICATE-----\n") FAKE_DPD_ID = "c933402b-f111-4634-9d66-cc8fffde0f65" FAKE_DPD = { "resource_type": "IPSecVPNDPDProfile", "description": "neutron dpd profile", "id": FAKE_DPD_ID, "display_name": "con1-dpd-profile", "enabled": True, "timeout": 120, } FAKE_PEP_ID = "a7b2915c-2041-4a33-9ea7-9d22b67bf38e" FAKE_PEP = { "resource_type": "IPSecVPNPeerEndpoint", "id": FAKE_PEP_ID, "display_name": "con1", "connection_initiation_mode": "INITIATOR", "authentication_mode": "PSK", "ipsec_tunnel_profile_id": "76e3707d-22e5-4e36-a9ef-b568215e2481", "dpd_profile_id": "04191f5f-3bdd-4ec1-ae56-154b06778d4f", "ike_profile_id": "df386534-5cec-49b4-9c21-4c212cba3cbf", "peer_address": "172.24.4.233", "peer_id": "172.24.4.233" } FAKE_LEP_ID = "cb57de72-4adb-4dad-9abc-685f9f1d0265" FAKE_LEP = { "resource_type": "IPSecVPNLocalEndpoint", "description": "XXX", "id": FAKE_LEP_ID, "display_name": "XXX", "local_id": "1.1.1.1", "ipsec_vpn_service_id": {"target_id": "aca38a11-981b-46d8-9e2c-9bedc0d96794"}, "local_address": "1.1.1.1", "trust_ca_ids": [], "trust_crl_ids": [], } FAKE_VPN_SESS_ID = "33b2f8ce-4357-4780-8c7c-270094847395" FAKE_VPN_SESS = { "resource_type": "PolicyBasedIPSecVPNSession", "description": "con1", "id": FAKE_VPN_SESS_ID, "display_name": "con1", "ipsec_vpn_service_id": "f5bbbd92-0c57-412f-82e6-83c73298f2e9", "peer_endpoint_id": "7a821e15-93b6-46f9-9d2a-db5a164ee6e3", "local_endpoint_id": "e8a3c141-b866-4cb7-91a4-e556b7bd84d6", "enabled": True, "policy_rules": [{ "id": "1211", "sources": [{"subnet": "10.0.6.0/24"}], "logged": False, "destinations": [{"subnet": "10.0.5.0/24"}], "action": "PROTECT", "enabled": True, }], } FAKE_EDGE_CLUSTER_ID = "69c6bc48-0590-4ff5-87b6-9b49e20b67e0" FAKE_EDGE_CLUSTER = { "resource_type": "EdgeCluster", "description": "edgecluster1", "id": FAKE_EDGE_CLUSTER_ID, "display_name": "edgecluster1", "deployment_type": "VIRTUAL_MACHINE", "member_node_type": "EDGE_NODE", "members": [{ "member_index": 0, "transport_node_id": "321d2746-898e-11e8-9723-000c29391f21" }], "cluster_profile_bindings": [{ "profile_id": "15d3485e-0474-4511-bd79-1506ce777baa", "resource_type": "EdgeHighAvailabilityProfile" }], } FAKE_TIERO_ROUTER_ID = "67927d95-18d3-4763-9eb1-a45ff0e63bbe" FAKE_TIERO_ROUTER = { "resource_type": "LogicalRouter", "description": "Provider Logical Router(Tier0)", "id": FAKE_TIERO_ROUTER_ID, "display_name": "PLR-1 LogicalRouterTier0", "edge_cluster_id": 
FAKE_EDGE_CLUSTER_ID, "firewall_sections": [{ "is_valid": True, "target_type": "FirewallSection", "target_id": "c3d80576-e340-403d-a2d0-f4a72a1db6e3" }], "advanced_config": { "transport_zone_id": FAKE_TZ_UUID, "external_transit_networks": ["100.64.0.0/16"], "internal_transit_network": "169.254.0.0/28" }, "router_type": "TIER0", "high_availability_mode": "ACTIVE_STANDBY", "failover_mode": "NON_PREEMPTIVE", } FAKE_TRANS_NODE_ID = "f5a2b5ca-8dba-11e8-9799-020039422cc8" FAKE_TRANS_NODE = { "resource_type": "TransportNode", "id": FAKE_TRANS_NODE_ID, "display_name": FAKE_TRANS_NODE_ID, "maintenance_mode": "DISABLED", "transport_zone_endpoints": [{ "transport_zone_id": FAKE_TZ_UUID, "transport_zone_profile_ids": [{ "profile_id": "52035bb3-ab02-4a08-9884-18631312e50a", "resource_type": "BfdHealthMonitoringProfile" }] }], "node_id": "f5a2b5ca-8dba-11e8-9799-020039422cc8" } FAKE_MANAGER_IP1 = "10.192.210.181" FAKE_MANAGER_IP2 = "10.192.210.182" FAKE_CLUSTER_NODES_CONFIG = [{ "resource_type": "ClusterNodeConfig", "manager_role": { "type": "ManagementClusterRoleConfig", "mgmt_cluster_listen_addr": { "port": 0, "ip_address": FAKE_MANAGER_IP1 }, "api_listen_addr": { "port": 443, "ip_address": FAKE_MANAGER_IP1 }, "mgmt_plane_listen_addr": { "port": 5671, "ip_address": FAKE_MANAGER_IP1 } }, "appliance_mgmt_listen_addr": FAKE_MANAGER_IP1 }, { "resource_type": "ClusterNodeConfig", "controller_role": { "type": "ControllerClusterRoleConfig", "control_cluster_listen_addr": { "port": 7777, "ip_address": "127.0.0.1" }, }, }, { "resource_type": "ClusterNodeConfig", "manager_role": { "type": "ManagementClusterRoleConfig", "mgmt_cluster_listen_addr": { "port": 0, "ip_address": FAKE_MANAGER_IP2 }, "api_listen_addr": { "port": 443, "ip_address": FAKE_MANAGER_IP2 }, "mgmt_plane_listen_addr": { "port": 5671, "ip_address": FAKE_MANAGER_IP2 } }, "appliance_mgmt_listen_addr": FAKE_MANAGER_IP2 }] FAKE_TAGS = [ { 'scope': 'os-project-id', 'tag': 'project-1' }, { 'scope': 'os-api-version', 'tag': '2.1.1.0' } ] vmware-nsxlib-15.0.6/vmware_nsxlib/tests/unit/v3/__init__.py0000664000175000017500000000000013623151571024057 0ustar zuulzuul00000000000000vmware-nsxlib-15.0.6/vmware_nsxlib/tests/unit/v3/test_native_dhcp.py0000664000175000017500000001637113623151571025665 0ustar zuulzuul00000000000000# Copyright (c) 2017 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
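# Editorial sketch (not the library implementation): the static-route tests in
# this module pin down the behaviour expected of
# NsxLibNativeDhcp.build_static_routes. A simplified, pure-Python reference of
# that behaviour, inferred from the expected values asserted below, is:


def _editorial_build_static_routes(gateway_ip, cidr, host_routes):
    # the subnet itself is always reachable on-link
    routes = [{'network': cidr, 'next_hop': '0.0.0.0'}]
    for route in host_routes:
        routes.append({'network': route['destination'],
                       'next_hop': route['nexthop']})
        # a 0.0.0.0/0 host route overrides the subnet gateway
        if route['destination'] == '0.0.0.0/0':
            gateway_ip = route['nexthop']
    # add a default route via the gateway unless one was already supplied
    if gateway_ip and not any(r['network'] == '0.0.0.0/0' for r in routes):
        routes.append({'network': '0.0.0.0/0', 'next_hop': gateway_ip})
    return routes, gateway_ip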
from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase from vmware_nsxlib.v3 import native_dhcp class TestNativeDhcp(nsxlib_testcase.NsxLibTestCase): """Tests for vmware_nsxlib.v3.native_dhcp.NsxLibNativeDhcp.""" def setUp(self, *args, **kwargs): super(TestNativeDhcp, self).setUp() self.handler = native_dhcp.NsxLibNativeDhcp( self.nsxlib.client, nsxlib_testcase.get_default_nsxlib_config()) self.net_dns_domain = 'a.com' self.subnet_dns_nameserver = '1.1.1.1' self.default_dns_domain = 'b.com' self.default_dns_nameserver = '2.2.2.2' def _get_server_config(self, with_net_dns=True, with_default_dns=True, tags=None, gateway_ip='2.2.2.2', cidr='5.5.0.0/24', port_ip='5.5.0.1', net_name='dummy', net_id='dummy_uuid'): name = self.handler.build_server_name(net_name, net_id) if not tags: tags = [] dns_domain = None dns_nameservers = None if with_default_dns: dns_domain = self.default_dns_domain dns_nameservers = [self.default_dns_nameserver] if with_net_dns: dns_domain = self.net_dns_domain dns_nameservers = [self.subnet_dns_nameserver] return self.handler.build_server(name, port_ip, cidr, gateway_ip, dns_domain, dns_nameservers, tags=tags) def test_build_server_config_dns_from_net_no_defaults(self): # Verify that net/subnet dns params are used if exist result = self._get_server_config(with_net_dns=True, with_default_dns=False) self.assertEqual(self.net_dns_domain, result['domain_name']) self.assertEqual([self.subnet_dns_nameserver], result['dns_nameservers']) def test_build_server_config_dns_from_net_with_defaults(self): # Verify that net/subnet dns params are used if exist, even if there # are defaults result = self._get_server_config(with_net_dns=True, with_default_dns=True) self.assertEqual(self.net_dns_domain, result['domain_name']) self.assertEqual([self.subnet_dns_nameserver], result['dns_nameservers']) def test_build_server_config_dns_from_defaults(self): # Verify that default dns params are used if net/subnet dns params # are missing result = self._get_server_config(with_net_dns=False, with_default_dns=True) self.assertEqual(self.default_dns_domain, result['domain_name']) self.assertEqual([self.default_dns_nameserver], result['dns_nameservers']) def test_build_server_config_dns_from_config(self): # Verify that config dns params are used if net/subnet and default # dns params are missing result = self._get_server_config(with_net_dns=False, with_default_dns=False) self.assertEqual(nsxlib_testcase.DNS_DOMAIN, result['domain_name']) self.assertEqual(nsxlib_testcase.DNS_NAMESERVERS, result['dns_nameservers']) def test_build_server_config_with_tags(self): tags = [{'scope': 'a', 'value': 'a'}] result = self._get_server_config(tags=tags) self.assertEqual(tags, result['tags']) def test_build_server_config_with_gateway(self): gw_ip = '10.10.10.10' result = self._get_server_config(gateway_ip=gw_ip) self.assertEqual(gw_ip, result['gateway_ip']) def test_build_server_config_with_server_ip(self): result = self._get_server_config(cidr='7.7.7.0/24', port_ip='7.7.7.14') self.assertEqual('7.7.7.14/24', result['server_ip']) def test_build_server_config_with_name(self): net_name = 'net1' net_id = 'uuid1uuid2' result = self._get_server_config(net_name=net_name, net_id=net_id) self.assertEqual('%s_%s...%s' % (net_name, net_id[:5], net_id[-5:]), result['name']) def test_build_server_config_no_name(self): net_id = 'uuid1uuid2' result = self._get_server_config(net_name=None, net_id=net_id) self.assertEqual('dhcpserver_%s...%s' % (net_id[:5], net_id[-5:]), result['name']) def test_build_static_routes(self): 
gateway_ip = '2.2.2.2' cidr = '5.5.0.0/24' host_routes = [{'nexthop': '81.0.200.254', 'destination': '91.255.255.0/24'}] static_routes, gateway_ip = self.handler.build_static_routes( gateway_ip, cidr, host_routes) expected = [{'network': '5.5.0.0/24', 'next_hop': '0.0.0.0'}, {'network': '91.255.255.0/24', 'next_hop': '81.0.200.254'}, {'network': '0.0.0.0/0', 'next_hop': '2.2.2.2'}] self.assertEqual(expected, static_routes) self.assertEqual('2.2.2.2', gateway_ip) def test_build_static_routes_gw_none(self): gateway_ip = None cidr = '5.5.0.0/24' host_routes = [{'nexthop': '81.0.200.254', 'destination': '91.255.255.0/24'}] static_routes, gateway_ip = self.handler.build_static_routes( gateway_ip, cidr, host_routes) expected = [{'network': '5.5.0.0/24', 'next_hop': '0.0.0.0'}, {'network': '91.255.255.0/24', 'next_hop': '81.0.200.254'}] self.assertEqual(expected, static_routes) self.assertIsNone(gateway_ip) def test_build_static_routes_no_host_routes(self): gateway_ip = '2.2.2.2' cidr = '5.5.0.0/24' host_routes = [] static_routes, gateway_ip = self.handler.build_static_routes( gateway_ip, cidr, host_routes) expected = [{'network': '5.5.0.0/24', 'next_hop': '0.0.0.0'}, {'network': '0.0.0.0/0', 'next_hop': '2.2.2.2'}] self.assertEqual(expected, static_routes) self.assertEqual('2.2.2.2', gateway_ip) def test_build_static_routes_gw_none_host_route_any(self): gateway_ip = None cidr = '5.5.0.0/24' host_routes = [{'nexthop': '81.0.200.254', 'destination': '0.0.0.0/0'}] static_routes, gateway_ip = self.handler.build_static_routes( gateway_ip, cidr, host_routes) expected = [{'network': '5.5.0.0/24', 'next_hop': '0.0.0.0'}, {'network': '0.0.0.0/0', 'next_hop': '81.0.200.254'}] self.assertEqual(expected, static_routes) self.assertEqual('81.0.200.254', gateway_ip) vmware-nsxlib-15.0.6/vmware_nsxlib/tests/unit/v3/test_ns_group_manager.py0000664000175000017500000001770413623151571026730 0ustar zuulzuul00000000000000# Copyright (c) 2015 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
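# NOTE: The tests in this module exercise how NSGroupManager distributes
# nsgroups across its nested groups. The generator below is only a minimal
# sketch of that placement order, under the assumption that the manager
# hashes the uuid and scans the nested groups starting at hash % size; the
# name suggest_nested_group_sketch is hypothetical and not part of
# vmware_nsxlib.
def suggest_nested_group_sketch(nested_group_ids, hashed_value):
    # With 5 nested groups and a hash of 7 the scan starts at index 2 and
    # wraps around: 2, 3, 4, 0, 1 -- the order the tests below expect.
    size = len(nested_group_ids)
    start = hashed_value % size
    for offset in range(size):
        yield nested_group_ids[(start + offset) % size]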
import mock

from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase
from vmware_nsxlib.v3 import exceptions as nsxlib_exc
from vmware_nsxlib.v3 import ns_group_manager
from vmware_nsxlib.v3 import nsx_constants as consts


# Pool of fake ns-group uuids
NSG_IDS = ['11111111-1111-1111-1111-111111111111',
           '22222222-2222-2222-2222-222222222222',
           '33333333-3333-3333-3333-333333333333',
           '44444444-4444-4444-4444-444444444444',
           '55555555-5555-5555-5555-555555555555']


def _mock_create_and_list_nsgroups(test_method):
    nsgroups = []

    def _create_nsgroup_mock(name, desc, tags, membership_criteria=None):
        nsgroup = {'id': NSG_IDS[len(nsgroups)],
                   'display_name': name,
                   'description': desc,
                   'tags': tags}
        nsgroups.append(nsgroup)
        return nsgroup

    def wrap(*args, **kwargs):
        with mock.patch(
            'vmware_nsxlib.v3.security.NsxLibNsGroup.create'
        ) as create_nsgroup_mock:
            create_nsgroup_mock.side_effect = _create_nsgroup_mock
            with mock.patch(
                "vmware_nsxlib.v3.security.NsxLibNsGroup.list"
            ) as list_nsgroups_mock:
                list_nsgroups_mock.side_effect = lambda: nsgroups
                test_method(*args, **kwargs)
    return wrap


class TestNSGroupManager(nsxlib_testcase.NsxLibTestCase):
    """Tests for vmware_nsxlib.v3.ns_group_manager.NSGroupManager."""

    @_mock_create_and_list_nsgroups
    def test_first_initialization(self):
        size = 5
        cont_manager = ns_group_manager.NSGroupManager(self.nsxlib, size)
        nested_groups = cont_manager.nested_groups
        self.assertEqual({i: NSG_IDS[i] for i in range(size)},
                         nested_groups)

    @_mock_create_and_list_nsgroups
    def test_reconfigure_number_of_nested_groups(self):
        # We need to test that when changing the number of nested groups,
        # the NSGroupManager picks the ones which were previously created
        # and creates the ones which are missing, which also verifies that
        # it recognizes existing nested groups.
        size = 2
        # Creates 2 nested groups.
        ns_group_manager.NSGroupManager(self.nsxlib, size)
        size = 5
        # Creates another 3 nested groups.
        nested_groups = ns_group_manager.NSGroupManager(
            self.nsxlib, size).nested_groups
        self.assertEqual({i: NSG_IDS[i] for i in range(size)},
                         nested_groups)

    @_mock_create_and_list_nsgroups
    @mock.patch('vmware_nsxlib.v3.security.NsxLibNsGroup.remove_member')
    @mock.patch('vmware_nsxlib.v3.security.NsxLibNsGroup.add_members')
    def test_add_and_remove_nsgroups(self, add_member_mock,
                                     remove_member_mock):
        # We verify that when adding a new nsgroup it is properly placed
        # according to its id and the number of nested groups.
        size = 5
        cont_manager = ns_group_manager.NSGroupManager(self.nsxlib, size)
        nsgroup_id = 'nsgroup_id'
        with mock.patch.object(cont_manager, '_hash_uuid', return_value=7):
            cont_manager.add_nsgroup(nsgroup_id)
            cont_manager.remove_nsgroup(nsgroup_id)
        # There are 5 nested groups and the hash function will return 7,
        # therefore we expect that the nsgroup will be placed in the 3rd
        # group (index 2).
add_member_mock.assert_called_once_with( NSG_IDS[2], consts.NSGROUP, [nsgroup_id]) remove_member_mock.assert_called_once_with( NSG_IDS[2], consts.NSGROUP, nsgroup_id, verify=True) @_mock_create_and_list_nsgroups @mock.patch('vmware_nsxlib.v3.security.NsxLibNsGroup.remove_member') @mock.patch('vmware_nsxlib.v3.security.NsxLibNsGroup.add_members') def test_when_nested_group_is_full(self, add_member_mock, remove_member_mock): def _add_member_mock(nsgroup, target_type, target_id): if nsgroup == NSG_IDS[2]: raise nsxlib_exc.NSGroupIsFull(nsgroup_id=nsgroup) def _remove_member_mock(nsgroup, target_type, target_id, verify=False): if nsgroup == NSG_IDS[2]: raise nsxlib_exc.NSGroupMemberNotFound(nsgroup_id=nsgroup, member_id=target_id) add_member_mock.side_effect = _add_member_mock remove_member_mock.side_effect = _remove_member_mock size = 5 cont_manager = ns_group_manager.NSGroupManager(self.nsxlib, size) nsgroup_id = 'nsgroup_id' with mock.patch.object(cont_manager, '_hash_uuid', return_value=7): cont_manager.add_nsgroup(nsgroup_id) cont_manager.remove_nsgroup(nsgroup_id) # Trying to add nsgroup to the nested group at index 2 will raise # NSGroupIsFull exception, we expect that the nsgroup will be added to # the nested group at index 3. calls = [mock.call(NSG_IDS[2], consts.NSGROUP, [nsgroup_id]), mock.call(NSG_IDS[3], consts.NSGROUP, [nsgroup_id])] add_member_mock.assert_has_calls(calls) # Since the nsgroup was added to the nested group at index 3, it will # fail to remove it from the group at index 2, and then will try to # remove it from the group at index 3. calls = [ mock.call( NSG_IDS[2], consts.NSGROUP, nsgroup_id, verify=True), mock.call( NSG_IDS[3], consts.NSGROUP, nsgroup_id, verify=True)] remove_member_mock.assert_has_calls(calls) @_mock_create_and_list_nsgroups @mock.patch('vmware_nsxlib.v3.security.NsxLibNsGroup.remove_member') @mock.patch('vmware_nsxlib.v3.security.NsxLibNsGroup.add_members') def test_initialize_with_absent_nested_groups(self, add_member_mock, remove_member_mock): size = 3 cont_manager = ns_group_manager.NSGroupManager(self.nsxlib, size) # list_nsgroups will return nested group 1 and 3, but not group 2. nsgroups = cont_manager.nsxlib_nsgroup.list() with mock.patch("vmware_nsxlib.v3.security.NsxLibNsGroup.list", side_effect=lambda: nsgroups[::2]): # invoking the initialization process again, it should process # groups 1 and 3 and create group 2. cont_manager = ns_group_manager.NSGroupManager(self.nsxlib, size) self.assertEqual({0: NSG_IDS[0], 1: NSG_IDS[3], 2: NSG_IDS[2]}, cont_manager.nested_groups) @_mock_create_and_list_nsgroups def test_suggest_nested_group(self): size = 5 cont_manager = ns_group_manager.NSGroupManager(self.nsxlib, size) # We expect that the first suggested index is 2 expected_suggested_groups = NSG_IDS[2:5] + NSG_IDS[:2] with mock.patch.object(cont_manager, '_hash_uuid', return_value=7): for i, suggested in enumerate( cont_manager._suggest_nested_group('fake-id')): self.assertEqual(expected_suggested_groups[i], suggested) vmware-nsxlib-15.0.6/vmware_nsxlib/tests/unit/v3/test_utils.py0000664000175000017500000003665413623151571024547 0ustar zuulzuul00000000000000# Copyright (c) 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase from vmware_nsxlib.v3 import exceptions from vmware_nsxlib.v3 import nsx_constants from vmware_nsxlib.v3 import utils class TestNsxV3Utils(nsxlib_testcase.NsxClientTestCase): def test_build_v3_tags_payload(self): result = self.nsxlib.build_v3_tags_payload( {'id': 'fake_id', 'project_id': 'fake_proj_id'}, resource_type='os-net-id', project_name='fake_proj_name') expected = [{'scope': 'os-net-id', 'tag': 'fake_id'}, {'scope': 'os-project-id', 'tag': 'fake_proj_id'}, {'scope': 'os-project-name', 'tag': 'fake_proj_name'}, {'scope': 'os-api-version', 'tag': nsxlib_testcase.PLUGIN_VER}] self.assertEqual(expected, result) def test_build_v3_tags_payload_internal(self): result = self.nsxlib.build_v3_tags_payload( {'id': 'fake_id', 'project_id': 'fake_proj_id'}, resource_type='os-net-id', project_name=None) expected = [{'scope': 'os-net-id', 'tag': 'fake_id'}, {'scope': 'os-project-id', 'tag': 'fake_proj_id'}, {'scope': 'os-project-name', 'tag': nsxlib_testcase.PLUGIN_TAG}, {'scope': 'os-api-version', 'tag': nsxlib_testcase.PLUGIN_VER}] self.assertEqual(expected, result) def test_build_v3_tags_payload_invalid_length(self): self.assertRaises(exceptions.NsxLibInvalidInput, self.nsxlib.build_v3_tags_payload, {'id': 'fake_id', 'project_id': 'fake_proj_id'}, resource_type='os-longer-maldini-rocks-id', project_name='fake') def test_build_v3_api_version_tag(self): result = self.nsxlib.build_v3_api_version_tag() expected = [{'scope': nsxlib_testcase.PLUGIN_SCOPE, 'tag': nsxlib_testcase.PLUGIN_TAG}, {'scope': 'os-api-version', 'tag': nsxlib_testcase.PLUGIN_VER}] self.assertEqual(expected, result) def test_build_v3_api_version_project_tag(self): proj = 'project_x' result = self.nsxlib.build_v3_api_version_project_tag(proj) expected = [{'scope': nsxlib_testcase.PLUGIN_SCOPE, 'tag': nsxlib_testcase.PLUGIN_TAG}, {'scope': 'os-api-version', 'tag': nsxlib_testcase.PLUGIN_VER}, {'scope': 'os-project-name', 'tag': proj}] self.assertEqual(expected, result) def test_build_v3_api_version_project_id_tag(self): proj = 'project_x' proj_id = 'project_id' result = self.nsxlib.build_v3_api_version_project_tag( proj, project_id=proj_id) expected = [{'scope': nsxlib_testcase.PLUGIN_SCOPE, 'tag': nsxlib_testcase.PLUGIN_TAG}, {'scope': 'os-api-version', 'tag': nsxlib_testcase.PLUGIN_VER}, {'scope': 'os-project-name', 'tag': proj}, {'scope': 'os-project-id', 'tag': proj_id}] self.assertEqual(expected, result) def test_is_internal_resource(self): project_tag = self.nsxlib.build_v3_tags_payload( {'id': 'fake_id', 'project_id': 'fake_proj_id'}, resource_type='os-net-id', project_name=None) internal_tag = self.nsxlib.build_v3_api_version_tag() expect_false = self.nsxlib.is_internal_resource({'tags': project_tag}) self.assertFalse(expect_false) expect_true = self.nsxlib.is_internal_resource({'tags': internal_tag}) self.assertTrue(expect_true) def test_get_name_and_uuid(self): uuid = 'afc40f8a-4967-477e-a17a-9d560d1786c7' suffix = '_afc40...786c7' expected = 'maldini%s' % suffix short_name = utils.get_name_and_uuid('maldini', uuid) self.assertEqual(expected, 
short_name) name = 'X' * 255 expected = '%s%s' % ('X' * (80 - len(suffix)), suffix) short_name = utils.get_name_and_uuid(name, uuid) self.assertEqual(expected, short_name) def test_build_v3_tags_max_length_payload(self): result = self.nsxlib.build_v3_tags_payload( {'id': 'X' * 255, 'project_id': 'X' * 255}, resource_type='os-net-id', project_name='X' * 255) expected = [{'scope': 'os-net-id', 'tag': 'X' * 40}, {'scope': 'os-project-id', 'tag': 'X' * 40}, {'scope': 'os-project-name', 'tag': 'X' * 40}, {'scope': 'os-api-version', 'tag': nsxlib_testcase.PLUGIN_VER}] self.assertEqual(expected, result) def test_add_v3_tag(self): result = utils.add_v3_tag([], 'fake-scope', 'fake-tag') expected = [{'scope': 'fake-scope', 'tag': 'fake-tag'}] self.assertEqual(expected, result) def test_add_v3_tag_max_length_payload(self): result = utils.add_v3_tag([], 'fake-scope', 'X' * 255) expected = [{'scope': 'fake-scope', 'tag': 'X' * 40}] self.assertEqual(expected, result) def test_add_v3_tag_invalid_scope_length(self): self.assertRaises(exceptions.NsxLibInvalidInput, utils.add_v3_tag, [], 'fake-scope-name-is-far-too-long', 'fake-tag') def test_update_v3_tags_addition(self): tags = [{'scope': 'os-net-id', 'tag': 'X' * 40}, {'scope': 'os-project-id', 'tag': 'Y' * 40}, {'scope': 'os-project-name', 'tag': 'Z' * 40}, {'scope': 'os-api-version', 'tag': nsxlib_testcase.PLUGIN_VER}] resources = [{'scope': 'os-instance-uuid', 'tag': 'A' * 40}] tags = utils.update_v3_tags(tags, resources) expected = [{'scope': 'os-net-id', 'tag': 'X' * 40}, {'scope': 'os-project-id', 'tag': 'Y' * 40}, {'scope': 'os-project-name', 'tag': 'Z' * 40}, {'scope': 'os-api-version', 'tag': nsxlib_testcase.PLUGIN_VER}, {'scope': 'os-instance-uuid', 'tag': 'A' * 40}] self.assertEqual(sorted(expected, key=lambda x: x.get('tag')), sorted(tags, key=lambda x: x.get('tag'))) def test_update_v3_tags_removal(self): tags = [{'scope': 'os-net-id', 'tag': 'X' * 40}, {'scope': 'os-project-id', 'tag': 'Y' * 40}, {'scope': 'os-project-name', 'tag': 'Z' * 40}, {'scope': 'os-api-version', 'tag': nsxlib_testcase.PLUGIN_VER}] resources = [{'scope': 'os-net-id', 'tag': ''}] tags = utils.update_v3_tags(tags, resources) expected = [{'scope': 'os-project-id', 'tag': 'Y' * 40}, {'scope': 'os-project-name', 'tag': 'Z' * 40}, {'scope': 'os-api-version', 'tag': nsxlib_testcase.PLUGIN_VER}] self.assertEqual(sorted(expected, key=lambda x: x.get('tag')), sorted(tags, key=lambda x: x.get('tag'))) def test_update_v3_tags_update(self): tags = [{'scope': 'os-net-id', 'tag': 'X' * 40}, {'scope': 'os-project-id', 'tag': 'Y' * 40}, {'scope': 'os-project-name', 'tag': 'Z' * 40}, {'scope': 'os-api-version', 'tag': nsxlib_testcase.PLUGIN_VER}] resources = [{'scope': 'os-project-id', 'tag': 'A' * 40}] tags = utils.update_v3_tags(tags, resources) expected = [{'scope': 'os-net-id', 'tag': 'X' * 40}, {'scope': 'os-project-id', 'tag': 'A' * 40}, {'scope': 'os-project-name', 'tag': 'Z' * 40}, {'scope': 'os-api-version', 'tag': nsxlib_testcase.PLUGIN_VER}] self.assertEqual(sorted(expected, key=lambda x: x.get('tag')), sorted(tags, key=lambda x: x.get('tag'))) def test_update_v3_tags_repetitive_scopes(self): tags = [{'scope': 'os-net-id', 'tag': 'X' * 40}, {'scope': 'os-project-id', 'tag': 'Y' * 40}, {'scope': 'os-project-name', 'tag': 'Z' * 40}, {'scope': 'os-security-group', 'tag': 'SG1'}, {'scope': 'os-security-group', 'tag': 'SG2'}] tags_update = [{'scope': 'os-security-group', 'tag': 'SG3'}, {'scope': 'os-security-group', 'tag': 'SG4'}] tags = utils.update_v3_tags(tags, tags_update) 
expected = [{'scope': 'os-net-id', 'tag': 'X' * 40}, {'scope': 'os-project-id', 'tag': 'Y' * 40}, {'scope': 'os-project-name', 'tag': 'Z' * 40}, {'scope': 'os-security-group', 'tag': 'SG3'}, {'scope': 'os-security-group', 'tag': 'SG4'}] self.assertEqual(sorted(expected, key=lambda x: x.get('tag')), sorted(tags, key=lambda x: x.get('tag'))) def test_update_v3_tags_repetitive_scopes_remove(self): tags = [{'scope': 'os-net-id', 'tag': 'X' * 40}, {'scope': 'os-project-id', 'tag': 'Y' * 40}, {'scope': 'os-project-name', 'tag': 'Z' * 40}, {'scope': 'os-security-group', 'tag': 'SG1'}, {'scope': 'os-security-group', 'tag': 'SG2'}] tags_update = [{'scope': 'os-security-group', 'tag': None}] tags = utils.update_v3_tags(tags, tags_update) expected = [{'scope': 'os-net-id', 'tag': 'X' * 40}, {'scope': 'os-project-id', 'tag': 'Y' * 40}, {'scope': 'os-project-name', 'tag': 'Z' * 40}] self.assertEqual(sorted(expected, key=lambda x: x.get('tag')), sorted(tags, key=lambda x: x.get('tag'))) def test_build_extra_args_positive(self): extra_args = ['fall_count', 'interval', 'monitor_port', 'request_body', 'request_method', 'request_url', 'request_version', 'response_body', 'response_status_codes', 'rise_count', 'timeout'] body = {'display_name': 'httpmonitor1', 'description': 'my http monitor'} expected = {'display_name': 'httpmonitor1', 'description': 'my http monitor', 'interval': 5, 'rise_count': 3, 'fall_count': 3} resp = utils.build_extra_args(body, extra_args, interval=5, rise_count=3, fall_count=3) self.assertEqual(resp, expected) def test_build_extra_args_negative(self): extra_args = ['cookie_domain', 'cookie_fallback', 'cookie_garble', 'cookie_mode', 'cookie_name', 'cookie_path', 'cookie_time'] body = {'display_name': 'persistenceprofile1', 'description': 'my persistence profile', 'resource_type': 'LoadBalancerCookiePersistenceProfile'} expected = {'display_name': 'persistenceprofile1', 'description': 'my persistence profile', 'resource_type': 'LoadBalancerCookiePersistenceProfile', 'cookie_mode': 'INSERT', 'cookie_name': 'ABC', 'cookie_fallback': True} resp = utils.build_extra_args(body, extra_args, cookie_mode='INSERT', cookie_name='ABC', cookie_fallback=True, bogus='bogus') self.assertEqual(resp, expected) def test_retry(self): max_retries = 5 total_count = {'val': 0} @utils.retry_upon_exception(exceptions.NsxLibInvalidInput, max_attempts=max_retries) def func_to_fail(x): total_count['val'] = total_count['val'] + 1 raise exceptions.NsxLibInvalidInput(error_message='foo') self.assertRaises(exceptions.NsxLibInvalidInput, func_to_fail, 99) self.assertEqual(max_retries, total_count['val']) def test_retry_random(self): max_retries = 5 total_count = {'val': 0} @utils.retry_random_upon_exception(exceptions.NsxLibInvalidInput, max_attempts=max_retries) def func_to_fail(x): total_count['val'] = total_count['val'] + 1 raise exceptions.NsxLibInvalidInput(error_message='foo') self.assertRaises(exceptions.NsxLibInvalidInput, func_to_fail, 99) self.assertEqual(max_retries, total_count['val']) def test_retry_random_tuple(self): max_retries = 5 total_count = {'val': 0} @utils.retry_random_upon_exception( (exceptions.NsxLibInvalidInput, exceptions.APITransactionAborted), max_attempts=max_retries) def func_to_fail(x): total_count['val'] = total_count['val'] + 1 raise exceptions.NsxLibInvalidInput(error_message='foo') self.assertRaises(exceptions.NsxLibInvalidInput, func_to_fail, 99) self.assertEqual(max_retries, total_count['val']) @mock.patch.object(utils, '_update_max_nsgroups_criteria_tags') 
@mock.patch.object(utils, '_update_max_tags') @mock.patch.object(utils, '_update_tag_length') @mock.patch.object(utils, '_update_resource_length') def test_update_limits(self, _update_resource_length, _update_tag_length, _update_max_tags, _update_msx_nsg_criteria): limits = utils.TagLimits(1, 2, 3) utils.update_tag_limits(limits) _update_resource_length.assert_called_with(1) _update_tag_length.assert_called_with(2) _update_max_tags.assert_called_with(3) _update_msx_nsg_criteria.assert_called_with(3) class NsxFeaturesTestCase(nsxlib_testcase.NsxLibTestCase): def test_v2_features(self, current_version='2.0.0'): self.nsxlib.nsx_version = current_version self.assertTrue(self.nsxlib.feature_supported( nsx_constants.FEATURE_ROUTER_FIREWALL)) self.assertTrue(self.nsxlib.feature_supported( nsx_constants.FEATURE_EXCLUDE_PORT_BY_TAG)) def test_v2_features_plus(self): self.test_v2_features(current_version='2.0.1') def test_v2_features_minus(self): self.nsxlib.nsx_version = '1.9.9' self.assertFalse(self.nsxlib.feature_supported( nsx_constants.FEATURE_ROUTER_FIREWALL)) self.assertFalse(self.nsxlib.feature_supported( nsx_constants.FEATURE_EXCLUDE_PORT_BY_TAG)) self.assertTrue(self.nsxlib.feature_supported( nsx_constants.FEATURE_MAC_LEARNING)) vmware-nsxlib-15.0.6/vmware_nsxlib/tests/unit/v3/test_qos_switching_profile.py0000664000175000017500000002736013623151571030002 0ustar zuulzuul00000000000000# Copyright (c) 2015 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
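# NOTE: A minimal usage sketch of the shaping update that the tests in this
# module drive through mocks. The function name and the 'profile_id' and
# 'direction' arguments are placeholders; only the keyword arguments mirror
# what the tests below pass to update_shaping.
def enable_shaping_sketch(nsxlib, profile_id, direction):
    # 'direction' is expected to be nsx_constants.EGRESS or
    # nsx_constants.INGRESS; the call results in a GET of the existing
    # switching profile followed by an update of the matching shaper.
    nsxlib.qos_switching_profile.update_shaping(
        profile_id,
        shaping_enabled=True,
        burst_size=100,
        peak_bandwidth=200,
        average_bandwidth=300,
        qos_marking='untrusted',
        dscp=10,
        direction=direction)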
# import copy import mock from oslo_log import log from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase from vmware_nsxlib.tests.unit.v3 import test_constants from vmware_nsxlib.v3 import nsx_constants LOG = log.getLogger(__name__) class NsxLibQosTestCase(nsxlib_testcase.NsxClientTestCase): def _body(self, qos_marking=None, dscp=None, description=test_constants.FAKE_NAME): body = { "resource_type": "QosSwitchingProfile", "tags": [] } if qos_marking: body = self.nsxlib.qos_switching_profile._update_dscp_in_args( body, qos_marking, dscp) body["display_name"] = test_constants.FAKE_NAME body["description"] = description return body def _body_with_shaping(self, shaping_enabled=False, burst_size=None, peak_bandwidth=None, average_bandwidth=None, description=test_constants.FAKE_NAME, qos_marking=None, dscp=0, direction=nsx_constants.EGRESS, body=None): if body is None: body = copy.deepcopy(test_constants.FAKE_QOS_PROFILE) body["display_name"] = test_constants.FAKE_NAME body["description"] = description resource_type = (nsx_constants.EGRESS_SHAPING if direction == nsx_constants.EGRESS else nsx_constants.INGRESS_SHAPING) for shaper in body["shaper_configuration"]: if shaper["resource_type"] == resource_type: shaper["enabled"] = shaping_enabled if burst_size: shaper["burst_size_bytes"] = burst_size if peak_bandwidth: shaper["peak_bandwidth_mbps"] = peak_bandwidth if average_bandwidth: shaper["average_bandwidth_mbps"] = average_bandwidth break if qos_marking: body = self.nsxlib.qos_switching_profile._update_dscp_in_args( body, qos_marking, dscp) return body def test_create_qos_switching_profile(self): """Test creating a qos-switching profile returns the correct response """ with mock.patch.object(self.nsxlib.client, 'create') as create: self.nsxlib.qos_switching_profile.create( tags=[], name=test_constants.FAKE_NAME, description=test_constants.FAKE_NAME) create.assert_called_with( 'switching-profiles', self._body()) def test_update_qos_switching_profile(self): """Test updating a qos-switching profile returns the correct response """ original_profile = self._body() new_description = "Test" with mock.patch.object(self.nsxlib.client, 'get', return_value=original_profile): with mock.patch.object(self.nsxlib.client, 'update') as update: # update the description of the profile self.nsxlib.qos_switching_profile.update( test_constants.FAKE_QOS_PROFILE['id'], tags=[], description=new_description) update.assert_called_with( 'switching-profiles/%s' % test_constants.FAKE_QOS_PROFILE['id'], self._body(description=new_description), headers=None) def _enable_qos_switching_profile_shaping( self, direction=nsx_constants.EGRESS, new_burst_size=100): """Test updating a qos-switching profile returns the correct response """ original_burst = 10 original_profile = self._body_with_shaping(direction=direction, burst_size=original_burst) peak_bandwidth = 200 average_bandwidth = 300 qos_marking = "untrusted" dscp = 10 with mock.patch.object(self.nsxlib.client, 'get', return_value=original_profile): with mock.patch.object(self.nsxlib.client, 'update') as update: # update the bw shaping of the profile self.nsxlib.qos_switching_profile.update_shaping( test_constants.FAKE_QOS_PROFILE['id'], shaping_enabled=True, burst_size=new_burst_size, peak_bandwidth=peak_bandwidth, average_bandwidth=average_bandwidth, qos_marking=qos_marking, dscp=dscp, direction=direction) actual_body = copy.deepcopy(update.call_args[0][1]) actual_path = update.call_args[0][0] expected_path = ('switching-profiles/%s' % 
test_constants.FAKE_QOS_PROFILE['id']) expected_burst = (new_burst_size if new_burst_size is not None else original_burst) expected_body = self._body_with_shaping( shaping_enabled=True, burst_size=expected_burst, peak_bandwidth=peak_bandwidth, average_bandwidth=average_bandwidth, qos_marking="untrusted", dscp=10, direction=direction) self.assertEqual(expected_path, actual_path) self.assertEqual(expected_body, actual_body) def test_enable_qos_switching_profile_egress_shaping(self): self._enable_qos_switching_profile_shaping( direction=nsx_constants.EGRESS) def test_enable_qos_switching_profile_ingress_shaping(self): self._enable_qos_switching_profile_shaping( direction=nsx_constants.INGRESS) def test_update_qos_switching_profile_with_burst_size(self): self._enable_qos_switching_profile_shaping( direction=nsx_constants.EGRESS, new_burst_size=101) def test_update_qos_switching_profile_without_burst_size(self): self._enable_qos_switching_profile_shaping( direction=nsx_constants.EGRESS, new_burst_size=None) def test_update_qos_switching_profile_zero_burst_size(self): self._enable_qos_switching_profile_shaping( direction=nsx_constants.EGRESS, new_burst_size=0) def _disable_qos_switching_profile_shaping( self, direction=nsx_constants.EGRESS): """Test updating a qos-switching profile. Returns the correct response """ burst_size = 100 peak_bandwidth = 200 average_bandwidth = 300 original_profile = self._body_with_shaping( shaping_enabled=True, burst_size=burst_size, peak_bandwidth=peak_bandwidth, average_bandwidth=average_bandwidth, qos_marking="untrusted", dscp=10, direction=direction) with mock.patch.object(self.nsxlib.client, 'get', return_value=original_profile): with mock.patch.object(self.nsxlib.client, 'update') as update: # update the bw shaping of the profile self.nsxlib.qos_switching_profile.update_shaping( test_constants.FAKE_QOS_PROFILE['id'], shaping_enabled=False, qos_marking="trusted", direction=direction) actual_body = copy.deepcopy(update.call_args[0][1]) actual_path = update.call_args[0][0] expected_path = ('switching-profiles/%s' % test_constants.FAKE_QOS_PROFILE['id']) expected_body = self._body_with_shaping(qos_marking="trusted", direction=direction) self.assertEqual(expected_path, actual_path) self.assertEqual(expected_body, actual_body) def test_disable_qos_switching_profile_egress_shaping(self): self._disable_qos_switching_profile_shaping( direction=nsx_constants.EGRESS) def test_disable_qos_switching_profile_ingress_shaping(self): self._disable_qos_switching_profile_shaping( direction=nsx_constants.INGRESS) def test_delete_qos_switching_profile(self): """Test deleting qos-switching-profile""" with mock.patch.object(self.nsxlib.client, 'delete') as delete: self.nsxlib.qos_switching_profile.delete( test_constants.FAKE_QOS_PROFILE['id']) delete.assert_called_with( 'switching-profiles/%s' % test_constants.FAKE_QOS_PROFILE['id']) def test_qos_switching_profile_set_shaping(self): """Test updating a qos-switching profile returns the correct response """ egress_peak_bandwidth = 200 egress_average_bandwidth = 300 egress_burst_size = 500 ingress_peak_bandwidth = 100 ingress_average_bandwidth = 400 ingress_burst_size = 600 qos_marking = "untrusted" dscp = 10 original_profile = self._body_with_shaping() with mock.patch.object(self.nsxlib.client, 'get', return_value=original_profile): with mock.patch.object(self.nsxlib.client, 'update') as update: # update the bw shaping of the profile self.nsxlib.qos_switching_profile.set_profile_shaping( test_constants.FAKE_QOS_PROFILE['id'], 
ingress_bw_enabled=True, ingress_burst_size=ingress_burst_size, ingress_peak_bandwidth=ingress_peak_bandwidth, ingress_average_bandwidth=ingress_average_bandwidth, egress_bw_enabled=True, egress_burst_size=egress_burst_size, egress_peak_bandwidth=egress_peak_bandwidth, egress_average_bandwidth=egress_average_bandwidth, qos_marking=qos_marking, dscp=dscp) actual_body = copy.deepcopy(update.call_args[0][1]) actual_path = update.call_args[0][0] expected_path = ('switching-profiles/%s' % test_constants.FAKE_QOS_PROFILE['id']) expected_body = self._body_with_shaping( shaping_enabled=True, burst_size=egress_burst_size, peak_bandwidth=egress_peak_bandwidth, average_bandwidth=egress_average_bandwidth, qos_marking="untrusted", dscp=10, direction=nsx_constants.EGRESS) # Add the other direction to the body expected_body = self._body_with_shaping( shaping_enabled=True, burst_size=ingress_burst_size, peak_bandwidth=ingress_peak_bandwidth, average_bandwidth=ingress_average_bandwidth, direction=nsx_constants.INGRESS, body=expected_body) self.assertEqual(expected_path, actual_path) self.assertEqual(expected_body, actual_body) vmware-nsxlib-15.0.6/vmware_nsxlib/tests/unit/v3/policy/0000775000175000017500000000000013623151652023257 5ustar zuulzuul00000000000000vmware-nsxlib-15.0.6/vmware_nsxlib/tests/unit/v3/policy/policy_testcase.py0000664000175000017500000000255113623151571027026 0ustar zuulzuul00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase from vmware_nsxlib.v3 import client from vmware_nsxlib.v3.policy import core_defs as policy BASE_POLICY_URI = "https://1.2.3.4/policy/api/v1/" class TestPolicyApi(nsxlib_testcase.NsxClientTestCase): def setUp(self): self.client = self.new_mocked_client(client.NSX3Client, url_prefix='policy/api/v1/') self.policy_api = policy.NsxPolicyApi(self.client) super(TestPolicyApi, self).setUp() def assert_json_call(self, method, client, url, data=None, headers=None): url = BASE_POLICY_URI + url return super(TestPolicyApi, self).assert_json_call( method, client, url, data=data, headers=headers) vmware-nsxlib-15.0.6/vmware_nsxlib/tests/unit/v3/policy/test_api.py0000664000175000017500000004425113623151571025447 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
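# NOTE: A minimal sketch of the pattern exercised throughout this module: a
# policy resource definition is turned into a PATCH against its 'infra/...'
# path with the body from get_obj_dict(). The function name and the
# 'archaea'/'example' values are placeholders; 'policy_api' is assumed to be
# the NsxPolicyApi instance wired up by the test fixtures below.
def create_domain_sketch(policy_api):
    # Self-contained import so the sketch does not rely on the module-level
    # imports that follow.
    from vmware_nsxlib.v3.policy import core_defs as policy
    # DomainDef(domain_id='archaea') ends up as
    # PATCH infra/domains/archaea with the serialized definition.
    domain_def = policy.DomainDef(domain_id='archaea', name='example')
    policy_api.create_or_update(domain_def)
    return domain_def.get_obj_dict()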
# from vmware_nsxlib.tests.unit.v3.policy import policy_testcase from vmware_nsxlib.v3 import nsx_constants from vmware_nsxlib.v3.policy import constants from vmware_nsxlib.v3.policy import core_defs as policy class TestPolicyDomain(policy_testcase.TestPolicyApi): def test_create(self): domain_def = policy.DomainDef( domain_id='archaea', name='prokaryotic cells', description='typically characterized by membrane lipids') self.policy_api.create_or_update(domain_def) self.assert_json_call('PATCH', self.client, 'infra/domains/archaea', data=domain_def.get_obj_dict()) def test_delete(self): domain_def = policy.DomainDef(domain_id='bacteria') self.policy_api.delete(domain_def) self.assert_json_call('DELETE', self.client, 'infra/domains/bacteria') def test_get(self): domain_def = policy.DomainDef(domain_id='eukarya') self.policy_api.get(domain_def) self.assert_json_call('GET', self.client, 'infra/domains/eukarya') def test_list(self): domain_def = policy.DomainDef() self.policy_api.list(domain_def) self.assert_json_call('GET', self.client, 'infra/domains') class TestPolicyGroup(policy_testcase.TestPolicyApi): def test_create(self): group_def = policy.GroupDef( domain_id='eukarya', group_id='cats', name='felis catus') self.policy_api.create_or_update(group_def) self.assert_json_call('PATCH', self.client, 'infra/domains/eukarya/groups/cats', data=group_def.get_obj_dict()) def test_create_with_domain(self): domain_def = policy.DomainDef(domain_id='eukarya', name='eukarya', description='dude with cell membranes') group_def = policy.GroupDef(domain_id='eukarya', group_id='cats', name='Ailuropoda melanoleuca') self.policy_api.create_with_parent(domain_def, group_def) data = domain_def.get_obj_dict() data['groups'] = [group_def.get_obj_dict()] self.assert_json_call('PATCH', self.client, 'infra/domains/eukarya', data=data) def test_create_with_single_tag(self): domain_def = policy.DomainDef(domain_id='eukarya') group_def = policy.GroupDef(domain_id='eukarya', group_id='dogs', conditions=policy.Condition('spaniel')) self.policy_api.create_with_parent(domain_def, group_def) data = domain_def.get_obj_dict() data['groups'] = [group_def.get_obj_dict()] # validate body structure and defaults expected_condition = {'value': 'spaniel', 'operator': 'EQUALS', 'member_type': 'LogicalPort', 'resource_type': 'Condition', 'key': 'Tag'} expected_group = {'id': 'dogs', 'resource_type': 'Group', 'expression': [expected_condition]} expected_data = {'id': 'eukarya', 'resource_type': 'Domain', 'groups': [expected_group]} self.assert_json_call('PATCH', self.client, 'infra/domains/eukarya', data=expected_data) def test_create_with_multi_tag(self): domain_def = policy.DomainDef(domain_id='eukarya') pines = policy.Condition( 'pine', operator=constants.CONDITION_OP_CONTAINS) maples = policy.Condition( 'maple', operator=constants.CONDITION_OP_STARTS_WITH) group_def = policy.GroupDef(domain_id='eukarya', group_id='trees', conditions=[pines, maples]) self.policy_api.create_with_parent(domain_def, group_def) data = domain_def.get_obj_dict() data['groups'] = [group_def.get_obj_dict()] self.assert_json_call('PATCH', self.client, 'infra/domains/eukarya', data=data) def test_delete(self): group_def = policy.GroupDef(domain_id='eukarya', group_id='giraffe') self.policy_api.delete(group_def) self.assert_json_call('DELETE', self.client, 'infra/domains/eukarya/groups/giraffe') class TestPolicyService(policy_testcase.TestPolicyApi): def test_create(self): service_def = policy.ServiceDef(service_id='roomservice') 
self.policy_api.create_or_update(service_def) self.assert_json_call('PATCH', self.client, 'infra/services/roomservice', data=service_def.get_obj_dict()) def test_create_l4_with_parent(self): service_def = policy.ServiceDef(service_id='roomservice') entry_def = policy.L4ServiceEntryDef(service_id='roomservice', protocol='TCP', entry_id='http', name='room http', dest_ports=[80, 8080]) self.policy_api.create_with_parent(service_def, entry_def) expected_entry = {'id': 'http', 'resource_type': 'L4PortSetServiceEntry', 'display_name': 'room http', 'l4_protocol': 'TCP', 'destination_ports': [80, 8080]} expected_data = {'id': 'roomservice', 'resource_type': 'Service', 'service_entries': [expected_entry]} self.assert_json_call('PATCH', self.client, 'infra/services/roomservice', data=expected_data) def test_create_icmp_with_parent(self): service_def = policy.ServiceDef(name='icmpservice', service_id='icmpservice') entry_def = policy.IcmpServiceEntryDef(service_id='icmpservice', version=4, entry_id='icmp', name='icmpv4') self.policy_api.create_with_parent(service_def, entry_def) expected_entry = {'id': 'icmp', 'resource_type': 'ICMPTypeServiceEntry', 'display_name': 'icmpv4', 'protocol': 'ICMPv4'} expected_data = {'id': 'icmpservice', 'resource_type': 'Service', 'display_name': 'icmpservice', 'service_entries': [expected_entry]} self.assert_json_call('PATCH', self.client, 'infra/services/icmpservice', data=expected_data) def test_create_mixed_with_parent(self): service_def = policy.ServiceDef(name='mixedservice', service_id='mixedservice') l4_entry_def = policy.L4ServiceEntryDef(service_id='mixedservice', protocol='TCP', entry_id='http', name='http', dest_ports=[80, 8080]) icmp_entry_def = policy.IcmpServiceEntryDef(service_id='mixedservice', version=4, entry_id='icmp', name='icmpv4') self.policy_api.create_with_parent(service_def, [l4_entry_def, icmp_entry_def]) expected_l4_entry = {'id': 'http', 'resource_type': 'L4PortSetServiceEntry', 'display_name': 'http', 'l4_protocol': 'TCP', 'destination_ports': [80, 8080]} expected_icmp_entry = {'id': 'icmp', 'resource_type': 'ICMPTypeServiceEntry', 'display_name': 'icmpv4', 'protocol': 'ICMPv4'} expected_data = {'id': 'mixedservice', 'resource_type': 'Service', 'display_name': 'mixedservice', 'service_entries': [ expected_l4_entry, expected_icmp_entry]} self.assert_json_call('PATCH', self.client, 'infra/services/mixedservice', data=expected_data) class TestPolicyCommunicationMap(policy_testcase.TestPolicyApi): def setUp(self): super(TestPolicyCommunicationMap, self).setUp() self.entry1 = policy.CommunicationMapEntryDef( domain_id='d1', map_id='cm1', entry_id='en1', action='ALLOW', sequence_number=12, source_groups=["group1", "group2"], dest_groups=["group1"], service_ids=["service1"], direction=nsx_constants.IN_OUT) self.entry2 = policy.CommunicationMapEntryDef( domain_id='d1', map_id='cm2', entry_id='en2', action='ALLOW', sequence_number=13, source_groups=["group1", "group2"], dest_groups=["group3"], service_ids=["service2"], direction=nsx_constants.IN) self.expected_data1 = {'id': 'en1', 'resource_type': 'Rule', 'sequence_number': 12, 'action': 'ALLOW', 'source_groups': ['/infra/domains/d1/groups/group1', '/infra/domains/d1/groups/group2'], 'destination_groups': ['/infra/domains/d1/groups/group1'], 'services': ['/infra/services/service1'], 'direction': 'IN_OUT'} self.expected_data2 = {'id': 'en2', 'resource_type': 'Rule', 'sequence_number': 13, 'action': 'ALLOW', 'source_groups': ['/infra/domains/d1/groups/group1', '/infra/domains/d1/groups/group2'], 
'destination_groups': ['/infra/domains/d1/groups/group3'], 'services': ['/infra/services/service2'], 'direction': 'IN'} def test_create_with_one_entry(self): map_def = policy.CommunicationMapDef(domain_id='d1', map_id='cm1') self.policy_api.create_with_parent(map_def, self.entry1) expected_data = map_def.get_obj_dict() expected_data['rules'] = [self.expected_data1] self.assert_json_call('PATCH', self.client, 'infra/domains/d1/security-policies/cm1', data=expected_data) def test_create_with_two_entries(self): map_def = policy.CommunicationMapDef(domain_id='d1', map_id='cm1') self.policy_api.create_with_parent(map_def, [self.entry1, self.entry2]) expected_data = map_def.get_obj_dict() expected_data['rules'] = [self.expected_data1, self.expected_data2] self.assert_json_call('PATCH', self.client, 'infra/domains/d1/security-policies/cm1', data=expected_data) def test_update_entry(self): self.policy_api.create_or_update(self.entry1) self.assert_json_call('PATCH', self.client, 'infra/domains/d1/security-policies/cm1/' 'rules/en1', data=self.expected_data1) def test_delete_entry(self): self.policy_api.delete(self.entry2) self.assert_json_call('DELETE', self.client, 'infra/domains/d1/security-policies/cm2/' 'rules/en2') class TestPolicyEnforcementPoint(policy_testcase.TestPolicyApi): def test_create(self): ep_def = policy.EnforcementPointDef(ep_id='ep1', name='The Point', ip_address='1.1.1.1', username='admin', password='a') self.policy_api.create_or_update(ep_def) ep_path = policy.EnforcementPointDef(ep_id='ep1').get_resource_path() self.assert_json_call('PATCH', self.client, ep_path, data=ep_def.get_obj_dict()) class TestPolicyTransportZone(policy_testcase.TestPolicyApi): def test_get(self): tz_def = policy.TransportZoneDef(tz_id='tz1', ep_id='default') self.policy_api.get(tz_def) tz_path = tz_def.get_resource_path() self.assert_json_call('GET', self.client, tz_path) class TestPolicyEdgeCluster(policy_testcase.TestPolicyApi): def test_get(self): ec_def = policy.EdgeClusterDef(ec_id='ec1', ep_id='default') self.policy_api.get(ec_def) ec_path = ec_def.get_resource_path() self.assert_json_call('GET', self.client, ec_path) class TestPolicyDeploymentMap(policy_testcase.TestPolicyApi): def test_create(self): map_def = policy.DeploymentMapDef(map_id='dm1', domain_id='d1', ep_id='ep1') self.policy_api.create_or_update(map_def) ep_path = policy.EnforcementPointDef( ep_id='ep1').get_resource_full_path() expected_data = {'id': 'dm1', 'resource_type': 'DeploymentMap', 'enforcement_point_path': ep_path} self.assert_json_call('PATCH', self.client, 'infra/domains/d1/domain-deployment-maps/dm1', data=expected_data) class TestPolicyTier1(policy_testcase.TestPolicyApi): def test_create(self): name = 'test' description = 'desc' tier0_id = '000' tier1_id = '111' route_adv = policy.RouteAdvertisement(static_routes=True, subnets=True, nat=True, lb_vip=False, lb_snat=False) ipv6_ndra_profile_id = '111' tier1_def = policy.Tier1Def( tier1_id=tier1_id, name=name, description=description, route_advertisement=route_adv, tier0=tier0_id, ipv6_ndra_profile_id=ipv6_ndra_profile_id) expected_data = {"id": "%s" % tier1_id, "resource_type": "Tier1", "description": "%s" % description, "display_name": "%s" % name, "tier0_path": "/infra/tier-0s/%s" % tier0_id, "route_advertisement_types": route_adv.get_obj_dict(), "ipv6_profile_paths": ["/infra/ipv6-ndra-profiles/" "%s" % ipv6_ndra_profile_id]} self.policy_api.create_or_update(tier1_def) tier1_path = tier1_def.get_resource_path() self.assert_json_call('PATCH', self.client, 
tier1_path, data=expected_data) def test_create_no_ipv6_profile(self): name = 'test' description = 'desc' tier0_id = '000' tier1_id = '111' route_adv = policy.RouteAdvertisement(static_routes=True, subnets=True, nat=True, lb_vip=False, lb_snat=False) ipv6_ndra_profile_id = None tier1_def = policy.Tier1Def( tier1_id=tier1_id, name=name, description=description, route_advertisement=route_adv, tier0=tier0_id, ipv6_ndra_profile_id=ipv6_ndra_profile_id) expected_data = {"id": "%s" % tier1_id, "resource_type": "Tier1", "description": "%s" % description, "display_name": "%s" % name, "tier0_path": "/infra/tier-0s/%s" % tier0_id, "route_advertisement_types": route_adv.get_obj_dict(), "ipv6_profile_paths": ["/infra/ipv6-ndra-profiles/" "default"]} self.policy_api.create_or_update(tier1_def) tier1_path = tier1_def.get_resource_path() self.assert_json_call('PATCH', self.client, tier1_path, data=expected_data) vmware-nsxlib-15.0.6/vmware_nsxlib/tests/unit/v3/policy/__init__.py0000664000175000017500000000000013623151571025356 0ustar zuulzuul00000000000000vmware-nsxlib-15.0.6/vmware_nsxlib/tests/unit/v3/policy/test_ipsec_vpn_resources.py0000664000175000017500000007004713623151571030760 0ustar zuulzuul00000000000000# Copyright 2019 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
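# NOTE: A minimal usage sketch of the IKE profile creation flow that
# TestPolicyIkeProfileApi below verifies. The function name, profile id and
# display name are placeholders; the keyword arguments and enum values
# mirror the ones used in the tests.
def create_ike_profile_sketch(policy_lib):
    # Self-contained import so the sketch does not rely on the module-level
    # imports that follow. 'policy_lib' is assumed to expose the same
    # ipsec_vpn.ike_profile API as self.policy_lib in the tests below;
    # create_or_overwrite returns the profile id it was given.
    from vmware_nsxlib.v3 import vpn_ipsec
    return policy_lib.ipsec_vpn.ike_profile.create_or_overwrite(
        'example-ike-profile',
        profile_id='example-id',
        ike_version=vpn_ipsec.IkeVersionTypes.IKE_VERSION_V1,
        encryption_algorithms=[
            vpn_ipsec.EncryptionAlgorithmTypes.ENCRYPTION_ALGORITHM_128],
        digest_algorithms=[
            vpn_ipsec.DigestAlgorithmTypes.DIGEST_ALGORITHM_SHA256],
        dh_groups=[vpn_ipsec.DHGroupTypes.DH_GROUP_15],
        sa_life_time=vpn_ipsec.IkeSALifetimeLimits.SA_LIFETIME_MIN + 1)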
# import mock from vmware_nsxlib.tests.unit.v3.policy import test_resources from vmware_nsxlib.v3.policy import ipsec_vpn_defs from vmware_nsxlib.v3 import vpn_ipsec TEST_TENANT = 'test' class TestPolicyIkeProfileApi(test_resources.NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyIkeProfileApi, self).setUp() self.resourceApi = self.policy_lib.ipsec_vpn.ike_profile def test_create(self): name = 'd1' obj_id = 'D1' description = 'desc' ike_version = vpn_ipsec.IkeVersionTypes.IKE_VERSION_V1 encryption_algorithms = [ vpn_ipsec.EncryptionAlgorithmTypes.ENCRYPTION_ALGORITHM_128] digest_algorithms = [ vpn_ipsec.DigestAlgorithmTypes.DIGEST_ALGORITHM_SHA256] dh_groups = [vpn_ipsec.DHGroupTypes.DH_GROUP_15] sa_life_time = vpn_ipsec.IkeSALifetimeLimits.SA_LIFETIME_MIN + 1 tags = [] with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, profile_id=obj_id, description=description, ike_version=ike_version, encryption_algorithms=encryption_algorithms, digest_algorithms=digest_algorithms, dh_groups=dh_groups, sa_life_time=sa_life_time, tags=tags, tenant=TEST_TENANT) expected_def = self.resourceApi.entry_def( profile_id=obj_id, name=name, description=description, ike_version=ike_version, encryption_algorithms=encryption_algorithms, digest_algorithms=digest_algorithms, dh_groups=dh_groups, sa_life_time=sa_life_time, tags=tags, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(obj_id, result) def test_delete(self): obj_id = '111' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(obj_id, tenant=TEST_TENANT) expected_def = self.resourceApi.entry_def( profile_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): obj_id = '111' with mock.patch.object(self.policy_api, "get", return_value={'id': obj_id}) as api_call: result = self.resourceApi.get(obj_id, tenant=TEST_TENANT) expected_def = self.resourceApi.entry_def( profile_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(obj_id, result['id']) def test_get_by_name(self): name = 'd1' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = self.resourceApi.entry_def( tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_list(self): with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(tenant=TEST_TENANT) expected_def = self.resourceApi.entry_def( tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_update(self): obj_id = '111' name = 'new name' description = 'new desc' with self.mock_get(obj_id, 'old name'), \ self.mock_create_update() as update_call: self.resourceApi.update(obj_id, name=name, description=description, tenant=TEST_TENANT) expected_def = self.resourceApi.entry_def( profile_id=obj_id, name=name, description=description, tenant=TEST_TENANT) self.assert_called_with_def(update_call, expected_def) class TestPolicyTunnelProfileApi(test_resources.NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyTunnelProfileApi, self).setUp() self.resourceApi = self.policy_lib.ipsec_vpn.tunnel_profile def test_create(self): name = 'd1' obj_id = 'D1' description = 'desc' 
enable_perfect_forward_secrecy = True encryption_algorithms = [ vpn_ipsec.EncryptionAlgorithmTypes.ENCRYPTION_ALGORITHM_128] digest_algorithms = [ vpn_ipsec.DigestAlgorithmTypes.DIGEST_ALGORITHM_SHA256] dh_groups = [vpn_ipsec.DHGroupTypes.DH_GROUP_15] sa_life_time = vpn_ipsec.IkeSALifetimeLimits.SA_LIFETIME_MIN + 1 tags = [] with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, profile_id=obj_id, description=description, enable_perfect_forward_secrecy=enable_perfect_forward_secrecy, encryption_algorithms=encryption_algorithms, digest_algorithms=digest_algorithms, dh_groups=dh_groups, sa_life_time=sa_life_time, tags=tags, tenant=TEST_TENANT) expected_def = self.resourceApi.entry_def( profile_id=obj_id, name=name, description=description, enable_perfect_forward_secrecy=enable_perfect_forward_secrecy, encryption_algorithms=encryption_algorithms, digest_algorithms=digest_algorithms, dh_groups=dh_groups, sa_life_time=sa_life_time, tags=tags, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(obj_id, result) def test_delete(self): obj_id = '111' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(obj_id, tenant=TEST_TENANT) expected_def = self.resourceApi.entry_def( profile_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): obj_id = '111' with mock.patch.object(self.policy_api, "get", return_value={'id': obj_id}) as api_call: result = self.resourceApi.get(obj_id, tenant=TEST_TENANT) expected_def = self.resourceApi.entry_def( profile_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(obj_id, result['id']) def test_get_by_name(self): name = 'd1' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = self.resourceApi.entry_def( tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_list(self): with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(tenant=TEST_TENANT) expected_def = self.resourceApi.entry_def( tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_update(self): obj_id = '111' name = 'new name' description = 'new desc' with self.mock_get(obj_id, 'old name'), \ self.mock_create_update() as update_call: self.resourceApi.update(obj_id, name=name, description=description, tenant=TEST_TENANT) expected_def = self.resourceApi.entry_def( profile_id=obj_id, name=name, description=description, tenant=TEST_TENANT) self.assert_called_with_def(update_call, expected_def) class TestPolicyDpdProfileApi(test_resources.NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyDpdProfileApi, self).setUp() self.resourceApi = self.policy_lib.ipsec_vpn.dpd_profile def test_create(self): name = 'd1' obj_id = 'D1' description = 'desc' dpd_probe_interval = 7 enabled = True tags = [] with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, profile_id=obj_id, description=description, dpd_probe_interval=dpd_probe_interval, enabled=enabled, tags=tags, tenant=TEST_TENANT) expected_def = self.resourceApi.entry_def( profile_id=obj_id, name=name, description=description, 
dpd_probe_interval=dpd_probe_interval, enabled=enabled, tags=tags, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(obj_id, result) def test_delete(self): obj_id = '111' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(obj_id, tenant=TEST_TENANT) expected_def = self.resourceApi.entry_def( profile_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): obj_id = '111' with mock.patch.object(self.policy_api, "get", return_value={'id': obj_id}) as api_call: result = self.resourceApi.get(obj_id, tenant=TEST_TENANT) expected_def = self.resourceApi.entry_def( profile_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(obj_id, result['id']) def test_get_by_name(self): name = 'd1' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = self.resourceApi.entry_def( tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_list(self): with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(tenant=TEST_TENANT) expected_def = self.resourceApi.entry_def( tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_update(self): obj_id = '111' name = 'new name' description = 'new desc' with self.mock_get(obj_id, 'old name'), \ self.mock_create_update() as update_call: self.resourceApi.update(obj_id, name=name, description=description, tenant=TEST_TENANT) expected_def = self.resourceApi.entry_def( profile_id=obj_id, name=name, description=description, tenant=TEST_TENANT) self.assert_called_with_def(update_call, expected_def) class TestPolicyVpnServiceApi(test_resources.NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyVpnServiceApi, self).setUp() self.resourceApi = self.policy_lib.ipsec_vpn.service def test_create(self): name = 'd1' tier1_id = 'tier1' obj_id = 'D1' description = 'desc' ike_log_level = vpn_ipsec.IkeLogLevelTypes.LOG_LEVEL_ERROR enabled = True tags = [] with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, tier1_id=tier1_id, vpn_service_id=obj_id, description=description, ike_log_level=ike_log_level, enabled=enabled, tags=tags, tenant=TEST_TENANT) expected_def = self.resourceApi.entry_def( tier1_id=tier1_id, vpn_service_id=obj_id, name=name, description=description, ike_log_level=ike_log_level, enabled=enabled, tags=tags, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(obj_id, result) def test_delete(self): obj_id = '111' tier1_id = 'tier1' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(tier1_id, obj_id, tenant=TEST_TENANT) expected_def = self.resourceApi.entry_def( tier1_id=tier1_id, vpn_service_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): obj_id = '111' tier1_id = 'tier1' with mock.patch.object(self.policy_api, "get", return_value={'id': obj_id}) as api_call: result = self.resourceApi.get(tier1_id, obj_id, tenant=TEST_TENANT) expected_def = self.resourceApi.entry_def( tier1_id=tier1_id, vpn_service_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) 
self.assertEqual(obj_id, result['id']) def test_get_by_name(self): name = 'd1' tier1_id = 'tier1' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(tier1_id, name, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = self.resourceApi.entry_def( tier1_id=tier1_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_list(self): tier1_id = 'tier1' with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(tier1_id, tenant=TEST_TENANT) expected_def = self.resourceApi.entry_def( tier1_id=tier1_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_update(self): obj_id = '111' tier1_id = 'tier1' name = 'new name' description = 'new desc' with self.mock_get(obj_id, 'old name'), \ self.mock_create_update() as update_call: self.resourceApi.update(tier1_id, obj_id, name=name, description=description, tenant=TEST_TENANT) expected_def = self.resourceApi.entry_def( tier1_id=tier1_id, vpn_service_id=obj_id, name=name, description=description, tenant=TEST_TENANT) self.assert_called_with_def(update_call, expected_def) class TestPolicyVpnLocalEndpointApi(test_resources.NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyVpnLocalEndpointApi, self).setUp() self.resourceApi = self.policy_lib.ipsec_vpn.local_endpoint def test_create(self): name = 'EP1' tier1_id = 'tier1' vpn_service_id = 'vpn1' obj_id = 'ep1' description = 'desc' local_address = '1.1.1.1' local_id = '1' tags = [] with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, tier1_id=tier1_id, vpn_service_id=vpn_service_id, endpoint_id=obj_id, description=description, local_address=local_address, local_id=local_id, tags=tags, tenant=TEST_TENANT) expected_def = self.resourceApi.entry_def( tier1_id=tier1_id, vpn_service_id=vpn_service_id, endpoint_id=obj_id, name=name, description=description, local_address=local_address, local_id=local_id, tags=tags, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(obj_id, result) def test_delete(self): obj_id = '111' tier1_id = 'tier1' vpn_service_id = 'vpn1' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(tier1_id, vpn_service_id, obj_id, tenant=TEST_TENANT) expected_def = self.resourceApi.entry_def( tier1_id=tier1_id, vpn_service_id=vpn_service_id, endpoint_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): obj_id = '111' tier1_id = 'tier1' vpn_service_id = 'vpn1' with mock.patch.object(self.policy_api, "get", return_value={'id': obj_id}) as api_call: result = self.resourceApi.get(tier1_id, vpn_service_id, obj_id, tenant=TEST_TENANT) expected_def = self.resourceApi.entry_def( tier1_id=tier1_id, vpn_service_id=vpn_service_id, endpoint_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(obj_id, result['id']) def test_get_by_name(self): name = 'd1' tier1_id = 'tier1' vpn_service_id = 'vpn1' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(tier1_id, vpn_service_id, name, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = self.resourceApi.entry_def( tier1_id=tier1_id, 
vpn_service_id=vpn_service_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_list(self): tier1_id = 'tier1' vpn_service_id = 'vpn1' with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(tier1_id, vpn_service_id, tenant=TEST_TENANT) expected_def = self.resourceApi.entry_def( tier1_id=tier1_id, vpn_service_id=vpn_service_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_update(self): obj_id = '111' tier1_id = 'tier1' vpn_service_id = 'vpn1' name = 'new name' description = 'new desc' with self.mock_get(obj_id, 'old name'), \ self.mock_create_update() as update_call: self.resourceApi.update(tier1_id, vpn_service_id, obj_id, name=name, description=description, tenant=TEST_TENANT) expected_def = self.resourceApi.entry_def( tier1_id=tier1_id, vpn_service_id=vpn_service_id, endpoint_id=obj_id, name=name, description=description, tenant=TEST_TENANT) self.assert_called_with_def(update_call, expected_def) class TestPolicyVpnSessionApi(test_resources.NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyVpnSessionApi, self).setUp() self.resourceApi = self.policy_lib.ipsec_vpn.session def test_create(self): name = 'Sess1' tier1_id = 'tier1' vpn_service_id = 'vpn1' obj_id = 'sess1' description = 'desc' enabled = True peer_address = '2.2.2.2' peer_id = '2' psk = 'dummy' rules = [self.resourceApi.build_rule( 'rule', 'dummy_id', source_cidrs=['1.1.1.0/24'])] dpd_profile_id = 'dpd1' ike_profile_id = 'ike1' tunnel_profile_id = 'tunnel1' local_endpoint_id = 'ep1' tags = [] with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, tier1_id=tier1_id, vpn_service_id=vpn_service_id, session_id=obj_id, description=description, enabled=enabled, peer_address=peer_address, peer_id=peer_id, psk=psk, rules=rules, dpd_profile_id=dpd_profile_id, ike_profile_id=ike_profile_id, tunnel_profile_id=tunnel_profile_id, local_endpoint_id=local_endpoint_id, tags=tags, tenant=TEST_TENANT) expected_def = self.resourceApi.entry_def( tier1_id=tier1_id, service_id=self.resourceApi._locale_service_id(tier1_id), vpn_service_id=vpn_service_id, session_id=obj_id, name=name, description=description, enabled=enabled, peer_address=peer_address, peer_id=peer_id, psk=psk, rules=rules, dpd_profile_id=dpd_profile_id, ike_profile_id=ike_profile_id, tunnel_profile_id=tunnel_profile_id, local_endpoint_id=local_endpoint_id, tags=tags, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(obj_id, result) def test_delete(self): obj_id = '111' tier1_id = 'tier1' vpn_service_id = 'vpn1' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(tier1_id, vpn_service_id, obj_id, tenant=TEST_TENANT) expected_def = self.resourceApi.entry_def( tier1_id=tier1_id, vpn_service_id=vpn_service_id, session_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): obj_id = '111' tier1_id = 'tier1' vpn_service_id = 'vpn1' with mock.patch.object(self.policy_api, "get", return_value={'id': obj_id}) as api_call: result = self.resourceApi.get(tier1_id, vpn_service_id, obj_id, tenant=TEST_TENANT) expected_def = self.resourceApi.entry_def( tier1_id=tier1_id, vpn_service_id=vpn_service_id, session_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(obj_id, result['id']) 
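    # The remaining tests in this class follow the same pattern as test_get
    # above: patch the relevant low-level policy_api method ("list",
    # "create_or_update", "get", ...), call the ipsec_vpn.session API under
    # test, and use assert_called_with_def to verify that a session definition
    # carrying the expected tier1_id and vpn_service_id (and, where relevant,
    # session_id) was handed to the policy API.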
def test_get_by_name(self): name = 'd1' tier1_id = 'tier1' vpn_service_id = 'vpn1' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(tier1_id, vpn_service_id, name, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = self.resourceApi.entry_def( tier1_id=tier1_id, vpn_service_id=vpn_service_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_list(self): tier1_id = 'tier1' vpn_service_id = 'vpn1' with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(tier1_id, vpn_service_id, tenant=TEST_TENANT) expected_def = self.resourceApi.entry_def( tier1_id=tier1_id, vpn_service_id=vpn_service_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_update(self): obj_id = '111' tier1_id = 'tier1' vpn_service_id = 'vpn1' name = 'new name' description = 'new desc' with self.mock_get(obj_id, 'old name'), \ self.mock_create_update() as update_call: self.resourceApi.update(tier1_id, vpn_service_id, obj_id, name=name, description=description, tenant=TEST_TENANT) expected_def = self.resourceApi.entry_def( tier1_id=tier1_id, vpn_service_id=vpn_service_id, session_id=obj_id, name=name, description=description, tenant=TEST_TENANT) self.assert_called_with_def(update_call, expected_def) def test_get_status(self): obj_id = '111' tier1_id = 'tier1' vpn_service_id = 'vpn1' with mock.patch.object(self.policy_api, "get", return_value={'id': obj_id}) as api_call: self.resourceApi.get_status(tier1_id, vpn_service_id, obj_id, tenant=TEST_TENANT) expected_def = ipsec_vpn_defs.Tier1IPSecVpnSessionStatusDef( tier1_id=tier1_id, vpn_service_id=vpn_service_id, session_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) vmware-nsxlib-15.0.6/vmware_nsxlib/tests/unit/v3/policy/test_lb_resources.py0000664000175000017500000021452413623151571027367 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
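# The tests in this module exercise the NSX Policy load-balancer resource
# APIs (client/server SSL profiles, persistence profiles, HTTP application
# profiles, LB services, virtual servers, pools and monitor profiles) against
# a mocked policy backend. Based only on the calls exercised below, a typical
# client-side flow looks roughly like the following; this is an illustrative
# sketch, and `nsxlib_config` is an assumed, pre-built configuration object
# rather than something defined in this module:
#
#     from vmware_nsxlib.v3 import policy
#
#     policy_lib = policy.NsxPolicyLib(nsxlib_config)
#     ssl_profiles = policy_lib.load_balancer.client_ssl_profile
#     profile_id = ssl_profiles.create_or_overwrite(
#         'my-profile', description='example', protocols=['TLS_V1_1'],
#         tenant='test')
#     ssl_profiles.update(profile_id, description='updated', tenant='test')
#     ssl_profiles.delete(profile_id, tenant='test')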
# import mock from vmware_nsxlib.tests.unit.v3.policy import test_resources from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3.policy import constants from vmware_nsxlib.v3.policy import lb_defs TEST_TENANT = 'test' class TestPolicyLBClientSSLProfileApi(test_resources.NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyLBClientSSLProfileApi, self).setUp() self.resourceApi = self.policy_lib.load_balancer.client_ssl_profile def test_create_with_id(self): name = 'd1' description = 'desc' obj_id = '111' protocols = ['TLS_V1_1'] with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, client_ssl_profile_id=obj_id, description=description, protocols=protocols, tenant=TEST_TENANT) expected_def = lb_defs.LBClientSslProfileDef( client_ssl_profile_id=obj_id, name=name, description=description, protocols=protocols, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(obj_id, result) def test_create_without_id(self): name = 'd1' description = 'desc' with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, description=description, tenant=TEST_TENANT) expected_def = lb_defs.LBClientSslProfileDef( client_ssl_profile_id=mock.ANY, name=name, description=description, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_delete(self): obj_id = '111' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(obj_id, tenant=TEST_TENANT) expected_def = lb_defs.LBClientSslProfileDef( client_ssl_profile_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): obj_id = '111' with mock.patch.object(self.policy_api, "get", return_value={'id': obj_id}) as api_call: result = self.resourceApi.get(obj_id, tenant=TEST_TENANT) expected_def = lb_defs.LBClientSslProfileDef( client_ssl_profile_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(obj_id, result['id']) def test_get_by_name(self): name = 'd1' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = lb_defs.LBClientSslProfileDef( tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_list(self): with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(tenant=TEST_TENANT) expected_def = lb_defs.LBClientSslProfileDef( tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_update(self): obj_id = '111' name = 'new name' description = 'new desc' with self.mock_get(obj_id, name), \ self.mock_create_update() as update_call: self.resourceApi.update(obj_id, name=name, description=description, tenant=TEST_TENANT) expected_def = lb_defs.LBClientSslProfileDef( client_ssl_profile_id=obj_id, name=name, description=description, tenant=TEST_TENANT) self.assert_called_with_def(update_call, expected_def) class TestPolicyLBServerSSLProfileApi(test_resources.NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyLBServerSSLProfileApi, self).setUp() self.resourceApi = self.policy_lib.load_balancer.server_ssl_profile def test_create_with_id(self): name = 'd1' description = 
'desc' obj_id = '111' protocols = ['TLS_V1_1'] with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, server_ssl_profile_id=obj_id, description=description, protocols=protocols, tenant=TEST_TENANT) expected_def = lb_defs.LBServerSslProfileDef( server_ssl_profile_id=obj_id, name=name, description=description, protocols=protocols, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(obj_id, result) def test_create_without_id(self): name = 'd1' description = 'desc' with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, description=description, tenant=TEST_TENANT) expected_def = lb_defs.LBServerSslProfileDef( server_ssl_profile_id=mock.ANY, name=name, description=description, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_delete(self): obj_id = '111' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(obj_id, tenant=TEST_TENANT) expected_def = lb_defs.LBServerSslProfileDef( server_ssl_profile_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): obj_id = '111' with mock.patch.object(self.policy_api, "get", return_value={'id': obj_id}) as api_call: result = self.resourceApi.get(obj_id, tenant=TEST_TENANT) expected_def = lb_defs.LBServerSslProfileDef( server_ssl_profile_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(obj_id, result['id']) def test_get_by_name(self): name = 'd1' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = lb_defs.LBServerSslProfileDef( tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_list(self): with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(tenant=TEST_TENANT) expected_def = lb_defs.LBServerSslProfileDef( tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_update(self): obj_id = '111' name = 'new name' description = 'new desc' with self.mock_get(obj_id, name), \ self.mock_create_update() as update_call: self.resourceApi.update(obj_id, name=name, description=description, tenant=TEST_TENANT) expected_def = lb_defs.LBServerSslProfileDef( server_ssl_profile_id=obj_id, name=name, description=description, tenant=TEST_TENANT) self.assert_called_with_def(update_call, expected_def) class TestPolicyLBPersistenceProfile( test_resources.NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyLBPersistenceProfile, self).setUp() self.resourceApi = ( self.policy_lib.load_balancer.lb_persistence_profile) def test_delete(self): obj_id = '111' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(obj_id, tenant=TEST_TENANT) expected_def = ( self.resourceApi.entry_def( persistence_profile_id=obj_id, tenant=TEST_TENANT)) self.assert_called_with_def(api_call, expected_def) def test_get(self): obj_id = '111' with mock.patch.object(self.policy_api, "get", return_value={'id': obj_id}) as api_call: result = self.resourceApi.get(obj_id, tenant=TEST_TENANT) expected_def = ( self.resourceApi.entry_def( persistence_profile_id=obj_id, tenant=TEST_TENANT)) 
self.assert_called_with_def(api_call, expected_def) self.assertEqual(obj_id, result['id']) def test_get_by_name(self): name = 'd1' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = ( self.resourceApi.entry_def( tenant=TEST_TENANT)) self.assert_called_with_def(api_call, expected_def) def test_list(self): with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(tenant=TEST_TENANT) expected_def = ( self.resourceApi.entry_def( tenant=TEST_TENANT)) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_wait_until_realized_fail(self): pers_id = 'test_pers' info = {'state': constants.STATE_UNREALIZED, 'realization_specific_identifier': pers_id} with mock.patch.object(self.resourceApi, "_get_realization_info", return_value=info): self.assertRaises(nsxlib_exc.RealizationTimeoutError, self.resourceApi.wait_until_realized, pers_id, max_attempts=5, sleep=0.1, tenant=TEST_TENANT) def test_wait_until_realized_succeed(self): pers_id = 'test_pers' info = {'state': constants.STATE_REALIZED, 'realization_specific_identifier': pers_id, 'entity_type': 'LbPersistenceProfileDto'} with mock.patch.object(self.resourceApi, "_get_realization_info", return_value=info): actual_info = self.resourceApi.wait_until_realized( pers_id, entity_type='LbPersistenceProfileDto', max_attempts=5, sleep=0.1, tenant=TEST_TENANT) self.assertEqual(info, actual_info) class TestPolicyLBCookiePersistenceProfile( test_resources.NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyLBCookiePersistenceProfile, self).setUp() self.resourceApi = ( self.policy_lib.load_balancer.lb_cookie_persistence_profile) def test_create_with_id(self): name = 'd1' description = 'desc' obj_id = '111' cookie_garble = 'test_garble' cookie_name = 'test_name' cookie_mode = 'INSERT' cookie_path = 'path' cookie_time = 'time' persistence_shared = False with self.mock_create_update() as api_call: result = self.resourceApi.create_or_overwrite( name, persistence_profile_id=obj_id, description=description, cookie_name=cookie_name, cookie_garble=cookie_garble, cookie_mode=cookie_mode, cookie_path=cookie_path, cookie_time=cookie_time, persistence_shared=persistence_shared, tenant=TEST_TENANT) expected_def = ( lb_defs.LBCookiePersistenceProfileDef( persistence_profile_id=obj_id, name=name, description=description, cookie_name=cookie_name, cookie_garble=cookie_garble, cookie_mode=cookie_mode, cookie_path=cookie_path, cookie_time=cookie_time, persistence_shared=persistence_shared, tenant=TEST_TENANT)) self.assert_called_with_def(api_call, expected_def) self.assertEqual(obj_id, result) def test_create_without_id(self): name = 'd1' description = 'desc' with self.mock_create_update() as api_call: result = self.resourceApi.create_or_overwrite( name, description=description, tenant=TEST_TENANT) expected_def = ( lb_defs.LBCookiePersistenceProfileDef( persistence_profile_id=mock.ANY, name=name, description=description, tenant=TEST_TENANT)) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_delete(self): obj_id = '111' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(obj_id, tenant=TEST_TENANT) expected_def = ( lb_defs.LBCookiePersistenceProfileDef( persistence_profile_id=obj_id, tenant=TEST_TENANT)) 
self.assert_called_with_def(api_call, expected_def) def test_get(self): obj_id = '111' with mock.patch.object(self.policy_api, "get", return_value={'id': obj_id}) as api_call: result = self.resourceApi.get(obj_id, tenant=TEST_TENANT) expected_def = ( lb_defs.LBCookiePersistenceProfileDef( persistence_profile_id=obj_id, tenant=TEST_TENANT)) self.assert_called_with_def(api_call, expected_def) self.assertEqual(obj_id, result['id']) def test_get_by_name(self): name = 'd1' with mock.patch.object( self.policy_api, "list", return_value={'results': [ {'resource_type': self.resourceApi.entry_def.resource_type, 'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = ( lb_defs.LBCookiePersistenceProfileDef( tenant=TEST_TENANT)) self.assert_called_with_def(api_call, expected_def) def test_list(self): with mock.patch.object( self.policy_api, "list", return_value={'results': [ {'resource_type': self.resourceApi.entry_def.resource_type, 'display_name': 'profile1'}, {'resource_type': 'wrong_type', 'display_name': 'profile2'}]}) as api_call: result = self.resourceApi.list(tenant=TEST_TENANT) expected_def = ( lb_defs.LBCookiePersistenceProfileDef( tenant=TEST_TENANT)) self.assert_called_with_def(api_call, expected_def) self.assertEqual(1, len(result)) def test_update(self): obj_id = '111' name = 'new name' description = 'new desc' cookie_garble = 'test_garble' cookie_name = 'test_name' cookie_mode = 'INSERT' cookie_path = 'path' cookie_time = 'time' persistence_shared = False with self.mock_get(obj_id, name), \ self.mock_create_update() as update_call: self.resourceApi.update(obj_id, name=name, description=description, cookie_name=cookie_name, cookie_garble=cookie_garble, cookie_mode=cookie_mode, cookie_path=cookie_path, cookie_time=cookie_time, persistence_shared=persistence_shared, tenant=TEST_TENANT) expected_def = ( lb_defs.LBCookiePersistenceProfileDef( persistence_profile_id=obj_id, name=name, description=description, cookie_name=cookie_name, cookie_garble=cookie_garble, cookie_mode=cookie_mode, cookie_path=cookie_path, cookie_time=cookie_time, persistence_shared=persistence_shared, tenant=TEST_TENANT)) self.assert_called_with_def(update_call, expected_def) class TestPolicyLBSourceIpProfileApi(test_resources.NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyLBSourceIpProfileApi, self).setUp() self.resourceApi = ( self.policy_lib.load_balancer.lb_source_ip_persistence_profile) def test_create_with_id(self): name = 'd1' description = 'desc' obj_id = '111' ha = 'ha' persistence_shared = True purge = 'purge' timeout = 100 with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, persistence_profile_id=obj_id, description=description, ha_persistence_mirroring_enabled=ha, persistence_shared=persistence_shared, purge=purge, timeout=timeout, tenant=TEST_TENANT) expected_def = ( lb_defs.LBSourceIpPersistenceProfileDef( persistence_profile_id=obj_id, name=name, description=description, ha_persistence_mirroring_enabled=ha, persistence_shared=persistence_shared, purge=purge, timeout=timeout, tenant=TEST_TENANT)) self.assert_called_with_def(api_call, expected_def) self.assertEqual(obj_id, result) def test_create_without_id(self): name = 'd1' description = 'desc' with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, description=description, tenant=TEST_TENANT) expected_def 
= ( lb_defs.LBSourceIpPersistenceProfileDef( persistence_profile_id=mock.ANY, name=name, description=description, tenant=TEST_TENANT)) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_delete(self): obj_id = '111' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(obj_id, tenant=TEST_TENANT) expected_def = ( lb_defs.LBSourceIpPersistenceProfileDef( persistence_profile_id=obj_id, tenant=TEST_TENANT)) self.assert_called_with_def(api_call, expected_def) def test_get(self): obj_id = '111' with mock.patch.object(self.policy_api, "get", return_value={'id': obj_id}) as api_call: result = self.resourceApi.get(obj_id, tenant=TEST_TENANT) expected_def = ( lb_defs.LBSourceIpPersistenceProfileDef( persistence_profile_id=obj_id, tenant=TEST_TENANT)) self.assert_called_with_def(api_call, expected_def) self.assertEqual(obj_id, result['id']) def test_get_by_name(self): name = 'd1' with mock.patch.object( self.policy_api, "list", return_value={'results': [ {'resource_type': self.resourceApi.entry_def.resource_type, 'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = ( lb_defs.LBSourceIpPersistenceProfileDef( tenant=TEST_TENANT)) self.assert_called_with_def(api_call, expected_def) def test_list(self): with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(tenant=TEST_TENANT) expected_def = ( lb_defs.LBSourceIpPersistenceProfileDef( tenant=TEST_TENANT)) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_update(self): obj_id = '111' name = 'new name' description = 'new desc' ha = False persistence_shared = False purge = 'no purge' timeout = 101 with self.mock_get(obj_id, name), \ self.mock_create_update() as update_call: self.resourceApi.update(obj_id, name=name, description=description, ha_persistence_mirroring_enabled=ha, persistence_shared=persistence_shared, purge=purge, timeout=timeout, tenant=TEST_TENANT) expected_def = ( lb_defs.LBSourceIpPersistenceProfileDef( persistence_profile_id=obj_id, name=name, description=description, ha_persistence_mirroring_enabled=ha, persistence_shared=persistence_shared, purge=purge, timeout=timeout, tenant=TEST_TENANT)) self.assert_called_with_def(update_call, expected_def) class TestPolicyLBApplicationProfile(test_resources.NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyLBApplicationProfile, self).setUp() self.resourceApi = self.policy_lib.load_balancer.lb_http_profile def test_create_with_id(self): name = 'd1' description = 'desc' obj_id = '111' http_redirect_to_https = False http_redirect_to = "sample-url" idle_timeout = 100 ntlm = False request_body_size = 1025 request_header_size = 10 response_header_size = 10 response_timeout = 10 x_forwarded_for = 'INSERT' with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, lb_app_profile_id=obj_id, description=description, http_redirect_to_https=http_redirect_to_https, http_redirect_to=http_redirect_to, idle_timeout=idle_timeout, ntlm=ntlm, request_body_size=request_body_size, request_header_size=request_header_size, response_header_size=response_header_size, response_timeout=response_timeout, x_forwarded_for=x_forwarded_for, tenant=TEST_TENANT) expected_def = ( lb_defs.LBHttpProfileDef( lb_app_profile_id=obj_id, name=name, description=description, 
http_redirect_to_https=http_redirect_to_https, http_redirect_to=http_redirect_to, idle_timeout=idle_timeout, ntlm=ntlm, request_body_size=request_body_size, request_header_size=request_header_size, response_header_size=response_header_size, response_timeout=response_timeout, x_forwarded_for=x_forwarded_for, tenant=TEST_TENANT)) self.assert_called_with_def(api_call, expected_def) self.assertEqual(obj_id, result) def test_fast_tcp_profile_def(self): obj_dict = {'close_timeout': 8, 'ha_flow_mirroring_enabled': False, 'idle_timeout': 100} fast_tcp_profile_def = lb_defs.LBFastTcpProfile(**obj_dict) self.assertDictContainsSubset(obj_dict, fast_tcp_profile_def.get_obj_dict()) def test_fast_udp_profile_def(self): obj_dict = {'flow_mirroring_enabled': False, 'idle_timeout': 100} fast_udp_profile_def = lb_defs.LBFastUdpProfile(**obj_dict) self.assertDictContainsSubset(obj_dict, fast_udp_profile_def.get_obj_dict()) def test_http_profile_def(self): obj_dict = {'http_redirect_to_https': False, 'http_redirect_to': "sample-url", 'idle_timeout': 100, 'ntlm': False, 'request_body_size': 1025, 'request_header_size': 10, 'response_header_size': 10, 'response_timeout': 10, 'x_forwarded_for': 'INSERT'} http_profile_def = lb_defs.LBHttpProfileDef(**obj_dict) self.assertDictContainsSubset(obj_dict, http_profile_def.get_obj_dict()) def test_create_without_id(self): name = 'd1' description = 'desc' with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, description=description, tenant=TEST_TENANT) expected_def = ( lb_defs.LBHttpProfileDef( lb_app_profile_id=mock.ANY, name=name, description=description, tenant=TEST_TENANT)) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_delete(self): obj_id = '111' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(obj_id, tenant=TEST_TENANT) expected_def = ( lb_defs.LBHttpProfileDef( lb_app_profile_id=obj_id, tenant=TEST_TENANT)) self.assert_called_with_def(api_call, expected_def) def test_get(self): obj_id = '111' with mock.patch.object(self.policy_api, "get", return_value={'id': obj_id}) as api_call: result = self.resourceApi.get(obj_id, tenant=TEST_TENANT) expected_def = ( lb_defs.LBHttpProfileDef( lb_app_profile_id=obj_id, tenant=TEST_TENANT)) self.assert_called_with_def(api_call, expected_def) self.assertEqual(obj_id, result['id']) def test_get_by_name(self): name = 'd1' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = ( lb_defs.LBHttpProfileDef(tenant=TEST_TENANT)) self.assert_called_with_def(api_call, expected_def) def test_list(self): with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(tenant=TEST_TENANT) expected_def = ( lb_defs.LBHttpProfileDef(tenant=TEST_TENANT)) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_update(self): obj_id = '111' name = 'new name' description = 'new desc' with self.mock_get(obj_id, name), \ self.mock_create_update() as update_call: self.resourceApi.update(obj_id, name=name, description=description, tenant=TEST_TENANT) expected_def = ( lb_defs.LBHttpProfileDef( lb_app_profile_id=obj_id, name=name, description=description, tenant=TEST_TENANT)) self.assert_called_with_def(update_call, expected_def) class 
TestPolicyLBService(test_resources.NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyLBService, self).setUp() self.resourceApi = self.policy_lib.load_balancer.lb_service def test_create_with_id(self): name = 'd1' description = 'desc' obj_id = '111' size = 'SMALL' connectivity_path = 'path' relax_scale_validation = True with self.mock_create_update() as api_call: result = self.resourceApi.create_or_overwrite( name, lb_service_id=obj_id, description=description, size=size, connectivity_path=connectivity_path, relax_scale_validation=relax_scale_validation, tenant=TEST_TENANT) expected_def = ( lb_defs.LBServiceDef( nsx_version=self.policy_lib.get_version(), lb_service_id=obj_id, name=name, description=description, size=size, connectivity_path=connectivity_path, relax_scale_validation=relax_scale_validation, tenant=TEST_TENANT)) self.assert_called_with_def(api_call, expected_def) self.assertEqual(obj_id, result) def test_create_without_id(self): name = 'd1' description = 'desc' with self.mock_create_update() as api_call: result = self.resourceApi.create_or_overwrite( name, description=description, tenant=TEST_TENANT) expected_def = ( lb_defs.LBServiceDef(lb_service_id=mock.ANY, name=name, description=description, tenant=TEST_TENANT)) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_create_with_unsupported_attribute(self): name = 'd1' description = 'desc' relax_scale_validation = True with self.mock_create_update() as api_call, \ mock.patch.object(self.resourceApi, 'version', '0.0.0'): result = self.resourceApi.create_or_overwrite( name, description=description, relax_scale_validation=relax_scale_validation, tenant=TEST_TENANT) expected_def = ( lb_defs.LBServiceDef(lb_service_id=mock.ANY, name=name, description=description, tenant=TEST_TENANT)) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_delete(self): obj_id = '111' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(obj_id, tenant=TEST_TENANT) expected_def = lb_defs.LBServiceDef( lb_service_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): obj_id = '111' with mock.patch.object(self.policy_api, "get", return_value={'id': obj_id}) as api_call: result = self.resourceApi.get(obj_id, tenant=TEST_TENANT) expected_def = lb_defs.LBServiceDef( lb_service_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(obj_id, result['id']) def test_get_by_name(self): name = 'd1' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = lb_defs.LBServiceDef( tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_list(self): with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(tenant=TEST_TENANT) expected_def = lb_defs.LBServiceDef( tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_update(self): obj_id = '111' name = 'new name' description = 'new desc' size = 'SMALL' connectivity_path = 'path' relax_scale_validation = True with self.mock_get(obj_id, name), \ self.mock_create_update() as update_call: self.resourceApi.update( obj_id, name=name, description=description, tenant=TEST_TENANT, size=size, 
connectivity_path=connectivity_path, relax_scale_validation=relax_scale_validation) expected_def = lb_defs.LBServiceDef( nsx_version=self.policy_lib.get_version(), lb_service_id=obj_id, name=name, description=description, tenant=TEST_TENANT, size=size, connectivity_path=connectivity_path, relax_scale_validation=relax_scale_validation) self.assert_called_with_def(update_call, expected_def) def test_get_status(self): obj_id = '111' with mock.patch.object(self.policy_api, "get") as api_call: self.resourceApi.get_status(obj_id, tenant=TEST_TENANT) expected_def = lb_defs.LBServiceStatusDef( lb_service_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get_statistics(self): obj_id = '111' with mock.patch.object(self.policy_api, "get") as api_call: self.resourceApi.get_statistics(obj_id, tenant=TEST_TENANT) expected_def = lb_defs.LBServiceStatisticsDef( lb_service_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get_virtual_server_status(self): obj_id = '111' vs_id = '222' with mock.patch.object(self.policy_api, "get") as api_call: self.resourceApi.get_virtual_server_status( obj_id, vs_id, tenant=TEST_TENANT) expected_def = lb_defs.LBVirtualServerStatusDef( lb_service_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get_usage(self): lbs_id = 'test_vs' with mock.patch.object(self.policy_api, "get") as api_call: self.resourceApi.get_usage( lbs_id, realtime=True, tenant=TEST_TENANT) expected_def = lb_defs.LBServiceUsageDef( lb_service_id=lbs_id, realtime=True, tenant=TEST_TENANT) expected_path = '%s/lb-services/%s/service-usage?source=realtime' self.assert_called_with_def(api_call, expected_def) self.assertEqual(expected_def.path_pattern, expected_path) def test_wait_until_realized_fail(self): lbs_id = 'test_lbs' info = {'state': constants.STATE_UNREALIZED, 'realization_specific_identifier': lbs_id, 'entity_type': 'LbServiceDto'} with mock.patch.object(self.resourceApi, "_get_realization_info", return_value=info): self.assertRaises(nsxlib_exc.RealizationTimeoutError, self.resourceApi.wait_until_realized, lbs_id, max_attempts=5, sleep=0.1, tenant=TEST_TENANT) def test_wait_until_realized_error(self): lbs_id = 'test_lbs' error_code = 23500 related_error_code = 23707 error_msg = 'Found errors in the request.' related_error_msg = 'Exceed maximum number of load balancer.' 
info = {'state': constants.STATE_ERROR, 'realization_specific_identifier': lbs_id, 'entity_type': 'LbServiceDto', 'alarms': [{ 'message': error_msg, 'error_details': { 'related_errors': [{ 'error_code': related_error_code, 'module_name': 'LOAD-BALANCER', 'error_message': related_error_msg }], 'error_code': error_code, 'module_name': 'LOAD-BALANCER', 'error_message': error_msg } }]} with mock.patch.object(self.resourceApi, "_get_realization_info", return_value=info): with self.assertRaises(nsxlib_exc.RealizationErrorStateError) as e: self.resourceApi.wait_until_realized( lbs_id, tenant=TEST_TENANT) error_msg_tail = "%s: %s" % (error_msg, related_error_msg) self.assertTrue(e.exception.msg.endswith(error_msg_tail)) self.assertEqual(e.exception.error_code, error_code) self.assertEqual(e.exception.related_error_codes, [related_error_code]) def test_wait_until_realized_succeed(self): lbs_id = 'test_lbs' info = {'state': constants.STATE_REALIZED, 'realization_specific_identifier': lbs_id, 'entity_type': 'LbServiceDto'} with mock.patch.object(self.resourceApi, "_get_realization_info", return_value=info): actual_info = self.resourceApi.wait_until_realized( lbs_id, max_attempts=5, sleep=0.1, tenant=TEST_TENANT) self.assertEqual(info, actual_info) class TestPolicyLBVirtualServer(test_resources.NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyLBVirtualServer, self).setUp() self.resourceApi = self.policy_lib.load_balancer.virtual_server def test_create_with_id(self): name = 'd1' description = 'desc' obj_id = '111' waf_profile_id = 'waf' waf_profile_path = self.policy_lib.waf_profile.get_path( profile_id=waf_profile_id, tenant=TEST_TENANT) waf_profile_binding = lb_defs.WAFProfileBindingDef( waf_profile_path=waf_profile_path) lb_acl = self.resourceApi.build_access_list_control( constants.ACTION_ALLOW, 'fake_group_path', True) with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, virtual_server_id=obj_id, waf_profile_binding=waf_profile_binding, description=description, access_list_control=lb_acl, tenant=TEST_TENANT) expected_def = lb_defs.LBVirtualServerDef( nsx_version=self.policy_lib.get_version(), virtual_server_id=obj_id, name=name, description=description, waf_profile_binding=waf_profile_binding, access_list_control=lb_acl.get_obj_dict(), tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(obj_id, result) def test_create_without_id(self): name = 'd1' description = 'desc' with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, description=description, tenant=TEST_TENANT) expected_def = lb_defs.LBVirtualServerDef( virtual_server_id=mock.ANY, name=name, description=description, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_delete(self): obj_id = '111' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(obj_id, tenant=TEST_TENANT) expected_def = lb_defs.LBVirtualServerDef( virtual_server_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): obj_id = '111' with mock.patch.object(self.policy_api, "get", return_value={'id': obj_id}) as api_call: result = self.resourceApi.get(obj_id, tenant=TEST_TENANT) expected_def = lb_defs.LBVirtualServerDef( virtual_server_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) 
self.assertEqual(obj_id, result['id']) def test_get_by_name(self): name = 'd1' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = lb_defs.LBVirtualServerDef( tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_list(self): with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(tenant=TEST_TENANT) expected_def = lb_defs.LBVirtualServerDef( tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_update(self): obj_id = '111' name = 'new name' description = 'new desc' vs_name = 'name-name' with self.mock_get(obj_id, vs_name), \ self.mock_create_update() as update_call: self.resourceApi.update(obj_id, name=name, description=description, tenant=TEST_TENANT) expected_def = lb_defs.LBVirtualServerDef( virtual_server_id=obj_id, name=name, description=description, tenant=TEST_TENANT) self.assert_called_with_def(update_call, expected_def) def test_non_partial_update(self): obj_id = '111' vs_name = 'name-name' with self.mock_get(obj_id, vs_name, max_concurrent_connections=80), \ self.mock_create_update() as update_call: self.resourceApi.update(obj_id, max_concurrent_connections=None, tenant=TEST_TENANT, allow_partial_updates=False) expected_def = lb_defs.LBVirtualServerDef( virtual_server_id=obj_id, name=vs_name, max_concurrent_connections=None, tenant=TEST_TENANT) update_call.assert_called_with(mock.ANY, partial_updates=False) self.assert_called_with_def(update_call, expected_def) def test_add_lb_rule(self): vs_obj_id = '111' vs_name = 'name-name' rule_actions = 'test1' rule_match_conditions = 'test2' rule_name = 'dummy_rule' rule_match_strategy = 'test3' rule_phase = 'test4' with self.mock_get(vs_obj_id, vs_name), \ self.mock_create_update() as update_call: self.resourceApi.add_lb_rule( vs_obj_id, actions=rule_actions, name=rule_name, match_conditions=rule_match_conditions, match_strategy=rule_match_strategy, phase=rule_phase) lb_rule = lb_defs.LBRuleDef( rule_actions, rule_match_conditions, rule_name, rule_match_strategy, rule_phase) expected_def = lb_defs.LBVirtualServerDef( virtual_server_id=vs_obj_id, rules=[lb_rule]) self.assert_called_with_def(update_call, expected_def) def test_add_lb_rule_first(self): vs_obj_id = '111' vs_name = 'name-name' rule_actions = 'test1' rule_match_conditions = 'test2' rule_name = 'dummy_rule' rule_match_strategy = 'test3' rule_phase = 'test4' with self.mock_get(vs_obj_id, vs_name, rules=[{'display_name': 'xx'}, {'display_name': 'yy'}]), \ self.mock_create_update() as update_call: self.resourceApi.add_lb_rule( vs_obj_id, actions=rule_actions, name=rule_name, match_conditions=rule_match_conditions, match_strategy=rule_match_strategy, phase=rule_phase, position=0) lb_rule = lb_defs.LBRuleDef( rule_actions, rule_match_conditions, rule_name, rule_match_strategy, rule_phase) expected_def = lb_defs.LBVirtualServerDef( virtual_server_id=vs_obj_id, rules=[lb_rule, {'display_name': 'xx'}, {'display_name': 'yy'}]) self.assert_called_with_def(update_call, expected_def) def test_add_lb_rule_last(self): vs_obj_id = '111' vs_name = 'name-name' rule_actions = 'test1' rule_match_conditions = 'test2' rule_name = 'dummy_rule' rule_match_strategy = 'test3' rule_phase = 'test4' with self.mock_get(vs_obj_id, vs_name, rules=[{'display_name': 'xx'}, {'display_name': 'yy'}]), \ 
self.mock_create_update() as update_call: self.resourceApi.add_lb_rule( vs_obj_id, actions=rule_actions, name=rule_name, match_conditions=rule_match_conditions, match_strategy=rule_match_strategy, phase=rule_phase) lb_rule = lb_defs.LBRuleDef( rule_actions, rule_match_conditions, rule_name, rule_match_strategy, rule_phase) expected_def = lb_defs.LBVirtualServerDef( virtual_server_id=vs_obj_id, rules=[{'display_name': 'xx'}, {'display_name': 'yy'}, lb_rule]) self.assert_called_with_def(update_call, expected_def) def test_add_lb_rule_last_over(self): vs_obj_id = '111' vs_name = 'name-name' rule_actions = 'test1' rule_match_conditions = 'test2' rule_name = 'dummy_rule' rule_match_strategy = 'test3' rule_phase = 'test4' with self.mock_get(vs_obj_id, vs_name, rules=[{'display_name': 'xx'}, {'display_name': 'yy'}]), \ self.mock_create_update() as update_call: self.resourceApi.add_lb_rule( vs_obj_id, actions=rule_actions, name=rule_name, match_conditions=rule_match_conditions, match_strategy=rule_match_strategy, phase=rule_phase, position=999) lb_rule = lb_defs.LBRuleDef( rule_actions, rule_match_conditions, rule_name, rule_match_strategy, rule_phase) expected_def = lb_defs.LBVirtualServerDef( virtual_server_id=vs_obj_id, rules=[{'display_name': 'xx'}, {'display_name': 'yy'}, lb_rule]) self.assert_called_with_def(update_call, expected_def) def test_add_lb_rule_mid(self): vs_obj_id = '111' vs_name = 'name-name' rule_actions = 'test1' rule_match_conditions = 'test2' rule_name = 'dummy_rule' rule_match_strategy = 'test3' rule_phase = 'test4' with self.mock_get(vs_obj_id, vs_name, rules=[{'display_name': 'xx'}, {'display_name': 'yy'}]), \ self.mock_create_update() as update_call: self.resourceApi.add_lb_rule( vs_obj_id, actions=rule_actions, name=rule_name, match_conditions=rule_match_conditions, match_strategy=rule_match_strategy, phase=rule_phase, position=1) lb_rule = lb_defs.LBRuleDef( rule_actions, rule_match_conditions, rule_name, rule_match_strategy, rule_phase) expected_def = lb_defs.LBVirtualServerDef( virtual_server_id=vs_obj_id, rules=[{'display_name': 'xx'}, lb_rule, {'display_name': 'yy'}]) self.assert_called_with_def(update_call, expected_def) def test_update_lb_rule(self): vs_obj_id = '111' vs_name = 'name-name' with self.mock_get( vs_obj_id, vs_name, rules=[{'display_name': 'xx', 'actions': '11'}, {'display_name': 'yy'}]), \ self.mock_create_update() as update_call: self.resourceApi.update_lb_rule(vs_obj_id, 'xx', actions='22') expected_def = lb_defs.LBVirtualServerDef( virtual_server_id=vs_obj_id, rules=[{'display_name': 'xx', 'actions': '22'}, {'display_name': 'yy'}]) self.assert_called_with_def(update_call, expected_def) def test_update_lb_rule_position(self): vs_obj_id = '111' vs_name = 'name-name' with self.mock_get( vs_obj_id, vs_name, rules=[{'display_name': 'xx', 'actions': '11'}, {'display_name': 'yy'}]), \ self.mock_create_update() as update_call: self.resourceApi.update_lb_rule(vs_obj_id, 'xx', actions='22', position=1) expected_def = lb_defs.LBVirtualServerDef( virtual_server_id=vs_obj_id, rules=[{'display_name': 'yy'}, {'display_name': 'xx', 'actions': '22'}]) self.assert_called_with_def(update_call, expected_def) def test_remove_lb_rule(self): vs_obj_id = '111' vs_name = 'name-name' with self.mock_get(vs_obj_id, vs_name, rules=[{'display_name': 'xx'}, {'display_name': 'yy'}]), \ self.mock_create_update() as update_call: self.resourceApi.remove_lb_rule(vs_obj_id, 'xx') expected_def = lb_defs.LBVirtualServerDef( virtual_server_id=vs_obj_id, rules=[{'display_name': 'yy'}]) 
self.assert_called_with_def(update_call, expected_def) def test_build_access_list_control(self): lb_acl = self.resourceApi.build_access_list_control( constants.ACTION_ALLOW, 'fake_group_path', True) expected_acl_dict = { 'action': constants.ACTION_ALLOW, 'enabled': True, 'group_path': 'fake_group_path' } self.assertDictEqual(lb_acl.get_obj_dict(), expected_acl_dict) def test_wait_until_realized_fail(self): vs_id = 'test_vs' info = {'state': constants.STATE_UNREALIZED, 'realization_specific_identifier': vs_id} with mock.patch.object(self.resourceApi, "_get_realization_info", return_value=info): self.assertRaises(nsxlib_exc.RealizationTimeoutError, self.resourceApi.wait_until_realized, vs_id, max_attempts=5, sleep=0.1, tenant=TEST_TENANT) def test_wait_until_realized_succeed(self): vs_id = 'test_vs' info = {'state': constants.STATE_REALIZED, 'realization_specific_identifier': vs_id, 'entity_type': 'LbVirtualServerDto'} with mock.patch.object(self.resourceApi, "_get_realization_info", return_value=info): actual_info = self.resourceApi.wait_until_realized( vs_id, entity_type='LbVirtualServerDto', max_attempts=5, sleep=0.1, tenant=TEST_TENANT) self.assertEqual(info, actual_info) def test_remove_virtual_server_client_ssl_profile_binding(self): vs_id = 'test-id' vs_name = 'test-name' client_binding = { 'default_certificate_path': '/infra/certificates/test-cert', 'client_ssl_profile_path': '/infra/lb-client-ssl-profiles/default'} server_binding = { 'ssl_profile_path': '/infra/lb-server-ssl-profiles/test'} with self.mock_get( vs_id, vs_name, client_ssl_profile_binding=client_binding, server_ssl_profile_binding=server_binding), \ self.mock_create_update() as update_call: self.resourceApi.remove_virtual_server_client_ssl_profile_binding( vs_id) expected_def = lb_defs.LBVirtualServerDef( virtual_server_id=vs_id, name=vs_name) self.assert_called_with_def(update_call, expected_def) class TestPolicyLBPoolApi(test_resources.NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyLBPoolApi, self).setUp() self.resourceApi = self.policy_lib.load_balancer.lb_pool def test_create_with_id(self): name = 'd1' description = 'desc' obj_id = '111' members = [ lb_defs.LBPoolMemberDef(ip_address='10.0.0.1')] algorithm = 'algo' active_monitor_paths = 'path1' member_group = 'group1' snat_translation = False with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, lb_pool_id=obj_id, description=description, members=members, active_monitor_paths=active_monitor_paths, algorithm=algorithm, member_group=member_group, snat_translation=snat_translation, tenant=TEST_TENANT) expected_def = lb_defs.LBPoolDef( lb_pool_id=obj_id, name=name, description=description, members=members, active_monitor_paths=active_monitor_paths, algorithm=algorithm, member_group=member_group, snat_translation=snat_translation, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(obj_id, result) def test_create_without_id(self): name = 'd1' description = 'desc' with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, description=description, tenant=TEST_TENANT) expected_def = lb_defs.LBPoolDef( lb_pool_id=mock.ANY, name=name, description=description, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_delete(self): obj_id = '111' with mock.patch.object(self.policy_api, "delete") as api_call: 
self.resourceApi.delete(obj_id, tenant=TEST_TENANT) expected_def = lb_defs.LBPoolDef( lb_pool_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): obj_id = '111' with mock.patch.object(self.policy_api, "get", return_value={'id': obj_id}) as api_call: result = self.resourceApi.get(obj_id, tenant=TEST_TENANT) expected_def = lb_defs.LBPoolDef( lb_pool_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(obj_id, result['id']) def test_get_by_name(self): name = 'd1' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = lb_defs.LBPoolDef( tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_list(self): with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(tenant=TEST_TENANT) expected_def = lb_defs.LBPoolDef( tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_update(self): obj_id = '111' name = 'new name' description = 'new desc' members = [{'ip_address': '10.0.0.1'}] algorithm = 'algo' active_monitor_paths = ['path1'] member_group = 'group1' snat_translation = False with mock.patch.object(self.policy_api, "get", return_value={'id': obj_id}), \ mock.patch.object(self.policy_api, "create_or_update") as update_call: self.resourceApi.update(obj_id, name=name, description=description, members=members, active_monitor_paths=active_monitor_paths, algorithm=algorithm, member_group=member_group, snat_translation=snat_translation, tenant=TEST_TENANT) expected_def = lb_defs.LBPoolDef( lb_pool_id=obj_id, name=name, description=description, members=members, active_monitor_paths=active_monitor_paths, algorithm=algorithm, member_group=member_group, snat_translation=snat_translation, tenant=TEST_TENANT) self.assert_called_with_def(update_call, expected_def) def test_add_monitor_to_pool(self): obj_id = '111' active_monitor_paths = ['path1'] with mock.patch.object(self.policy_api, "get", return_value={'id': obj_id}), \ mock.patch.object(self.policy_api, "create_or_update") as update_call: self.resourceApi.add_monitor_to_pool( obj_id, active_monitor_paths, tenant=TEST_TENANT) expected_def = lb_defs.LBPoolDef( lb_pool_id=obj_id, active_monitor_paths=active_monitor_paths, tenant=TEST_TENANT) self.assert_called_with_def(update_call, expected_def) def test_remove_monitor_from_pool(self): obj_id = '111' removed_monitor_path = 'path1' stay_monitor_path = 'path2' active_monitors = [removed_monitor_path, stay_monitor_path] with mock.patch.object( self.policy_api, "get", return_value={ 'id': obj_id, 'active_monitor_paths': active_monitors}), \ mock.patch.object(self.policy_api, "create_or_update") as update_call: self.resourceApi.remove_monitor_from_pool( obj_id, removed_monitor_path, tenant=TEST_TENANT) expected_def = lb_defs.LBPoolDef( lb_pool_id=obj_id, active_monitor_paths=[stay_monitor_path], tenant=TEST_TENANT) self.assert_called_with_def(update_call, expected_def) def test_create_pool_member_and_add_to_pool(self): obj_id = '111' ip_address = '1.1.1.1' with mock.patch.object(self.policy_api, "get", return_value={'id': obj_id}), \ mock.patch.object(self.policy_api, "create_or_update") as update_call: self.resourceApi.create_pool_member_and_add_to_pool( obj_id, ip_address, tenant=TEST_TENANT) mem_def = 
lb_defs.LBPoolMemberDef(ip_address) expected_def = lb_defs.LBPoolDef( lb_pool_id=obj_id, members=[mem_def], tenant=TEST_TENANT) self.assert_called_with_def(update_call, expected_def) def test_update_pool_member(self): obj_id = '111' ip_address = '1.1.1.1' port = '80' new_name = 'mem1' member = {'ip_address': ip_address, 'port': port} with mock.patch.object(self.policy_api, "get", return_value={'id': obj_id, 'members': [member]}), \ mock.patch.object(self.policy_api, "create_or_update") as update_call: self.resourceApi.update_pool_member( obj_id, ip_address, port=port, display_name=new_name, tenant=TEST_TENANT) member['display_name'] = new_name expected_def = lb_defs.LBPoolDef( lb_pool_id=obj_id, members=[member], tenant=TEST_TENANT) self.assert_called_with_def(update_call, expected_def) def test_wait_until_realized_fail(self): pool_id = 'test_pool' info = {'state': constants.STATE_UNREALIZED, 'realization_specific_identifier': pool_id} with mock.patch.object(self.resourceApi, "_get_realization_info", return_value=info): self.assertRaises(nsxlib_exc.RealizationTimeoutError, self.resourceApi.wait_until_realized, pool_id, max_attempts=5, sleep=0.1, tenant=TEST_TENANT) def test_wait_until_realized_succeed(self): pool_id = 'test_pool' info = {'state': constants.STATE_REALIZED, 'realization_specific_identifier': pool_id, 'entity_type': 'LbPoolDto'} with mock.patch.object(self.resourceApi, "_get_realization_info", return_value=info): actual_info = self.resourceApi.wait_until_realized( pool_id, entity_type='LbPoolDto', max_attempts=5, sleep=0.1, tenant=TEST_TENANT) self.assertEqual(info, actual_info) class TestPolicyLBMonitorProfileHttpApi(test_resources.NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyLBMonitorProfileHttpApi, self).setUp() self.resourceApi = ( self.policy_lib.load_balancer.lb_monitor_profile_http) self.obj_def = lb_defs.LBHttpMonitorProfileDef def test_create_with_id(self): name = 'd1' obj_id = '111' with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( lb_monitor_profile_id=obj_id, name=name, tenant=TEST_TENANT) expected_def = self.obj_def( lb_monitor_profile_id=obj_id, name=name, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(obj_id, result) def test_create_without_id(self): name = 'd1' with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name=name, tenant=TEST_TENANT) expected_def = self.obj_def( lb_monitor_profile_id=mock.ANY, name=name, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_delete(self): obj_id = '111' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(obj_id, tenant=TEST_TENANT) expected_def = self.obj_def( lb_monitor_profile_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): obj_id = '111' with mock.patch.object(self.policy_api, "get", return_value={'id': obj_id}) as api_call: result = self.resourceApi.get(obj_id, tenant=TEST_TENANT) expected_def = self.obj_def( lb_monitor_profile_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(obj_id, result['id']) def test_get_by_name(self): name = 'd1' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT) 
self.assertIsNotNone(obj) expected_def = self.obj_def( tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_list(self): with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(tenant=TEST_TENANT) expected_def = self.obj_def( tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_update(self): obj_id = '111' name = 'new name' with self.mock_get(obj_id, name), \ self.mock_create_update() as update_call: self.resourceApi.update(obj_id, name=name, tenant=TEST_TENANT) expected_def = self.obj_def( lb_monitor_profile_id=obj_id, name=name, tenant=TEST_TENANT) self.assert_called_with_def(update_call, expected_def) class TestPolicyLBMonitorProfileHttpsApi(TestPolicyLBMonitorProfileHttpApi): def setUp(self, *args, **kwargs): super(TestPolicyLBMonitorProfileHttpsApi, self).setUp() self.resourceApi = ( self.policy_lib.load_balancer.lb_monitor_profile_https) self.obj_def = lb_defs.LBHttpsMonitorProfileDef class TestPolicyLBMonitorProfileUdpApi(TestPolicyLBMonitorProfileHttpApi): def setUp(self, *args, **kwargs): super(TestPolicyLBMonitorProfileUdpApi, self).setUp() self.resourceApi = ( self.policy_lib.load_balancer.lb_monitor_profile_udp) self.obj_def = lb_defs.LBUdpMonitorProfileDef class TestPolicyLBMonitorProfileIcmpApi(TestPolicyLBMonitorProfileHttpApi): def setUp(self, *args, **kwargs): super(TestPolicyLBMonitorProfileIcmpApi, self).setUp() self.resourceApi = ( self.policy_lib.load_balancer.lb_monitor_profile_icmp) self.obj_def = lb_defs.LBIcmpMonitorProfileDef class TestPolicyLBMonitorProfileTcpApi(TestPolicyLBMonitorProfileHttpApi): def setUp(self, *args, **kwargs): super(TestPolicyLBMonitorProfileTcpApi, self).setUp() self.resourceApi = ( self.policy_lib.load_balancer.lb_monitor_profile_tcp) self.obj_def = lb_defs.LBTcpMonitorProfileDef vmware-nsxlib-15.0.6/vmware_nsxlib/tests/unit/v3/policy/test_transaction.py0000664000175000017500000005327113623151571027225 0ustar zuulzuul00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
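# The tests in this module verify that resource calls made inside an
# NsxPolicyTransaction context are accumulated and flushed as a single PATCH
# of the 'infra' tree, rather than issued as individual per-resource requests.
# An illustrative sketch of the pattern exercised below (policy_lib is assumed
# to be an already-initialized NsxPolicyLib instance):
#
#     from vmware_nsxlib.v3.policy import transaction as trans
#
#     with trans.NsxPolicyTransaction():
#         policy_lib.domain.create_or_overwrite('d1', 'domain1', 'first domain')
#         policy_lib.group.create_or_overwrite('g1', 'domain1', 'group1',
#                                              'first group')
#     # On exiting the context, both objects are sent in one body of the form
#     # {'resource_type': 'Infra', 'children': [...]}.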
# import copy import mock from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase from vmware_nsxlib.tests.unit.v3.policy import policy_testcase from vmware_nsxlib.v3 import policy from vmware_nsxlib.v3.policy import constants from vmware_nsxlib.v3.policy import transaction as trans class TestPolicyTransaction(policy_testcase.TestPolicyApi): def setUp(self): super(TestPolicyTransaction, self).setUp() nsxlib_config = nsxlib_testcase.get_default_nsxlib_config() # Mock the nsx-lib for the passthrough api with mock.patch('vmware_nsxlib.v3.NsxLib.get_version', return_value='2.5.0'): self.policy_lib = policy.NsxPolicyLib(nsxlib_config) self.policy_api = self.policy_lib.policy_api self.policy_api.client = self.client def assert_infra_patch_call(self, body): self.assert_json_call('PATCH', self.client, 'infra', data=body, headers=mock.ANY) def test_domains_only(self): tags = [{'scope': 'color', 'tag': 'green'}] d1 = {'resource_type': 'Domain', 'id': 'domain1', 'display_name': 'd1', 'description': 'first domain', 'tags': tags} d2 = {'resource_type': 'Domain', 'id': 'domain2', 'display_name': 'd2', 'description': 'no tags', 'tags': None} with trans.NsxPolicyTransaction(): for d in (d1, d2): self.policy_lib.domain.create_or_overwrite( d['display_name'], d['id'], d['description'], tags=d['tags'] if 'tags' in d else None) expected_body = {'resource_type': 'Infra', 'children': [{'resource_type': 'ChildDomain', 'Domain': d1}, {'resource_type': 'ChildDomain', 'Domain': d2}]} self.assert_infra_patch_call(expected_body) def test_domains_and_groups(self): tags = [{'scope': 'color', 'tag': 'green'}] g1 = {'resource_type': 'Group', 'id': 'group1', 'display_name': 'g1', 'description': 'first group', 'tags': None, 'expression': []} g2 = {'resource_type': 'Group', 'id': 'group2', 'description': 'second group', 'display_name': 'g2', 'tags': tags, 'expression': []} g3 = {'resource_type': 'Group', 'id': 'group3', 'display_name': 'g3', 'description': 'third group', 'tags': None, 'expression': []} d1 = {'resource_type': 'Domain', 'id': 'domain1', 'display_name': 'd1', 'description': 'first domain', 'tags': tags} d2 = {'resource_type': 'Domain', 'id': 'domain2', 'display_name': 'd2', 'description': 'no tags', 'tags': None} with trans.NsxPolicyTransaction(): for d in (d1, d2): self.policy_lib.domain.create_or_overwrite( d['display_name'], d['id'], d['description'], tags=d['tags'] if 'tags' in d else None) d['children'] = [] for g in (g1, g2, g3): self.policy_lib.group.create_or_overwrite( g['display_name'], d['id'], g['id'], g['description'], tags=g['tags'] if 'tags' in g else None) d['children'].append({'resource_type': 'ChildGroup', 'Group': g}) expected_body = {'resource_type': 'Infra', 'children': [{'resource_type': 'ChildDomain', 'Domain': d1}, {'resource_type': 'ChildDomain', 'Domain': d2}]} self.assert_infra_patch_call(expected_body) def test_ip_address_pool_and_block_subnets(self): pool = {'id': 'pool1', 'resource_type': 'IpAddressPool', 'display_name': 'pool1', 'children': []} ip_block_id = 'block1' subnet1 = {'id': 'subnet1', 'resource_type': 'IpAddressPoolBlockSubnet', 'ip_block_path': '/infra/ip-blocks/%s' % ip_block_id, 'size': 8} subnet2 = {'id': 'subnet2', 'resource_type': 'IpAddressPoolBlockSubnet', 'ip_block_path': '/infra/ip-blocks/%s' % ip_block_id, 'size': 4} with trans.NsxPolicyTransaction(): self.policy_lib.ip_pool.create_or_overwrite( pool['display_name'], ip_pool_id=pool['id']) for s in (subnet1, subnet2): self.policy_lib.ip_pool.allocate_block_subnet( ip_pool_id=pool['id'], 
ip_block_id=ip_block_id, ip_subnet_id=s['id'], size=s['size']) pool['children'].append( {'resource_type': 'ChildIpAddressPoolSubnet', 'IpAddressPoolSubnet': s}) expected_body = {'resource_type': 'Infra', 'children': [{'resource_type': 'ChildIpAddressPool', 'IpAddressPool': pool}]} self.assert_infra_patch_call(expected_body) def test_groups_only(self): g1 = {'resource_type': 'Group', 'id': 'group1', 'display_name': 'g1', 'description': 'first group', 'expression': []} g2 = {'resource_type': 'Group', 'id': 'group2', 'description': 'second group', 'display_name': 'g2', 'expression': []} d1 = {'resource_type': 'Domain', 'id': 'domain1'} d2 = {'resource_type': 'Domain', 'id': 'domain2'} with trans.NsxPolicyTransaction(): for d in (d1, d2): d['children'] = [] for g in (g1, g2): self.policy_lib.group.create_or_overwrite( g['display_name'], d['id'], g['id'], g['description']) d['children'].append({'resource_type': 'ChildGroup', 'Group': g}) expected_body = {'resource_type': 'Infra', 'children': [{'resource_type': 'ChildDomain', 'Domain': d1}, {'resource_type': 'ChildDomain', 'Domain': d2}]} self.assert_infra_patch_call(expected_body) def test_segment_ports(self): port1 = {'id': 'port_on_seg1', 'resource_type': 'SegmentPort', 'display_name': 'port_on_seg1', 'attachment': {'type': 'VIF', 'app_id': 'app1', 'traffic_tag': 5} } port2 = {'id': 'port1_on_seg2', 'resource_type': 'SegmentPort', 'display_name': 'port_on_seg2', 'attachment': {'type': 'CHILD', 'app_id': 'app2', 'traffic_tag': None} } seg1 = {'id': 'seg1', 'resource_type': 'Segment', 'children': [{'resource_type': 'ChildSegmentPort', 'SegmentPort': port1}]} seg2 = {'id': 'seg2', 'resource_type': 'Segment', 'children': [{'resource_type': 'ChildSegmentPort', 'SegmentPort': port2}]} with trans.NsxPolicyTransaction(): self.policy_lib.segment_port.create_or_overwrite( port1['display_name'], seg1['id'], port1['id'], attachment_type=port1['attachment']['type'], app_id=port1['attachment']['app_id'], traffic_tag=port1['attachment']['traffic_tag']) self.policy_lib.segment_port.create_or_overwrite( port2['display_name'], seg2['id'], port2['id'], attachment_type=port2['attachment']['type'], app_id=port2['attachment']['app_id'], traffic_tag=port2['attachment']['traffic_tag']) expected_body = {'resource_type': 'Infra', 'children': [{'resource_type': 'ChildSegment', 'Segment': seg1}, {'resource_type': 'ChildSegment', 'Segment': seg2}]} self.assert_infra_patch_call(expected_body) def test_tier1_nat_rules_create(self): tier1_id = 'tier1-1' nat_rule_id1 = 'nat1' nat_rule_id2 = 'nat2' nat_rule1 = {"action": constants.NAT_ACTION_SNAT, "display_name": "snat rule", "id": nat_rule_id1, "resource_type": "PolicyNatRule"} nat_rule2 = {"action": constants.NAT_ACTION_DNAT, "display_name": "dnat rule", "id": nat_rule_id2, "resource_type": "PolicyNatRule"} policy_nat = {"id": "USER", "resource_type": "PolicyNat", "children": [ {"PolicyNatRule": nat_rule1, "resource_type": "ChildPolicyNatRule"}, {"PolicyNatRule": nat_rule2, "resource_type": "ChildPolicyNatRule"}]} tier1_dict = {"id": tier1_id, "resource_type": "Tier1", "children": [{"PolicyNat": policy_nat, "resource_type": "ChildPolicyNat"}]} with trans.NsxPolicyTransaction(): self.policy_lib.tier1_nat_rule.create_or_overwrite( 'snat rule', tier1_id, nat_rule_id=nat_rule_id1, action=constants.NAT_ACTION_SNAT) self.policy_lib.tier1_nat_rule.create_or_overwrite( 'dnat rule', tier1_id, nat_rule_id=nat_rule_id2, action=constants.NAT_ACTION_DNAT) expected_body = {"resource_type": "Infra", "children": [{"Tier1": tier1_dict, 
"resource_type": "ChildTier1"}]} self.assert_infra_patch_call(expected_body) def test_tier1_nat_rules_delete(self): tier1_id = 'tier1-1' nat_rule_id1 = 'nat1' nat_rule_id2 = 'nat2' nat_rule1 = {"action": constants.NAT_ACTION_DNAT, "id": nat_rule_id1, "resource_type": "PolicyNatRule"} nat_rule2 = {"action": constants.NAT_ACTION_DNAT, "id": nat_rule_id2, "resource_type": "PolicyNatRule"} policy_nat = {"id": "USER", "resource_type": "PolicyNat", "children": [ {"PolicyNatRule": nat_rule1, "marked_for_delete": True, "resource_type": "ChildPolicyNatRule"}, {"PolicyNatRule": nat_rule2, "marked_for_delete": True, "resource_type": "ChildPolicyNatRule"}]} tier1_dict = {"id": tier1_id, "resource_type": "Tier1", "children": [{"PolicyNat": policy_nat, "resource_type": "ChildPolicyNat"}]} with trans.NsxPolicyTransaction(): self.policy_lib.tier1_nat_rule.delete( tier1_id, nat_rule_id=nat_rule_id1) self.policy_lib.tier1_nat_rule.delete( tier1_id, nat_rule_id=nat_rule_id2) expected_body = {"resource_type": "Infra", "children": [{"Tier1": tier1_dict, "resource_type": "ChildTier1"}]} self.assert_infra_patch_call(expected_body) def test_creating_security_policy_and_dfw_rules(self): dfw_rule = {'id': 'rule_id1', 'action': 'ALLOW', 'display_name': 'rule1', 'description': None, 'direction': 'IN_OUT', 'ip_protocol': 'IPV4_IPV6', 'logged': False, 'destination_groups': ['destination_url'], 'source_groups': ['src_url'], 'resource_type': 'Rule', 'scope': None, 'sequence_number': None, 'tag': None, 'services': ['ANY']} security_policy = {'id': 'security_policy_id1', 'display_name': 'security_policy', 'category': 'Application', 'resource_type': 'SecurityPolicy'} domain = {'resource_type': 'Domain', 'id': 'domain1'} domain_id = domain['id'] map_id = security_policy['id'] dfw_rule_entries = [self.policy_lib.comm_map.build_entry( name=dfw_rule['display_name'], domain_id=domain_id, map_id=map_id, entry_id=dfw_rule['id'], source_groups=dfw_rule['source_groups'], dest_groups=dfw_rule['destination_groups'] )] with trans.NsxPolicyTransaction(): self.policy_lib.comm_map.create_with_entries( name=security_policy['display_name'], domain_id=domain_id, map_id=map_id, entries=dfw_rule_entries ) def get_group_path(group_id, domain_id): return '/infra/domains/' + domain_id + '/groups/' + group_id dfw_rule['destination_groups'] = [get_group_path(group_id, domain_id) for group_id in dfw_rule['destination_groups']] dfw_rule['source_groups'] = [get_group_path(group_id, domain_id) for group_id in dfw_rule['source_groups']] child_rules = [{'resource_type': 'ChildRule', 'Rule': dfw_rule}] security_policy.update({'children': child_rules}) child_security_policies = [{ 'resource_type': 'ChildSecurityPolicy', 'SecurityPolicy': security_policy }] domain.update({'children': child_security_policies}) child_domains = [{'resource_type': 'ChildDomain', 'Domain': domain}] expected_body = {'resource_type': 'Infra', 'children': child_domains} self.assert_infra_patch_call(expected_body) @mock.patch('vmware_nsxlib.v3.policy.core_defs.NsxPolicyApi.get') def test_updating_security_policy_and_dfw_rules(self, mock_get_api): dfw_rule1 = {'id': 'rule_id1', 'action': 'ALLOW', 'display_name': 'rule1', 'description': None, 'direction': 'IN_OUT', 'ip_protocol': 'IPV4_IPV6', 'logged': False, 'destination_groups': ['destination_url'], 'source_groups': ['src_url'], 'resource_type': 'Rule', 'scope': None, 'sequence_number': None, 'tag': None, 'services': ['ANY'], "_create_time": 1} dfw_rule2 = {'id': 'rule_id2', 'action': 'DROP', 'display_name': 'rule2', 'description': 
None, 'direction': 'IN_OUT', 'ip_protocol': 'IPV4_IPV6', 'logged': False, 'destination_groups': ['destination_url'], 'source_groups': ['src_url'], 'resource_type': 'Rule', 'scope': None, 'sequence_number': None, 'tag': None, 'services': ['ANY'], "_create_time": 1} security_policy = {'id': 'security_policy_id1', 'display_name': 'security_policy', 'category': 'Application', 'resource_type': 'SecurityPolicy'} domain = {'resource_type': 'Domain', 'id': 'domain1'} domain_id = domain['id'] map_id = security_policy['id'] new_rule_name = 'new_rule1' new_direction = 'IN' dfw_rule_entries = [self.policy_lib.comm_map.build_entry( name=new_rule_name, domain_id=domain_id, map_id=map_id, entry_id=dfw_rule1['id'], source_groups=dfw_rule1['source_groups'], dest_groups=dfw_rule1['destination_groups'], direction=new_direction )] def get_group_path(group_id, domain_id): return '/infra/domains/' + domain_id + '/groups/' + group_id for dfw_rule in [dfw_rule1, dfw_rule2]: dfw_rule['destination_groups'] = [get_group_path(group_id, domain_id) for group_id in dfw_rule['destination_groups']] dfw_rule['source_groups'] = [get_group_path(group_id, domain_id) for group_id in dfw_rule['source_groups']] security_policy_values = copy.deepcopy(security_policy) security_policy_values.update({'rules': copy.deepcopy([dfw_rule1, dfw_rule2])}) mock_get_api.return_value = security_policy_values with trans.NsxPolicyTransaction(): self.policy_lib.comm_map.update_with_entries( name=security_policy['display_name'], domain_id=domain_id, map_id=map_id, entries=dfw_rule_entries ) dfw_rule1['display_name'] = new_rule_name dfw_rule1['direction'] = new_direction child_rules = [{'resource_type': 'ChildRule', 'Rule': dfw_rule1}, {'resource_type': 'ChildRule', 'Rule': dfw_rule2, 'marked_for_delete': True}] security_policy.update({'children': child_rules}) child_security_policies = [{ 'resource_type': 'ChildSecurityPolicy', 'SecurityPolicy': security_policy }] domain.update({'children': child_security_policies}) child_domains = [{ 'resource_type': 'ChildDomain', 'Domain': domain }] expected_body = {'resource_type': 'Infra', 'children': child_domains} self.assert_infra_patch_call(expected_body) @mock.patch('vmware_nsxlib.v3.policy.core_defs.NsxPolicyApi.get') def test_updating_security_policy_with_no_entries_set(self, mock_get_api): dfw_rule1 = {'id': 'rule_id1', 'action': 'ALLOW', 'display_name': 'rule1', 'description': None, 'direction': 'IN_OUT', 'ip_protocol': 'IPV4_IPV6', 'logged': False, 'destination_groups': ['destination_url'], 'source_groups': ['src_url'], 'resource_type': 'Rule', 'scope': None, 'sequence_number': None, 'tag': None, 'services': ['ANY'], "_create_time": 1} security_policy = {'id': 'security_policy_id1', 'display_name': 'security_policy', 'category': 'Application', 'resource_type': 'SecurityPolicy'} domain = {'resource_type': 'Domain', 'id': 'domain1'} domain_id = domain['id'] map_id = security_policy['id'] def get_group_path(group_id, domain_id): return '/infra/domains/' + domain_id + '/groups/' + group_id for dfw_rule in [dfw_rule1]: dfw_rule['destination_groups'] = [get_group_path(group_id, domain_id) for group_id in dfw_rule['destination_groups']] dfw_rule['source_groups'] = [get_group_path(group_id, domain_id) for group_id in dfw_rule['source_groups']] security_policy.update({'rules': [dfw_rule1]}) mock_get_api.return_value = security_policy with trans.NsxPolicyTransaction(): self.policy_lib.comm_map.update_with_entries( name=security_policy['display_name'], domain_id=domain_id, map_id=map_id ) 
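# Illustrative note (not part of the original test): no entries are passed to
# update_with_entries here, so the policy presumably keeps the rules returned
# by the mocked NsxPolicyApi.get above. The expected SecurityPolicy body built
# below therefore carries dfw_rule1 under 'rules' as-is, with no ChildRule
# children and nothing marked for delete -- contrast with
# test_updating_security_policy_and_dfw_rules above, where changed rules
# become ChildRule entries and stale ones get 'marked_for_delete': True.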
child_security_policies = [{ 'resource_type': 'ChildSecurityPolicy', 'SecurityPolicy': security_policy }] domain.update({'children': child_security_policies}) child_domains = [{ 'resource_type': 'ChildDomain', 'Domain': domain }] expected_body = {'resource_type': 'Infra', 'children': child_domains} self.assert_infra_patch_call(expected_body) vmware-nsxlib-15.0.6/vmware_nsxlib/tests/unit/v3/policy/test_resources.py0000664000175000017500000074362013623151571026716 0ustar zuulzuul00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import mock from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase from vmware_nsxlib.tests.unit.v3.policy import policy_testcase from vmware_nsxlib.tests.unit.v3 import test_client from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3 import nsx_constants from vmware_nsxlib.v3 import policy from vmware_nsxlib.v3.policy import constants from vmware_nsxlib.v3.policy import core_defs from vmware_nsxlib.v3.policy import core_resources TEST_TENANT = 'test' class NsxPolicyLibTestCase(policy_testcase.TestPolicyApi): def setUp(self, *args, **kwargs): super(NsxPolicyLibTestCase, self).setUp() nsxlib_config = nsxlib_testcase.get_default_nsxlib_config( allow_passthrough=kwargs.get('allow_passthrough', True)) # Mock the nsx-lib for the passthrough api # TODO(annak): move version forward with backend releases with mock.patch("vmware_nsxlib.v3.NsxLib") as mock_lib: mock_lib.return_value.get_version.return_value = "3.0.0" self.policy_lib = policy.NsxPolicyLib(nsxlib_config) self.policy_api = self.policy_lib.policy_api self.policy_api.client = self.client self.maxDiff = None def _compare_def(self, expected_def, actual_def): # verify the resource definition class self.assertEqual(expected_def.__class__, actual_def.__class__) # verify the resource definition tenant self.assertEqual(expected_def.get_tenant(), actual_def.get_tenant()) # verify the resource definition values self.assertEqual(expected_def.get_obj_dict(), actual_def.get_obj_dict()) def assert_called_with_def(self, mock_api, expected_def, call_num=0): # verify the api was called mock_api.assert_called() actual_def = mock_api.call_args_list[call_num][0][0] self._compare_def(expected_def, actual_def) def assert_called_with_defs(self, mock_api, expected_defs, call_num=0): # verify the api & first resource definition self.assert_called_with_def(mock_api, expected_defs[0], call_num=call_num) # compare the 2nd resource definition class & values def_list = mock_api.call_args_list[call_num][0][1] if not isinstance(def_list, list): def_list = [def_list] for i in range(1, len(expected_defs)): actual_def = def_list[i - 1] expected_def = expected_defs[i] self._compare_def(expected_def, actual_def) def assert_called_with_def_and_dict(self, mock_api, expected_def, expected_dict, call_num=0): # verify the api & resource definition self.assert_called_with_def(mock_api, expected_def, call_num=call_num) # compare the 2nd api parameter which is a dictionary actual_dict = 
mock_api.call_args_list[call_num][0][0].body self.assertEqual(expected_dict, actual_dict) def mock_get(self, obj_id, obj_name, **kwargs): obj_dict = { 'id': obj_id, 'display_name': obj_name, 'resource_type': self.resourceApi.entry_def.resource_type()} if kwargs: obj_dict.update(kwargs) return mock.patch.object(self.policy_api, "get", return_value=obj_dict) def mock_create_update(self): return mock.patch.object(self.policy_api, "create_or_update") class TestPolicyDomain(NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyDomain, self).setUp() self.resourceApi = self.policy_lib.domain def test_create_with_id(self): name = 'd1' description = 'desc' domain_id = '111' with self.mock_create_update() as api_call: result = self.resourceApi.create_or_overwrite( name, domain_id=domain_id, description=description, tenant=TEST_TENANT) expected_def = core_defs.DomainDef(domain_id=domain_id, name=name, description=description, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(domain_id, result) def test_minimalistic_create(self): name = 'test' with self.mock_create_update() as api_call: result = self.resourceApi.create_or_overwrite(name, tenant=TEST_TENANT) expected_def = core_defs.DomainDef(domain_id=mock.ANY, name=name, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_create_without_id(self): name = 'd1' description = 'desc' with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, description=description, tenant=TEST_TENANT) expected_def = core_defs.DomainDef(domain_id=mock.ANY, name=name, description=description, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_delete(self): domain_id = '111' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(domain_id, tenant=TEST_TENANT) expected_def = core_defs.DomainDef(domain_id=domain_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): domain_id = '111' with mock.patch.object(self.policy_api, "get", return_value={'id': domain_id}) as api_call: result = self.resourceApi.get(domain_id, tenant=TEST_TENANT) expected_def = core_defs.DomainDef(domain_id=domain_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(domain_id, result['id']) def test_get_by_name(self): name = 'd1' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = core_defs.DomainDef(tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_list(self): with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(tenant=TEST_TENANT) expected_def = core_defs.DomainDef(tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_update(self): domain_id = '111' name = 'new name' description = 'new desc' with self.mock_get(domain_id, name), \ self.mock_create_update() as update_call: self.resourceApi.update(domain_id, name=name, description=description, tenant=TEST_TENANT) expected_def = core_defs.DomainDef(domain_id=domain_id, name=name, description=description, tenant=TEST_TENANT) self.assert_called_with_def(update_call, expected_def) def 
test_unset(self): domain_id = '111' with mock.patch.object(self.policy_api, "get", return_value={'id': domain_id}): self.resourceApi.update(domain_id, description=None, tags=None, tenant=TEST_TENANT) expected_body = {'id': domain_id, 'resource_type': 'Domain', 'description': None, 'tags': None} self.assert_json_call('PATCH', self.client, '%s/domains/%s' % (TEST_TENANT, domain_id), data=expected_body, headers=test_client.PARTIAL_UPDATE_HEADERS) class TestPolicyGroup(NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyGroup, self).setUp() self.resourceApi = self.policy_lib.group def test_create_with_id(self): domain_id = '111' name = 'g1' description = 'desc' group_id = '222' with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, domain_id, group_id=group_id, description=description, tenant=TEST_TENANT) expected_def = core_defs.GroupDef(domain_id=domain_id, group_id=group_id, name=name, description=description, conditions=[], tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(group_id, result) def test_create_without_id(self): domain_id = '111' name = 'g1' description = 'desc' with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, domain_id, description=description, tenant=TEST_TENANT) expected_def = core_defs.GroupDef(domain_id=domain_id, group_id=mock.ANY, name=name, description=description, conditions=[], tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_create_with_condition(self): domain_id = '111' name = 'g1' description = 'desc' cond_val = '123' cond_op = constants.CONDITION_OP_EQUALS cond_member_type = constants.CONDITION_MEMBER_VM cond_key = constants.CONDITION_KEY_TAG with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, domain_id, description=description, cond_val=cond_val, cond_op=cond_op, cond_member_type=cond_member_type, cond_key=cond_key, tenant=TEST_TENANT) exp_cond = core_defs.Condition(value=cond_val, key=cond_key, operator=cond_op, member_type=cond_member_type) expected_def = core_defs.GroupDef(domain_id=domain_id, group_id=mock.ANY, name=name, description=description, conditions=[exp_cond], tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_create_with_empty_condition(self): domain_id = '111' name = 'g1' description = 'desc' with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, domain_id, description=description, tenant=TEST_TENANT) expected_def = core_defs.GroupDef(domain_id=domain_id, group_id=mock.ANY, name=name, description=description, conditions=[], tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_create_with_simple_condition(self): domain_id = '111' name = 'g1' description = 'desc' cond_val = '123' cond_op = constants.CONDITION_OP_EQUALS cond_member_type = constants.CONDITION_MEMBER_VM cond_key = constants.CONDITION_KEY_TAG cond = self.resourceApi.build_condition( cond_val=cond_val, cond_op=cond_op, cond_member_type=cond_member_type, cond_key=cond_key) with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite_with_conditions( name, domain_id, description=description, conditions=[cond], 
tenant=TEST_TENANT) exp_cond = core_defs.Condition(value=cond_val, key=cond_key, operator=cond_op, member_type=cond_member_type) expected_def = core_defs.GroupDef(domain_id=domain_id, group_id=mock.ANY, name=name, description=description, conditions=[exp_cond], tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def _test_create_with_condition(self, condition, exp_condition): domain_id = '111' name = 'g1' description = 'desc' with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite_with_conditions( name, domain_id, description=description, conditions=condition, tenant=TEST_TENANT) expected_def = core_defs.GroupDef(domain_id=domain_id, group_id=mock.ANY, name=name, description=description, conditions=exp_condition, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_create_with_union_condition(self): cond_val1 = '123' cond_val2 = '456' cond_op = constants.CONDITION_OP_EQUALS cond_member_type = constants.CONDITION_MEMBER_VM cond_key = constants.CONDITION_KEY_TAG cond1 = self.resourceApi.build_condition( cond_val=cond_val1, cond_op=cond_op, cond_member_type=cond_member_type, cond_key=cond_key) cond2 = self.resourceApi.build_condition( cond_val=cond_val2, cond_op=cond_op, cond_member_type=cond_member_type, cond_key=cond_key) union_cond = self.resourceApi.build_union_condition( conditions=[cond1, cond2]) exp_cond1 = core_defs.Condition(value=cond_val1, key=cond_key, operator=cond_op, member_type=cond_member_type) exp_cond2 = core_defs.Condition(value=cond_val2, key=cond_key, operator=cond_op, member_type=cond_member_type) or_cond = core_defs.ConjunctionOperator( operator=constants.CONDITION_OP_OR) exp_cond = [exp_cond1, or_cond, exp_cond2] self._test_create_with_condition(union_cond, exp_cond) def test_create_with_nested_condition(self): cond_val1 = '123' cond_val2 = '456' cond_op = constants.CONDITION_OP_EQUALS cond_member_type = constants.CONDITION_MEMBER_VM cond_key = constants.CONDITION_KEY_TAG cond1 = self.resourceApi.build_condition( cond_val=cond_val1, cond_op=cond_op, cond_member_type=cond_member_type, cond_key=cond_key) cond2 = self.resourceApi.build_condition( cond_val=cond_val2, cond_op=cond_op, cond_member_type=cond_member_type, cond_key=cond_key) nested = self.resourceApi.build_nested_condition( conditions=[cond1, cond2]) exp_cond1 = core_defs.Condition(value=cond_val1, key=cond_key, operator=cond_op, member_type=cond_member_type) exp_cond2 = core_defs.Condition(value=cond_val2, key=cond_key, operator=cond_op, member_type=cond_member_type) and_cond = core_defs.ConjunctionOperator() exp_cond = core_defs.NestedExpression( expressions=[exp_cond1, and_cond, exp_cond2]) self._test_create_with_condition(nested, exp_cond) def test_create_with_ip_expression(self): domain_id = '111' name = 'g1' description = 'desc' cidr = '1.1.1.0/24' cond = self.resourceApi.build_ip_address_expression([cidr]) with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite_with_conditions( name, domain_id, description=description, conditions=[cond], tenant=TEST_TENANT) exp_cond = core_defs.IPAddressExpression([cidr]) expected_def = core_defs.GroupDef(domain_id=domain_id, group_id=mock.ANY, name=name, description=description, conditions=[exp_cond], tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def 
test_create_with_path_expression(self): domain_id = '111' name = 'g1' description = 'desc' path = '/test/path1' cond = self.resourceApi.build_path_expression([path]) with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite_with_conditions( name, domain_id, description=description, conditions=[cond], tenant=TEST_TENANT) exp_cond = core_defs.PathExpression([path]) expected_def = core_defs.GroupDef(domain_id=domain_id, group_id=mock.ANY, name=name, description=description, conditions=[exp_cond], tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_delete(self): domain_id = '111' group_id = '222' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(domain_id, group_id, tenant=TEST_TENANT) expected_def = core_defs.GroupDef(domain_id=domain_id, group_id=group_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): domain_id = '111' group_id = '222' with mock.patch.object(self.policy_api, "get", return_value={'id': group_id}) as api_call: result = self.resourceApi.get(domain_id, group_id, tenant=TEST_TENANT) expected_def = core_defs.GroupDef(domain_id=domain_id, group_id=group_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(group_id, result['id']) def test_get_by_name(self): domain_id = '111' name = 'g1' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(domain_id, name, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = core_defs.GroupDef(domain_id=domain_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_list(self): domain_id = '111' with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(domain_id, tenant=TEST_TENANT) expected_def = core_defs.GroupDef(domain_id=domain_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_update(self): domain_id = '111' group_id = '222' name = 'new name' description = 'new desc' with self.mock_get(group_id, name), \ self.mock_create_update() as update_call: self.resourceApi.update(domain_id, group_id, name=name, description=description, tenant=TEST_TENANT) expected_def = core_defs.GroupDef(domain_id=domain_id, group_id=group_id, name=name, description=description, tenant=TEST_TENANT) self.assert_called_with_def( update_call, expected_def) def test_update_with_conditions(self): domain_id = '111' group_id = '222' name = 'name' new_name = 'new name' description = 'desc' new_description = 'new desc' cond_val1 = '123' cond_val2 = '456' cond_op = constants.CONDITION_OP_EQUALS cond_member_type = constants.CONDITION_MEMBER_VM cond_key = constants.CONDITION_KEY_TAG cond1_def = core_defs.Condition(value=cond_val1, key=cond_key, operator=cond_op, member_type=cond_member_type) cond2_def = core_defs.Condition(value=cond_val2, key=cond_key, operator=cond_op, member_type=cond_member_type) original_group = { 'id': group_id, 'resource_type': 'Group', 'display_name': name, 'description': description, 'expression': [cond1_def.get_obj_dict()]} updated_group = { 'id': group_id, 'resource_type': 'Group', 'display_name': new_name, 'description': new_description, 'expression': [cond2_def.get_obj_dict()]} group_def = core_defs.GroupDef( domain_id=domain_id, group_id=group_id, 
tenant=TEST_TENANT) with mock.patch.object(self.policy_api, "get", return_value=original_group),\ mock.patch.object(self.policy_api.client, "update") as update_call: self.resourceApi.update_with_conditions( domain_id, group_id, name=new_name, description=new_description, conditions=[cond2_def], tenant=TEST_TENANT) update_call.assert_called_once_with( group_def.get_resource_path(), updated_group) def test_update_with_conditions_callback(self): def update_payload_cbk(revised_payload, payload): revised_ips = revised_payload["expression"][0]["ip_addresses"] new_ips = payload["conditions"][0].ip_addresses updated_ips = revised_ips + new_ips payload["conditions"] = [core_defs.IPAddressExpression( updated_ips)] domain_id = '111' group_id = '222' name = 'name' new_name = 'new name' description = 'desc' new_description = 'new desc' ips1 = ["1.1.1.1"] ips2 = ["2.2.2.2"] cond1_def = core_defs.IPAddressExpression(ips1) cond2_def = core_defs.IPAddressExpression(ips2) updated_cond_def = core_defs.IPAddressExpression(ips1 + ips2) original_group = { 'id': group_id, 'resource_type': 'Group', 'display_name': name, 'description': description, 'expression': [cond1_def.get_obj_dict()]} updated_group = { 'id': group_id, 'resource_type': 'Group', 'display_name': new_name, 'description': new_description, 'expression': [updated_cond_def.get_obj_dict()]} group_def = core_defs.GroupDef( domain_id=domain_id, group_id=group_id, conditions=[cond2_def], tenant=TEST_TENANT) with mock.patch.object(self.policy_api, "get", return_value=original_group),\ mock.patch.object(self.policy_api.client, "update") as update_call: self.resourceApi.update_with_conditions( domain_id, group_id, name=new_name, description=new_description, conditions=[cond2_def], tenant=TEST_TENANT, update_payload_cbk=update_payload_cbk) update_call.assert_called_once_with( group_def.get_resource_path(), updated_group) def test_unset(self): domain_id = '111' group_id = '222' description = 'new' with self.mock_get(group_id, 'test'): self.resourceApi.update(domain_id, group_id, name=None, description=description, tenant=TEST_TENANT) expected_body = {'id': group_id, 'resource_type': 'Group', 'display_name': None, 'description': description} self.assert_json_call('PATCH', self.client, '%s/domains/%s/groups/%s' % (TEST_TENANT, domain_id, group_id), data=expected_body, headers=test_client.PARTIAL_UPDATE_HEADERS) def test_get_realized(self): domain_id = 'd1' group_id = 'g1' result = [{'state': constants.STATE_REALIZED, 'entity_type': 'RealizedGroup'}] with mock.patch.object( self.policy_api, "get_realized_entities", return_value=result) as api_get: state = self.resourceApi.get_realized_state( domain_id, group_id, tenant=TEST_TENANT) self.assertEqual(constants.STATE_REALIZED, state) path = "/%s/domains/%s/groups/%s" % ( TEST_TENANT, domain_id, group_id) api_get.assert_called_once_with(path, silent=False) def test_get_realized_multiple_results_get_default(self): domain_id = 'd1' group_id = 'g1' result = [{'state': constants.STATE_UNREALIZED, 'entity_type': 'NotRealizedGroup'}, {'state': constants.STATE_REALIZED, 'entity_type': 'RealizedGroup'}] with mock.patch.object( self.policy_api, "get_realized_entities", return_value=result) as api_get: state = self.resourceApi.get_realized_state( domain_id, group_id, tenant=TEST_TENANT) self.assertEqual(constants.STATE_UNREALIZED, state) path = "/%s/domains/%s/groups/%s" % ( TEST_TENANT, domain_id, group_id) api_get.assert_called_once_with(path, silent=False) def test_get_realized_multiple_results_get_specific(self): 
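# Illustrative note (not part of the original test): when the realization API
# returns several entities, get_realized_state with no entity_type (previous
# test) simply reports the state of the first result; passing
# entity_type='RealizedGroup' below selects the matching entity, so the
# realized state is returned even though an unrealized entity is listed first.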
domain_id = 'd1' group_id = 'g1' result = [{'state': constants.STATE_UNREALIZED, 'entity_type': 'NotRealizedGroup'}, {'state': constants.STATE_REALIZED, 'entity_type': 'RealizedGroup'}] with mock.patch.object( self.policy_api, "get_realized_entities", return_value=result) as api_get: state = self.resourceApi.get_realized_state( domain_id, group_id, entity_type='RealizedGroup', tenant=TEST_TENANT) self.assertEqual(constants.STATE_REALIZED, state) path = "/%s/domains/%s/groups/%s" % ( TEST_TENANT, domain_id, group_id) api_get.assert_called_once_with(path, silent=False) def test_get_realized_id(self): domain_id = 'd1' group_id = 'g1' realized_id = 'realized_111' result = [{'state': constants.STATE_REALIZED, 'entity_type': 'RealizedGroup', 'realization_specific_identifier': realized_id}] with mock.patch.object( self.policy_api, "get_realized_entities", return_value=result) as api_get: result_id = self.resourceApi.get_realized_id( domain_id, group_id, tenant=TEST_TENANT) self.assertEqual(realized_id, result_id) path = "/%s/domains/%s/groups/%s" % ( TEST_TENANT, domain_id, group_id) api_get.assert_called_once_with(path, silent=False) def test_get_path(self): domain_id = 'd1' group_id = 'g1' result = self.resourceApi.get_path(domain_id, group_id, tenant=TEST_TENANT) expected_path = '/%s/domains/%s/groups/%s' % ( TEST_TENANT, domain_id, group_id) self.assertEqual(expected_path, result) def test_wait_until_realized_fail(self): domain_id = 'd1' group_id = 'g1' info = {'state': constants.STATE_UNREALIZED, 'realization_specific_identifier': group_id, 'entity_type': 'RealizedGroup'} with mock.patch.object(self.resourceApi, "_get_realization_info", return_value=info): self.assertRaises(nsxlib_exc.RealizationTimeoutError, self.resourceApi.wait_until_realized, domain_id, group_id, max_attempts=5, sleep=0.1, tenant=TEST_TENANT) def test_wait_until_realized_succeed(self): domain_id = 'd1' group_id = 'g1' info = {'state': constants.STATE_REALIZED, 'realization_specific_identifier': group_id, 'entity_type': 'RealizedGroup'} with mock.patch.object(self.resourceApi, "_get_realization_info", return_value=info): actual_info = self.resourceApi.wait_until_realized( domain_id, group_id, max_attempts=5, sleep=0.1, tenant=TEST_TENANT) self.assertEqual(info, actual_info) class TestPolicyL4Service(NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyL4Service, self).setUp() self.resourceApi = self.policy_lib.service def test_create(self): name = 's1' description = 'desc' protocol = constants.TCP dest_ports = [81, 82] source_ports = [83, 84] tags = [{'scope': 'a', 'tag': 'b'}] with mock.patch.object(self.policy_api, "create_with_parent") as api_call: result = self.resourceApi.create_or_overwrite( name, description=description, protocol=protocol, dest_ports=dest_ports, source_ports=source_ports, tags=tags, tenant=TEST_TENANT) exp_srv_def = core_defs.ServiceDef(service_id=mock.ANY, name=name, description=description, tags=tags, tenant=TEST_TENANT) exp_entry_def = core_defs.L4ServiceEntryDef( service_id=mock.ANY, entry_id='entry', name='entry', protocol=protocol, dest_ports=dest_ports, source_ports=source_ports, tenant=TEST_TENANT) self.assert_called_with_defs( api_call, [exp_srv_def, exp_entry_def]) self.assertIsNotNone(result) def test_delete(self): srv_id = '111' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(srv_id, tenant=TEST_TENANT) expected_def = core_defs.ServiceDef(service_id=srv_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) 
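# Illustrative note (not part of the original test): the CRUD tests in this
# class (and in the other resource test classes) all follow the same pattern:
# patch the relevant policy_api method, invoke the resource API, build the
# expected core_defs definition, and hand both to assert_called_with_def from
# NsxPolicyLibTestCase, which compares the captured def by class, tenant and
# get_obj_dict() output rather than by object identity, e.g.:
#
#     actual_def = api_call.call_args_list[0][0][0]
#     self.assertEqual(expected_def.get_obj_dict(), actual_def.get_obj_dict())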
def test_get(self): srv_id = '111' with mock.patch.object(self.policy_api, "get", return_value={'id': srv_id}) as api_call: result = self.resourceApi.get(srv_id, tenant=TEST_TENANT) expected_def = core_defs.ServiceDef(service_id=srv_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(srv_id, result['id']) def test_get_by_name(self): name = 's1' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = core_defs.ServiceDef(tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_list(self): with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(tenant=TEST_TENANT) expected_def = core_defs.ServiceDef(tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_update(self): srv_id = '111' name = 'newName' description = 'new desc' protocol = 'tcp' tags = [{'scope': 'a', 'tag': 'b'}] entry_body = {'id': 'entry', 'l4_protocol': protocol} with mock.patch.object(self.policy_api, "get", return_value=entry_body),\ mock.patch.object(self.policy_api, "create_with_parent") as update_call: self.resourceApi.update(srv_id, name=name, description=description, tags=tags, tenant=TEST_TENANT) service_def = core_defs.ServiceDef(service_id=srv_id, name=name, description=description, tags=tags, tenant=TEST_TENANT) entry_def = core_defs.L4ServiceEntryDef( service_id=id, entry_id='entry', protocol=protocol, tenant=TEST_TENANT) self.assert_called_with_defs(update_call, [service_def, entry_def]) def test_update_all(self): srv_id = '111' name = 'newName' description = 'new desc' protocol = 'udp' dest_ports = [555] source_ports = [666] entry_body = {'id': 'entry', 'l4_protocol': 'tcp'} with mock.patch.object(self.policy_api, "get", return_value=entry_body),\ mock.patch.object(self.policy_api, "create_with_parent") as update_call: self.resourceApi.update(srv_id, name=name, description=description, protocol=protocol, dest_ports=dest_ports, source_ports=source_ports, tenant=TEST_TENANT) service_def = core_defs.ServiceDef(service_id=srv_id, name=name, description=description, tenant=TEST_TENANT) entry_def = core_defs.L4ServiceEntryDef( service_id=srv_id, entry_id=mock.ANY, protocol=protocol, dest_ports=dest_ports, source_ports=source_ports, tenant=TEST_TENANT) self.assert_called_with_defs( update_call, [service_def, entry_def]) def test_unset(self): name = 'hello' service_id = '111' # Until policy PATCH is fixed to accept partial update, we # call get on child entry with mock.patch.object( self.policy_api, "get", return_value={'display_name': name}): self.resourceApi.update(service_id, description=None, dest_ports=None, tenant=TEST_TENANT) expected_body = {'id': service_id, 'description': None, 'resource_type': 'Service', 'service_entries': [{ 'display_name': name, 'id': 'entry', 'resource_type': 'L4PortSetServiceEntry', 'destination_ports': None}] } self.assert_json_call('PATCH', self.client, '%s/services/%s' % (TEST_TENANT, service_id), data=expected_body) class TestPolicyIcmpService(NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyIcmpService, self).setUp() self.resourceApi = self.policy_lib.icmp_service def test_create(self): name = 's1' description = 'desc' icmp_type = 2 with mock.patch.object(self.policy_api, "create_with_parent") as api_call: result = 
self.resourceApi.create_or_overwrite( name, description=description, icmp_type=icmp_type, tenant=TEST_TENANT) exp_srv_def = core_defs.ServiceDef(service_id=mock.ANY, name=name, description=description, tenant=TEST_TENANT) exp_entry_def = core_defs.IcmpServiceEntryDef( service_id=mock.ANY, entry_id='entry', name='entry', version=4, icmp_type=icmp_type, tenant=TEST_TENANT) self.assert_called_with_defs( api_call, [exp_srv_def, exp_entry_def]) self.assertIsNotNone(result) def test_delete(self): srv_id = '111' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(srv_id, tenant=TEST_TENANT) expected_def = core_defs.ServiceDef(service_id=srv_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): srv_id = '111' with mock.patch.object(self.policy_api, "get", return_value={'id': srv_id}) as api_call: result = self.resourceApi.get(srv_id, tenant=TEST_TENANT) expected_def = core_defs.ServiceDef(service_id=srv_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(srv_id, result['id']) def test_get_by_name(self): name = 's1' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = core_defs.ServiceDef(tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_list(self): with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(tenant=TEST_TENANT) expected_def = core_defs.ServiceDef(tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_update(self): srv_id = '111' name = 'new_name' description = 'new desc' with mock.patch.object(self.policy_api, "get", return_value={'id': 'entry', 'protocol': 'ICMPv4'}),\ mock.patch.object(self.policy_api, "create_with_parent") as update_call: self.resourceApi.update(srv_id, name=name, description=description, tenant=TEST_TENANT) service_def = core_defs.ServiceDef(service_id=srv_id, name=name, description=description, tenant=TEST_TENANT) entry_def = core_defs.IcmpServiceEntryDef( service_id=srv_id, entry_id='entry', version=4, tenant=TEST_TENANT) self.assert_called_with_defs(update_call, [service_def, entry_def]) def test_update_all(self): srv_id = '111' name = 'newName' description = 'new desc' version = 6 icmp_type = 3 icmp_code = 3 with mock.patch.object(self.policy_api, "get", return_value={'id': 'entry'}),\ mock.patch.object(self.policy_api, "create_with_parent") as update_call: self.resourceApi.update(srv_id, name=name, description=description, version=version, icmp_type=icmp_type, icmp_code=icmp_code, tenant=TEST_TENANT) # get will be called for the entire service service_def = core_defs.ServiceDef(service_id=srv_id, name=name, description=description, tenant=TEST_TENANT) entry_def = core_defs.IcmpServiceEntryDef( service_id=srv_id, entry_id=mock.ANY, version=version, icmp_type=icmp_type, icmp_code=icmp_code, tenant=TEST_TENANT) self.assert_called_with_defs( update_call, [service_def, entry_def]) def test_icmp_type_and_code_in_obj_dict(self): icmp_type, icmp_code = 0, 0 entry_def = core_defs.IcmpServiceEntryDef( icmp_type=icmp_type, icmp_code=icmp_code) body = entry_def.get_obj_dict() self.assertEqual(icmp_type, body["icmp_type"]) self.assertEqual(icmp_code, body["icmp_code"]) class TestPolicyIPProtocolService(NsxPolicyLibTestCase): def setUp(self, *args, 
**kwargs): super(TestPolicyIPProtocolService, self).setUp() self.resourceApi = self.policy_lib.ip_protocol_service def test_create(self): name = 's1' description = 'desc' protocol_number = 2 with mock.patch.object(self.policy_api, "create_with_parent") as api_call: result = self.resourceApi.create_or_overwrite( name, description=description, protocol_number=protocol_number, tenant=TEST_TENANT) exp_srv_def = core_defs.ServiceDef(service_id=mock.ANY, name=name, description=description, tenant=TEST_TENANT) exp_entry_def = core_defs.IPProtocolServiceEntryDef( service_id=mock.ANY, entry_id='entry', name='entry', protocol_number=protocol_number, tenant=TEST_TENANT) self.assert_called_with_defs( api_call, [exp_srv_def, exp_entry_def]) self.assertIsNotNone(result) def test_delete(self): srv_id = '111' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(srv_id, tenant=TEST_TENANT) expected_def = core_defs.ServiceDef(service_id=srv_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): srv_id = '111' with mock.patch.object(self.policy_api, "get", return_value={'id': srv_id}) as api_call: result = self.resourceApi.get(srv_id, tenant=TEST_TENANT) expected_def = core_defs.ServiceDef(service_id=srv_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(srv_id, result['id']) def test_get_by_name(self): name = 's1' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = core_defs.ServiceDef(tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_list(self): with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(tenant=TEST_TENANT) expected_def = core_defs.ServiceDef(tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_update(self): srv_id = '111' name = 'new_name' description = 'new desc' with mock.patch.object(self.policy_api, "get", return_value={'id': 'entry'}),\ mock.patch.object(self.policy_api, "create_with_parent") as update_call: self.resourceApi.update(srv_id, name=name, description=description, tenant=TEST_TENANT) service_def = core_defs.ServiceDef(service_id=srv_id, name=name, description=description, tenant=TEST_TENANT) entry_def = core_defs.IPProtocolServiceEntryDef( service_id=srv_id, entry_id='entry', tenant=TEST_TENANT) self.assert_called_with_defs(update_call, [service_def, entry_def]) def test_update_all(self): srv_id = '111' name = 'newName' description = 'new desc' protocol_number = 3 with mock.patch.object(self.policy_api, "get", return_value={'id': 'entry'}),\ mock.patch.object(self.policy_api, "create_with_parent") as service_update_call: self.resourceApi.update(srv_id, name=name, description=description, protocol_number=protocol_number, tenant=TEST_TENANT) service_def = core_defs.ServiceDef(service_id=srv_id, name=name, description=description, tenant=TEST_TENANT) entry_def = core_defs.IPProtocolServiceEntryDef( service_id=srv_id, entry_id='entry', protocol_number=protocol_number, tenant=TEST_TENANT) self.assert_called_with_defs(service_update_call, [service_def, entry_def]) def test_protocol_number_in_obj_dict(self): protocol_number = 0 entry_def = core_defs.IPProtocolServiceEntryDef( protocol_number=protocol_number) body = entry_def.get_obj_dict() 
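# Illustrative note (not part of the original test): together with
# test_icmp_type_and_code_in_obj_dict above, this pins down that zero values
# (icmp_type=0, icmp_code=0, protocol_number=0) still appear in the serialized
# body -- presumably guarding against a plain `if value:` truthiness check
# dropping them when the def builds its obj dict.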
self.assertEqual(protocol_number, body["protocol_number"]) class TestPolicyMixedService(NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyMixedService, self).setUp() self.l4ServiceApi = self.policy_lib.service self.icmpServiceApi = self.policy_lib.icmp_service self.ipServiceApi = self.policy_lib.ip_protocol_service self.resourceApi = self.policy_lib.mixed_service def test_create_service_only(self): name = 's1' srv_id = '111' description = 'desc' tags = [{'scope': 'a', 'tag': 'b'}] with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, srv_id, description=description, tags=tags, tenant=TEST_TENANT) exp_srv_def = core_defs.ServiceDef( service_id=srv_id, name=name, description=description, tags=tags, tenant=TEST_TENANT) self.assert_called_with_def(api_call, exp_srv_def) self.assertIsNotNone(result) def test_create_with_entries(self): name = 's1' srv_id = '111' description = 'desc' tags = [{'scope': 'a', 'tag': 'b'}] protocol = constants.TCP dest_ports = [81, 82] source_ports = [83, 84] icmp_type = 2 protocol_number = 2 l4_entry = self.l4ServiceApi.build_entry( 'l4_entry', srv_id, 'l4_entry', protocol=protocol, dest_ports=dest_ports, source_ports=source_ports, tenant=TEST_TENANT) icmp_entry = self.icmpServiceApi.build_entry( 'icmp_entry', srv_id, 'icmp_entry', icmp_type=icmp_type, tenant=TEST_TENANT) ip_entry = self.ipServiceApi.build_entry( 'ip_entry', srv_id, 'ip_entry', protocol_number=protocol_number, tenant=TEST_TENANT) with mock.patch.object(self.policy_api, "create_with_parent") as api_call: result = self.resourceApi.create_or_overwrite( name, srv_id, description=description, entries=[l4_entry, icmp_entry, ip_entry], tags=tags, tenant=TEST_TENANT) service_def = core_defs.ServiceDef( service_id=srv_id, name=name, description=description, tags=tags, tenant=TEST_TENANT) self.assert_called_with_defs( api_call, [service_def, l4_entry, icmp_entry, ip_entry]) self.assertIsNotNone(result) def test_delete(self): srv_id = '111' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(srv_id, tenant=TEST_TENANT) expected_def = core_defs.ServiceDef(service_id=srv_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): srv_id = '111' with mock.patch.object(self.policy_api, "get", return_value={'id': srv_id}) as api_call: result = self.resourceApi.get(srv_id, tenant=TEST_TENANT) expected_def = core_defs.ServiceDef(service_id=srv_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(srv_id, result['id']) def test_get_by_name(self): name = 's1' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = core_defs.ServiceDef(tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_list(self): with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(tenant=TEST_TENANT) expected_def = core_defs.ServiceDef(tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_update(self): name = 'newName' srv_id = '111' description = 'new desc' tags = [{'scope': 'c', 'tag': 'd'}] protocol = constants.UDP dest_ports = [91, 92] source_ports = [93, 94] icmp_type = 3 protocol_number = 3 l4_entry = self.l4ServiceApi.build_entry( 
'l4_entry', srv_id, 'l4_entry', protocol=protocol, dest_ports=dest_ports, source_ports=source_ports, tenant=TEST_TENANT) icmp_entry = self.icmpServiceApi.build_entry( 'icmp_entry', srv_id, 'icmp_entry', icmp_type=icmp_type, tenant=TEST_TENANT) ip_entry = self.ipServiceApi.build_entry( 'ip_entry', srv_id, 'ip_entry', protocol_number=protocol_number, tenant=TEST_TENANT) with mock.patch.object(self.policy_api, "get", return_value={}),\ mock.patch.object(self.policy_api, "create_with_parent") as update_call: self.resourceApi.update( srv_id, name=name, description=description, entries=[l4_entry, icmp_entry, ip_entry], tags=tags, tenant=TEST_TENANT) service_def = core_defs.ServiceDef( service_id=srv_id, name=name, description=description, tags=tags, tenant=TEST_TENANT) self.assert_called_with_defs( update_call, [service_def, l4_entry, icmp_entry, ip_entry]) class TestPolicyCommunicationMap(NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyCommunicationMap, self).setUp() self.resourceApi = self.policy_lib.comm_map self.mapDef = core_defs.CommunicationMapDef self.entryDef = core_defs.CommunicationMapEntryDef self.resource_type = 'SecurityPolicy' self.path_name = 'security-policies' def test_create_another(self): domain_id = '111' map_id = '222' name = 'cm1' description = 'desc' source_group = 'g1' dest_group = 'g2' seq_num = 7 map_seq_num = 10 service_id = 'c1' direction = nsx_constants.IN_OUT get_return_value = {'rules': [{'sequence_number': 1}]} with mock.patch.object(self.policy_api, "create_with_parent") as api_call,\ mock.patch.object(self.policy_api, "get", return_value=get_return_value): result = self.resourceApi.create_or_overwrite( name, domain_id, map_id=map_id, description=description, sequence_number=seq_num, service_ids=[service_id], source_groups=[source_group], dest_groups=[dest_group], direction=direction, logged=True, map_sequence_number=map_seq_num, tenant=TEST_TENANT) map_def = self.mapDef( domain_id=domain_id, map_id=map_id, name=name, description=description, category=constants.CATEGORY_APPLICATION, map_sequence_number=map_seq_num, tenant=TEST_TENANT) entry_def = self.entryDef( domain_id=domain_id, map_id=map_id, entry_id='entry', name=name, action=constants.ACTION_ALLOW, description=description, sequence_number=seq_num, service_ids=[service_id], source_groups=[source_group], dest_groups=[dest_group], direction=direction, logged=True, tenant=TEST_TENANT) self.assert_called_with_defs(api_call, [map_def, entry_def]) self.assertEqual(map_id, result) def test_create_first_seqnum(self): domain_id = '111' map_id = '222' name = 'cm1' description = 'desc' source_group = 'g1' dest_group = 'g2' service_id = 'c1' category = 'Emergency' get_return_value = {'rules': []} with mock.patch.object(self.policy_api, "create_with_parent") as api_call, \ mock.patch.object(self.resourceApi, "get", return_value=get_return_value): result = self.resourceApi.create_or_overwrite( name, domain_id, map_id=map_id, description=description, service_ids=[service_id], source_groups=[source_group], dest_groups=[dest_group], category=category, logged=False, tenant=TEST_TENANT) map_def = self.mapDef( domain_id=domain_id, map_id=map_id, name=name, description=description, category=category, tenant=TEST_TENANT) entry_def = self.entryDef( domain_id=domain_id, map_id=map_id, entry_id='entry', name=name, action=constants.ACTION_ALLOW, direction=nsx_constants.IN_OUT, description=description, sequence_number=1, service_ids=[service_id], source_groups=[source_group], dest_groups=[dest_group], 
logged=False, tenant=TEST_TENANT) self.assert_called_with_defs(api_call, [map_def, entry_def]) self.assertEqual(map_id, result) def test_create_without_seqnum(self): domain_id = '111' name = 'cm1' description = 'desc' source_group = 'g1' dest_group = 'g2' service1_id = 'c1' service2_id = 'c2' with mock.patch.object(self.policy_api, "create_with_parent") as api_call: result = self.resourceApi.create_or_overwrite( name, domain_id, description=description, service_ids=[service1_id, service2_id], source_groups=[source_group], dest_groups=[dest_group], tenant=TEST_TENANT) expected_map_def = self.mapDef( domain_id=domain_id, map_id=mock.ANY, name=name, description=description, category=constants.CATEGORY_APPLICATION, tenant=TEST_TENANT) expected_entry_def = self.entryDef( domain_id=domain_id, map_id=mock.ANY, entry_id=mock.ANY, action=constants.ACTION_ALLOW, direction=nsx_constants.IN_OUT, name=name, description=description, sequence_number=1, service_ids=[service1_id, service2_id], source_groups=[source_group], dest_groups=[dest_group], tenant=TEST_TENANT) self.assert_called_with_defs( api_call, [expected_map_def, expected_entry_def]) self.assertIsNotNone(result) def test_create_map_only(self): domain_id = '111' name = 'cm1' description = 'desc' map_seq_num = 10 with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite_map_only( name, domain_id, description=description, map_sequence_number=map_seq_num, tenant=TEST_TENANT) expected_map_def = self.mapDef( domain_id=domain_id, map_id=mock.ANY, name=name, description=description, category=constants.CATEGORY_APPLICATION, map_sequence_number=map_seq_num, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_map_def) self.assertIsNotNone(result) def test_create_entry(self): domain_id = '111' map_id = '333' name = 'cm1' description = 'desc' source_group = 'g1' dest_group = 'g2' service1_id = 'c1' service2_id = 'c2' tag = 'abc1234' with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_entry( name=name, domain_id=domain_id, map_id=map_id, description=description, service_ids=[service1_id, service2_id], source_groups=[source_group], dest_groups=[dest_group], sequence_number=1, direction=nsx_constants.IN, ip_protocol=nsx_constants.IPV4, tag=tag, tenant=TEST_TENANT) expected_entry_def = self.entryDef( domain_id=domain_id, map_id=map_id, entry_id=mock.ANY, name=name, action=constants.ACTION_ALLOW, description=description, sequence_number=1, service_ids=[service1_id, service2_id], source_groups=[source_group], dest_groups=[dest_group], direction=nsx_constants.IN, ip_protocol=nsx_constants.IPV4, scope=None, logged=False, tag=tag, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_entry_def) self.assertIsNotNone(result) def test_create_entry_no_service(self): domain_id = '111' map_id = '333' name = 'cm1' description = 'desc' source_group = 'g1' dest_group = 'g2' tag = 'abc1234' with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_entry( name, domain_id, map_id, description=description, source_groups=[source_group], dest_groups=[dest_group], sequence_number=1, tag=tag, tenant=TEST_TENANT) expected_entry_def = self.entryDef( domain_id=domain_id, map_id=map_id, entry_id=mock.ANY, name=name, action=constants.ACTION_ALLOW, direction=nsx_constants.IN_OUT, ip_protocol=nsx_constants.IPV4_IPV6, description=description, sequence_number=1, service_ids=None, 
source_groups=[source_group], dest_groups=[dest_group], scope=None, logged=False, tag=tag, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_entry_def) self.assertIsNotNone(result) def test_create_entry_no_seq_num(self): domain_id = '111' map_id = '333' name = 'cm1' description = 'desc' source_group = 'g1' dest_group = 'g2' service1_id = 'c1' service2_id = 'c2' seq_num = 1 ret_comm = {'rules': [{'sequence_number': seq_num}]} tag = 'abc1234' with mock.patch.object(self.policy_api, "create_or_update") as api_call,\ mock.patch.object(self.policy_api, "get", return_value=ret_comm): result = self.resourceApi.create_entry( name, domain_id, map_id, description=description, service_ids=[service1_id, service2_id], source_groups=[source_group], dest_groups=[dest_group], logged=False, tag=tag, tenant=TEST_TENANT) expected_entry_def = self.entryDef( domain_id=domain_id, map_id=map_id, entry_id=mock.ANY, name=name, action=constants.ACTION_ALLOW, direction=nsx_constants.IN_OUT, ip_protocol=nsx_constants.IPV4_IPV6, description=description, service_ids=[service1_id, service2_id], source_groups=[source_group], dest_groups=[dest_group], sequence_number=seq_num + 1, scope=None, logged=False, tag=tag, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_entry_def) self.assertIsNotNone(result) def test_create_with_entries(self): domain_id = '111' map_id = '222' name = 'cm1' description = 'desc' source_group = 'g1' dest_group = 'g2' service_id = 'c1' category = 'Emergency' ip_protocol = nsx_constants.IPV4 map_seq_num = 10 rule_id = 1 entry1 = self.resourceApi.build_entry( 'DHCP Reply', domain_id, map_id, rule_id, sequence_number=rule_id, service_ids=[service_id], action=constants.ACTION_DENY, source_groups=None, dest_groups=[dest_group], direction=nsx_constants.IN, ip_protocol=ip_protocol) self.assertEqual(rule_id, entry1.get_id()) rule_id += 1 entry2 = self.resourceApi.build_entry( 'DHCP Request', domain_id, map_id, rule_id, sequence_number=rule_id, service_ids=None, action=constants.ACTION_DENY, source_groups=[source_group], dest_groups=None, direction=nsx_constants.OUT, ip_protocol=ip_protocol) self.assertEqual(rule_id, entry2.get_id()) with mock.patch.object(self.policy_api, "create_with_parent") as api_call: result = self.resourceApi.create_with_entries( name, domain_id, map_id=map_id, description=description, entries=[entry1, entry2], category=category, map_sequence_number=map_seq_num, tenant=TEST_TENANT) expected_def = self.mapDef( domain_id=domain_id, map_id=map_id, name=name, description=description, category=category, map_sequence_number=map_seq_num, tenant=TEST_TENANT) self.assert_called_with_defs(api_call, [expected_def, entry1, entry2]) self.assertEqual(map_id, result) def test_create_with_entries_no_id(self): domain_id = '111' map_id = '222' name = 'cm1' description = 'desc' source_group = 'g1' dest_group = 'g2' service_id = 'c1' category = 'Emergency' ip_protocol = nsx_constants.IPV4 map_seq_num = 10 rule_id = 1 entry1 = self.resourceApi.build_entry( 'DHCP Reply', domain_id, map_id, sequence_number=rule_id, service_ids=[service_id], action=constants.ACTION_DENY, source_groups=None, dest_groups=[dest_group], direction=nsx_constants.IN, ip_protocol=ip_protocol) self.assertIsNotNone(entry1.get_id()) rule_id += 1 entry2 = self.resourceApi.build_entry( 'DHCP Request', domain_id, map_id, sequence_number=rule_id, service_ids=None, action=constants.ACTION_DENY, source_groups=[source_group], dest_groups=None, direction=nsx_constants.OUT, ip_protocol=ip_protocol) 
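        # Unlike test_create_with_entries above, build_entry() is called
        # here without an explicit entry id, so the test only asserts that
        # some id was auto-generated (assertIsNotNone) instead of comparing
        # it against rule_id.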
self.assertIsNotNone(entry2.get_id()) with mock.patch.object(self.policy_api, "create_with_parent") as api_call: result = self.resourceApi.create_with_entries( name, domain_id, map_id=map_id, description=description, entries=[entry1, entry2], category=category, map_sequence_number=map_seq_num, tenant=TEST_TENANT) expected_def = self.mapDef( domain_id=domain_id, map_id=map_id, name=name, description=description, category=category, map_sequence_number=map_seq_num, tenant=TEST_TENANT) self.assert_called_with_defs(api_call, [expected_def, entry1, entry2]) self.assertEqual(map_id, result) def test_delete(self): domain_id = '111' map_id = '222' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(domain_id, map_id, tenant=TEST_TENANT) expected_def = self.mapDef( domain_id=domain_id, map_id=map_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_delete_entry(self): domain_id = '111' map_id = '222' entry_id = '333' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete_entry(domain_id, map_id, entry_id, tenant=TEST_TENANT) expected_def = self.entryDef( domain_id=domain_id, map_id=map_id, entry_id=entry_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): domain_id = '111' map_id = '222' with mock.patch.object(self.policy_api, "get", return_value={'id': map_id}) as api_call: result = self.resourceApi.get(domain_id, map_id, tenant=TEST_TENANT) expected_def = self.mapDef( domain_id=domain_id, map_id=map_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(map_id, result['id']) def test_get_entry(self): domain_id = '111' map_id = '222' entry_id = '333' with mock.patch.object(self.policy_api, "get", return_value={'id': entry_id}) as api_call: result = self.resourceApi.get_entry(domain_id, map_id, entry_id, tenant=TEST_TENANT) expected_def = self.entryDef( domain_id=domain_id, map_id=map_id, entry_id=entry_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(entry_id, result['id']) def test_get_by_name(self): domain_id = '111' name = 'cm1' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(domain_id, name, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = self.mapDef( domain_id=domain_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_list(self): domain_id = '111' with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(domain_id, tenant=TEST_TENANT) expected_def = self.mapDef( domain_id=domain_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_update(self): domain_id = '111' map_id = '222' name = 'new name' description = 'new desc' source_group = 'ng1' dest_group = 'ng2' service1_id = 'nc1' service2_id = 'nc2' category = constants.CATEGORY_EMERGENCY with mock.patch.object(self.policy_api, "get", return_value={}),\ mock.patch.object(self.resourceApi, "get", return_value={'category': category}),\ mock.patch.object(self.policy_api, "create_with_parent") as update_call: self.resourceApi.update(domain_id, map_id, name=name, description=description, service_ids=[service1_id, service2_id], source_groups=[source_group], dest_groups=[dest_group], tenant=TEST_TENANT) map_def = self.mapDef( domain_id=domain_id, map_id=map_id, 
name=name, description=description, category=category, tenant=TEST_TENANT) entry_def = self.entryDef( domain_id=domain_id, map_id=map_id, entry_id='entry', service_ids=[service1_id, service2_id], source_groups=[source_group], dest_groups=[dest_group], tenant=TEST_TENANT) self.assert_called_with_defs(update_call, [map_def, entry_def]) def test_update_entry(self): domain_id = '111' map_id = '222' entry_id = 'entry' name = 'new name' description = 'new desc' source_group = 'ng1' dest_group = 'ng2' service1_id = 'nc1' service2_id = 'nc2' with mock.patch.object(self.policy_api, "get", return_value={}),\ mock.patch.object(self.policy_api, "create_or_update") as update_call: self.resourceApi.update_entry( domain_id, map_id, entry_id, name=name, description=description, service_ids=[service1_id, service2_id], source_groups=[source_group], dest_groups=[dest_group], tenant=TEST_TENANT) entry_def = self.entryDef( domain_id=domain_id, map_id=map_id, entry_id=entry_id, name=name, description=description, service_ids=[service1_id, service2_id], source_groups=[source_group], dest_groups=[dest_group], tenant=TEST_TENANT) self.assert_called_with_def(update_call, entry_def) def test_update_entries(self): domain_id = '111' map_id = '222' entries = "fake_entries" with mock.patch.object(self.resourceApi, "update_with_entries") as update_call: self.resourceApi.update_entries( domain_id, map_id, entries, tenant=TEST_TENANT) update_call.assert_called_once_with( domain_id, map_id, entries, category=constants.CATEGORY_APPLICATION, tenant=TEST_TENANT) def test_update_with_entries(self): domain_id = '111' map_id = '222' entry1_id = 'entry1' entry2_id = 'entry2' entry3_id = 'entry3' entry1 = self.entryDef( domain_id=domain_id, map_id=map_id, entry_id=entry1_id, scope=['new_scope1'], tenant=TEST_TENANT) entry2 = self.entryDef( domain_id=domain_id, map_id=map_id, entry_id=entry2_id, scope=['scope2'], tenant=TEST_TENANT) original_map = { 'id': map_id, 'resource_type': self.resource_type, 'category': constants.CATEGORY_APPLICATION, 'display_name': 'map_name', 'rules': [ {'id': entry1_id, 'resource_type': 'Rule', 'display_name': 'name1', 'scope': ['scope1']}, {'id': entry2_id, 'resource_type': 'Rule', 'display_name': 'name2', 'scope': ['scope2']}, {'id': entry3_id, 'resource_type': 'Rule', 'display_name': 'name3', 'scope': ['scope3']}]} updated_map = { 'id': map_id, 'resource_type': self.resource_type, 'category': constants.CATEGORY_APPLICATION, 'display_name': 'new_map_name', 'rules': [ {'id': entry1_id, 'resource_type': 'Rule', 'display_name': 'name1', 'scope': ['new_scope1']}, {'id': entry2_id, 'resource_type': 'Rule', 'display_name': 'name2', 'scope': ['scope2']}]} map_def = self.mapDef( domain_id=domain_id, map_id=map_id, tenant=TEST_TENANT) with mock.patch.object(self.policy_api, "get", return_value=original_map),\ mock.patch.object(self.policy_api.client, "update") as update_call: self.resourceApi.update_with_entries( domain_id, map_id, entries=[entry1, entry2], name='new_map_name', tenant=TEST_TENANT) update_call.assert_called_once_with( map_def.get_resource_path(), updated_map) def test_update_with_entries_for_IGNORE_entries(self): domain_id = '111' map_id = '222' entry1_id = 'entry1' entry2_id = 'entry2' entry3_id = 'entry3' original_map = { 'id': map_id, 'resource_type': self.resource_type, 'category': constants.CATEGORY_APPLICATION, 'display_name': 'map_name', 'rules': [ {'id': entry1_id, 'resource_type': 'Rule', 'display_name': 'name1', 'scope': ['scope1'], '_created_time': 1}, {'id': entry2_id, 
'resource_type': 'Rule', 'display_name': 'name2', 'scope': ['scope2']}, {'id': entry3_id, 'resource_type': 'Rule', 'display_name': 'name3', 'scope': ['scope3']}]} updated_map = { 'id': map_id, 'resource_type': self.resource_type, 'category': constants.CATEGORY_APPLICATION, 'display_name': 'new_map_name', 'rules': [ {'id': entry1_id, 'resource_type': 'Rule', 'display_name': 'name1', 'scope': ['scope1'], '_created_time': 1}, {'id': entry2_id, 'resource_type': 'Rule', 'display_name': 'name2', 'scope': ['scope2']}, {'id': entry3_id, 'resource_type': 'Rule', 'display_name': 'name3', 'scope': ['scope3']}]} map_def = self.mapDef( domain_id=domain_id, map_id=map_id, tenant=TEST_TENANT) with mock.patch.object(self.policy_api, "get", return_value=original_map),\ mock.patch.object(self.policy_api.client, "update") as update_call: self.resourceApi.update_with_entries( domain_id, map_id, name='new_map_name', tenant=TEST_TENANT) update_call.assert_called_once_with( map_def.get_resource_path(), updated_map) def test_unset(self): name = 'hello' domain_id = 'test' map_id = '111' dest_groups = ['/infra/stuff'] category = constants.CATEGORY_EMERGENCY # Until policy PATCH is fixed to accept partial update, we # call get on child entry with mock.patch.object( self.policy_api, "get", return_value={'display_name': name, 'source_groups': ['/infra/other/stuff'], 'destination_groups': dest_groups}),\ mock.patch.object(self.resourceApi, "get", return_value={'category': category}): self.resourceApi.update(domain_id, map_id, description=None, source_groups=None, service_ids=None, tenant=TEST_TENANT) expected_body = {'id': map_id, 'description': None, 'category': category, 'resource_type': self.resource_type, 'rules': [{ 'display_name': name, 'id': 'entry', 'resource_type': 'Rule', 'services': ["ANY"], 'source_groups': ["ANY"], 'destination_groups': dest_groups}] } url = '%s/domains/%s/%s/%s' % (TEST_TENANT, domain_id, self.path_name, map_id) self.assert_json_call('PATCH', self.client, url, data=expected_body) def test_update_entries_logged(self): domain_id = '111' map_id = '222' dummy_map = {'rules': [{'logged': False}]} updated_map = {'rules': [{'logged': True}]} map_def = self.mapDef( domain_id=domain_id, map_id=map_id, tenant=TEST_TENANT) with mock.patch.object(self.policy_api, "get", return_value=dummy_map),\ mock.patch.object(self.policy_api.client, "update") as update_call: self.resourceApi.update_entries_logged( domain_id, map_id, logged=True, tenant=TEST_TENANT) update_call.assert_called_once_with( map_def.get_resource_path(), updated_map) def test_get_realized(self): domain_id = 'd1' map_id = '111' result = [{'state': constants.STATE_REALIZED, 'entity_type': 'RealizedFirewallSection'}] with mock.patch.object( self.policy_api, "get_realized_entities", return_value=result) as api_get: state = self.resourceApi.get_realized_state( domain_id, map_id, tenant=TEST_TENANT) self.assertEqual(constants.STATE_REALIZED, state) path = "/%s/domains/%s/%s/%s" % ( TEST_TENANT, domain_id, self.path_name, map_id) api_get.assert_called_once_with(path, silent=False) class TestPolicyGatewayPolicy(TestPolicyCommunicationMap): def setUp(self, *args, **kwargs): super(TestPolicyGatewayPolicy, self).setUp() self.resourceApi = self.policy_lib.gateway_policy self.mapDef = core_defs.GatewayPolicyDef self.entryDef = core_defs.GatewayPolicyRuleDef self.resource_type = 'GatewayPolicy' self.path_name = 'gateway-policies' class TestPolicyEnforcementPoint(NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyEnforcementPoint, 
self).setUp() self.resourceApi = self.policy_lib.enforcement_point def test_create(self): name = 'ep' description = 'desc' ip_address = '1.1.1.1' username = 'admin' password = 'zzz' thumbprint = 'abc' edge_cluster_id = 'ec1' transport_zone_id = 'tz1' with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, description=description, ip_address=ip_address, thumbprint=thumbprint, username=username, password=password, edge_cluster_id=edge_cluster_id, transport_zone_id=transport_zone_id, tenant=TEST_TENANT) expected_def = core_defs.EnforcementPointDef( ep_id=mock.ANY, name=name, description=description, ip_address=ip_address, username=username, thumbprint=thumbprint, password=password, edge_cluster_id=edge_cluster_id, transport_zone_id=transport_zone_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_delete(self): ef_id = '111' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(ef_id, tenant=TEST_TENANT) expected_def = core_defs.EnforcementPointDef(ep_id=ef_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): ef_id = '111' with mock.patch.object(self.policy_api, "get", return_value={'id': ef_id}) as api_call: result = self.resourceApi.get(ef_id, tenant=TEST_TENANT) expected_def = core_defs.EnforcementPointDef(ep_id=ef_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(ef_id, result['id']) def test_get_by_name(self): name = 'ep1' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = core_defs.EnforcementPointDef(tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_list(self): with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(tenant=TEST_TENANT) expected_def = core_defs.EnforcementPointDef(tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_update(self): ef_id = '111' name = 'new name' username = 'admin' password = 'zzz' ip_address = '1.1.1.1' thumbprint = 'abc' edge_cluster_id = 'ec1' transport_zone_id = 'tz1' entry = {'id': ef_id, 'connection_info': {'thumbprint': thumbprint, 'resource_type': 'NSXTConnectionInfo'}} with mock.patch.object(self.policy_api, "create_or_update") as update_call,\ mock.patch.object(self.policy_api, "get", return_value=entry): self.resourceApi.update(ef_id, name=name, username=username, password=password, ip_address=ip_address, edge_cluster_id=edge_cluster_id, transport_zone_id=transport_zone_id, tenant=TEST_TENANT) expected_def = core_defs.EnforcementPointDef( ep_id=ef_id, name=name, username=username, password=password, ip_address=ip_address, thumbprint=thumbprint, edge_cluster_id=edge_cluster_id, transport_zone_id=transport_zone_id, tenant=TEST_TENANT) self.assert_called_with_def(update_call, expected_def) def test_get_realized(self): ep_id = 'ef1' result = [{'state': constants.STATE_REALIZED}] with mock.patch.object( self.policy_api, "get_realized_entities", return_value=result) as api_get: state = self.resourceApi.get_realized_state( ep_id, tenant=TEST_TENANT) self.assertEqual(constants.STATE_REALIZED, state) path = "/%s/sites/default/enforcement-points/%s" % ( TEST_TENANT, ep_id) 
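        # The realization state is looked up via the enforcement point's
        # policy path under the default site, e.g.
        # "/<tenant>/sites/default/enforcement-points/ef1" for ep_id 'ef1'.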
api_get.assert_called_once_with(path, silent=False) def test_reload(self): ef_id = '111' with mock.patch.object(self.policy_api.client, "url_post") as api_post: self.resourceApi.reload(ef_id, tenant=TEST_TENANT) expected_def = core_defs.EnforcementPointDef(ep_id=ef_id, tenant=TEST_TENANT) api_post.assert_called_once_with( expected_def.get_resource_path() + '?action=reload', None, expected_results=None, headers=None) class TestPolicyDeploymentMap(NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyDeploymentMap, self).setUp() self.resourceApi = self.policy_lib.deployment_map def test_create(self): name = 'map1' description = 'desc' domain_id = 'domain1' ep_id = 'ep1' with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, description=description, ep_id=ep_id, domain_id=domain_id, tenant=TEST_TENANT) expected_def = core_defs.DeploymentMapDef( map_id=mock.ANY, name=name, description=description, ep_id=ep_id, domain_id=domain_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_delete(self): obj_id = '111' domain_id = 'domain1' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(obj_id, domain_id=domain_id, tenant=TEST_TENANT) expected_def = core_defs.DeploymentMapDef(map_id=obj_id, domain_id=domain_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): obj_id = '111' domain_id = 'domain1' with mock.patch.object(self.policy_api, "get", return_value={'id': obj_id}) as api_call: result = self.resourceApi.get(obj_id, domain_id=domain_id, tenant=TEST_TENANT) expected_def = core_defs.DeploymentMapDef(map_id=obj_id, domain_id=domain_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(obj_id, result['id']) def test_get_by_name(self): name = 'ep1' domain_id = 'domain1' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(name, domain_id=domain_id, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = core_defs.DeploymentMapDef(domain_id=domain_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_list(self): domain_id = 'domain1' with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(domain_id=domain_id, tenant=TEST_TENANT) expected_def = core_defs.DeploymentMapDef(domain_id=domain_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_update(self): obj_id = '111' name = 'new name' domain_id = 'domain2' ep_id = 'ep2' with self.mock_get(domain_id, name), \ self.mock_create_update() as update_call: self.resourceApi.update(obj_id, name=name, ep_id=ep_id, domain_id=domain_id, tenant=TEST_TENANT) expected_def = core_defs.DeploymentMapDef( map_id=obj_id, name=name, ep_id=ep_id, domain_id=domain_id, tenant=TEST_TENANT) self.assert_called_with_def( update_call, expected_def) class TestPolicyTransportZone(NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyTransportZone, self).setUp() self.resourceApi = self.policy_lib.transport_zone def test_get(self): obj_id = '111' with mock.patch.object(self.policy_api, "get", return_value={'id': obj_id}) as api_call: result = self.resourceApi.get(obj_id, tenant=TEST_TENANT) expected_def = core_defs.TransportZoneDef(tz_id=obj_id, 
tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(obj_id, result['id']) def test_get_with_cache(self): """Verify that cache is used for GET""" obj_id = '111' with mock.patch.object(self.policy_api.client, "get") as client_get: self.resourceApi.get(obj_id, tenant=TEST_TENANT) self.resourceApi.get(obj_id, tenant=TEST_TENANT) self.assertEqual(1, client_get.call_count) def test_get_by_name(self): name = 'tz1' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = core_defs.TransportZoneDef(tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get_tz_type(self): obj_id = '111' tz_type = self.resourceApi.TZ_TYPE_OVERLAY with mock.patch.object(self.policy_api, "get", return_value={'tz_type': tz_type}) as api_call: actual_tz_type = self.resourceApi.get_tz_type( obj_id, tenant=TEST_TENANT) expected_def = core_defs.TransportZoneDef(tz_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(tz_type, actual_tz_type) def test_get_transport_type(self): obj_id = '111' tz_type = self.resourceApi.TZ_TYPE_OVERLAY with mock.patch.object(self.policy_api, "get", return_value={'tz_type': tz_type}) as api_call: actual_tz_type = self.resourceApi.get_transport_type( obj_id, tenant=TEST_TENANT) expected_def = core_defs.TransportZoneDef(tz_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(nsx_constants.TRANSPORT_TYPE_OVERLAY, actual_tz_type) def test_get_switch_mode(self): obj_id = '111' tz_type = self.resourceApi.TZ_TYPE_OVERLAY with mock.patch.object(self.policy_api, "get", return_value={'tz_type': tz_type}) as api_call: actual_sm = self.resourceApi.get_host_switch_mode( obj_id, tenant=TEST_TENANT) expected_def = core_defs.TransportZoneDef(tz_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(nsx_constants.HOST_SWITCH_MODE_STANDARD, actual_sm) def test_list(self): with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(tenant=TEST_TENANT) expected_def = core_defs.TransportZoneDef(tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) class TestPolicyEdgeCluster(NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyEdgeCluster, self).setUp() self.resourceApi = self.policy_lib.edge_cluster def test_get(self): obj_id = '111' with mock.patch.object(self.policy_api, "get", return_value={'id': obj_id}) as api_call: result = self.resourceApi.get(obj_id, tenant=TEST_TENANT) expected_def = core_defs.EdgeClusterDef(ec_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(obj_id, result['id']) def test_get_by_name(self): name = 'tz1' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = core_defs.EdgeClusterDef(tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_list(self): with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(tenant=TEST_TENANT) expected_def = core_defs.EdgeClusterDef(tenant=TEST_TENANT) 
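            # The expected definition carries only the tenant: listing edge
            # clusters does not require an ec_id, and these tests exercise
            # read operations only (get, get_by_name, list, get_nodes).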
self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_get_nodes(self): obj_id = '111' node_id = 'node1' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'id': node_id}]}) as api_call: result = self.resourceApi.get_edge_node_ids( obj_id, tenant=TEST_TENANT) expected_def = core_defs.EdgeClusterNodeDef( ec_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([node_id], result) class TestPolicyMetadataProxy(NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyMetadataProxy, self).setUp() self.resourceApi = self.policy_lib.md_proxy def test_get(self): obj_id = '111' with mock.patch.object(self.policy_api, "get", return_value={'id': obj_id}) as api_call: result = self.resourceApi.get(obj_id, tenant=TEST_TENANT) expected_def = core_defs.MetadataProxyDef(mdproxy_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(obj_id, result['id']) def test_get_by_name(self): name = 'tz1' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = core_defs.MetadataProxyDef(tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_list(self): with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(tenant=TEST_TENANT) expected_def = core_defs.MetadataProxyDef(tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) class TestPolicyTier1(NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyTier1, self).setUp(*args, **kwargs) self.resourceApi = self.policy_lib.tier1 self.partial_updates = True def test_create(self): name = 'test' description = 'desc' tier0_id = '111' pool_alloc_type = 'LB_SMALL' route_adv = self.resourceApi.build_route_advertisement( lb_vip=True, lb_snat=True) ipv6_profile_id = '222' with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, description=description, tier0=tier0_id, force_whitelisting=True, route_advertisement=route_adv, pool_allocation=pool_alloc_type, ipv6_ndra_profile_id=ipv6_profile_id, tenant=TEST_TENANT) expected_def = core_defs.Tier1Def( nsx_version=self.policy_lib.get_version(), tier1_id=mock.ANY, name=name, description=description, tier0=tier0_id, force_whitelisting=True, failover_mode=constants.NON_PREEMPTIVE, route_advertisement=route_adv, pool_allocation=pool_alloc_type, ipv6_ndra_profile_id=ipv6_profile_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_create_no_ipv6_profile(self): name = 'test' description = 'desc' tier0_id = '111' pool_alloc_type = 'LB_SMALL' route_adv = self.resourceApi.build_route_advertisement( lb_vip=True, lb_snat=True) with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, description=description, tier0=tier0_id, force_whitelisting=True, route_advertisement=route_adv, pool_allocation=pool_alloc_type, ipv6_ndra_profile_id=None, tenant=TEST_TENANT) expected_def = core_defs.Tier1Def( nsx_version=self.policy_lib.get_version(), tier1_id=mock.ANY, name=name, description=description, tier0=tier0_id, force_whitelisting=True, failover_mode=constants.NON_PREEMPTIVE, 
route_advertisement=route_adv, pool_allocation=pool_alloc_type, ipv6_ndra_profile_id=None, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_delete(self): obj_id = '111' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(obj_id, tenant=TEST_TENANT) expected_def = core_defs.Tier1Def(tier1_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): obj_id = '111' with mock.patch.object(self.policy_api, "get", return_value={'id': obj_id}) as api_call: result = self.resourceApi.get(obj_id, tenant=TEST_TENANT) expected_def = core_defs.Tier1Def(tier1_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(obj_id, result['id']) def test_get_path(self): obj_id = '111' result = self.resourceApi.get_path(obj_id, tenant=TEST_TENANT) self.assertEqual('/%s/tier-1s/%s' % (TEST_TENANT, obj_id), result) def test_get_with_no_cache(self): """Make sure cache is not used for GET requests""" obj_id = '111' with mock.patch.object(self.policy_api.client, "get") as client_get: self.resourceApi.get(obj_id, tenant=TEST_TENANT) self.resourceApi.get(obj_id, tenant=TEST_TENANT) self.assertEqual(2, client_get.call_count) def test_get_by_name(self): name = 'test' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = core_defs.Tier1Def(tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_list(self): with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(tenant=TEST_TENANT) expected_def = core_defs.Tier1Def(tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_update(self): obj_id = '111' name = 'new name' tier0 = 'tier0' pool_alloc_type = 'LB_SMALL' with self.mock_get(obj_id, name), \ self.mock_create_update() as update_call: self.resourceApi.update(obj_id, name=name, tier0=tier0, enable_standby_relocation=False, pool_allocation=pool_alloc_type, tenant=TEST_TENANT) expected_def = core_defs.Tier1Def( nsx_version=self.policy_lib.get_version(), tier1_id=obj_id, name=name, tier0=tier0, enable_standby_relocation=False, pool_allocation=pool_alloc_type, tenant=TEST_TENANT) self.assert_called_with_def( update_call, expected_def) def test_update_ignore_tier0(self): obj_id = '111' name = 'new name' with mock.patch.object(self.policy_api, "get", return_value={}),\ mock.patch.object(self.policy_api, "create_or_update") as update_call: self.resourceApi.update(obj_id, name=name, enable_standby_relocation=False, tenant=TEST_TENANT) expected_def = core_defs.Tier1Def(tier1_id=obj_id, name=name, enable_standby_relocation=False, tenant=TEST_TENANT) self.assert_called_with_def(update_call, expected_def) # make sure tier0 is not in the body actual_def = update_call.call_args_list[0][0][0] self.assertNotIn('tier0_path', actual_def.get_obj_dict()) def test_update_unset_tier0(self): obj_id = '111' name = 'new name' description = 'abc' with mock.patch.object(self.policy_api, "get", return_value={}),\ mock.patch.object(self.policy_api, "create_or_update") as update_call: self.resourceApi.update(obj_id, name=name, description=description, tier0=None, enable_standby_relocation=False, tenant=TEST_TENANT) expected_def = core_defs.Tier1Def(tier1_id=obj_id, 
name=name, description=description, tier0=None, enable_standby_relocation=False, tenant=TEST_TENANT) self.assert_called_with_def(update_call, expected_def) # make sure tier0 is in the body with value None actual_def = update_call.call_args_list[0][0][0] self.assertIn('tier0_path', actual_def.get_obj_dict()) self.assertEqual("", actual_def.get_obj_dict()['tier0_path']) def test_update_route_adv(self): obj_id = '111' rtr_name = 'rtr111' get_result = {'id': obj_id, 'display_name': rtr_name, 'route_advertisement_types': ['TIER1_NAT', 'TIER1_LB_VIP']} with mock.patch.object(self.policy_api, "get", return_value=get_result),\ mock.patch.object(self.policy_api, "create_or_update") as update_call: self.resourceApi.update_route_advertisement( obj_id, static_routes=True, lb_vip=False, lb_snat=True, ipsec_endpoints=True, tenant=TEST_TENANT) new_adv = self.resourceApi.build_route_advertisement( nat=True, static_routes=True, lb_snat=True, ipsec_endpoints=True) expected_def = core_defs.Tier1Def( tier1_id=obj_id, route_advertisement=new_adv, tenant=TEST_TENANT) if not self.partial_updates: expected_def.attrs['name'] = rtr_name self.assert_called_with_def( update_call, expected_def) def test_update_route_adv_and_tier0(self): obj_id = '111' rtr_name = 'rtr111' tier0 = 'tier0-id' get_result = {'id': obj_id, 'display_name': rtr_name, 'route_advertisement_types': ['TIER1_NAT', 'TIER1_LB_VIP']} with mock.patch.object(self.policy_api, "get", return_value=get_result),\ mock.patch.object(self.policy_api, "create_or_update") as update_call: self.resourceApi.update_route_advertisement( obj_id, static_routes=True, lb_vip=False, lb_snat=True, tier0=tier0, tenant=TEST_TENANT) new_adv = self.resourceApi.build_route_advertisement( nat=True, static_routes=True, lb_snat=True) expected_def = core_defs.Tier1Def( tier1_id=obj_id, route_advertisement=new_adv, tier0=tier0, tenant=TEST_TENANT) if not self.partial_updates: expected_def.attrs['name'] = rtr_name self.assert_called_with_def( update_call, expected_def) def test_set_enable_standby_relocation(self): obj_id = '111' name = 'new name' tier0 = 'tier0' with mock.patch.object(self.policy_api, "get", return_value={}),\ mock.patch.object(self.policy_api, "create_or_update") as update_call: self.resourceApi.update(obj_id, name=name, tier0=tier0, enable_standby_relocation=True, tenant=TEST_TENANT) expected_def = core_defs.Tier1Def(tier1_id=obj_id, name=name, tier0=tier0, enable_standby_relocation=True, tenant=TEST_TENANT) self.assert_called_with_def( update_call, expected_def) def test_wait_until_realized_fail(self): tier1_id = '111' logical_router_id = 'realized_111' info = {'state': constants.STATE_UNREALIZED, 'realization_specific_identifier': logical_router_id, 'entity_type': 'RealizedLogicalRouter'} with mock.patch.object(self.resourceApi, "_get_realization_info", return_value=info): self.assertRaises(nsxlib_exc.RealizationTimeoutError, self.resourceApi.wait_until_realized, tier1_id, max_attempts=5, sleep=0.1, tenant=TEST_TENANT) def test_wait_until_realized_succeed(self): tier1_id = '111' logical_router_id = 'realized_111' info = {'state': constants.STATE_REALIZED, 'realization_specific_identifier': logical_router_id, 'entity_type': 'RealizedLogicalRouter'} with mock.patch.object(self.resourceApi, "_get_realization_info", return_value=info): actual_info = self.resourceApi.wait_until_realized( tier1_id, max_attempts=5, sleep=0.1, tenant=TEST_TENANT) self.assertEqual(info, actual_info) def test_update_transport_zone(self): # Test the passthrough api tier1_id = '111' 
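        # update_transport_zone() has no policy API equivalent, so the
        # resource is expected to resolve the realized logical router and
        # update it through the passthrough (manager) API; the mocks below
        # verify that nsx_api.logical_router.update receives the realized
        # id and the target transport zone.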
logical_router_id = 'realized_111' tz_uuid = 'dummy_tz' info = {'state': constants.STATE_REALIZED, 'entity_type': 'RealizedLogicalRouter', 'realization_specific_identifier': logical_router_id} passthrough_mock = self.resourceApi.nsx_api.logical_router.update with mock.patch.object(self.resourceApi, "_get_realization_info", return_value=info) as realization,\ mock.patch.object(self.resourceApi, "_get_realized_id_using_search", return_value=logical_router_id): self.resourceApi.update_transport_zone(tier1_id, tz_uuid, tenant=TEST_TENANT) realization.assert_called_once() passthrough_mock.assert_called_once_with( logical_router_id, transport_zone_id=tz_uuid) def test_wait_until_realized(self): tier1_id = '111' logical_router_id = 'realized_111' info = {'state': constants.STATE_UNREALIZED, 'realization_specific_identifier': logical_router_id} with mock.patch.object(self.resourceApi, "_get_realization_info", return_value=info): self.assertRaises(nsxlib_exc.RealizationTimeoutError, self.resourceApi.wait_until_realized, tier1_id, tenant=TEST_TENANT, max_attempts=5, sleep=0.1) def test_get_realized_downlink_port(self): tier1_id = '111' segment_id = '222' lrp_id = '333' info = {'state': constants.STATE_REALIZED, 'realization_specific_identifier': lrp_id, 'entity_type': 'RealizedLogicalRouterPort'} dummy_port = {'resource_type': nsx_constants.LROUTERPORT_DOWNLINK, 'id': lrp_id, 'display_name': 'test_%s' % segment_id} with mock.patch.object(self.resourceApi.policy_api, "get_realized_entities", return_value=[info]),\ mock.patch.object(self.resourceApi.nsx_api.logical_router_port, "get", return_value=dummy_port): actual_id = self.resourceApi._get_realized_downlink_port( tier1_id, segment_id) self.assertEqual(lrp_id, actual_id) def test_set_dhcp_relay(self): tier1_id = '111' segment_id = '222' lrp_id = '333' relay_id = '444' info = {'state': constants.STATE_REALIZED, 'realization_specific_identifier': lrp_id, 'entity_type': 'RealizedLogicalRouterPort'} dummy_port = {'resource_type': nsx_constants.LROUTERPORT_DOWNLINK, 'id': lrp_id, 'display_name': 'test_%s' % segment_id} with mock.patch.object(self.resourceApi.policy_api, "get_realized_entities", return_value=[info]),\ mock.patch.object(self.resourceApi.nsx_api.logical_router_port, "get", return_value=dummy_port),\ mock.patch.object(self.resourceApi.nsx_api.logical_router_port, "update") as nsx_lrp_update: self.resourceApi.set_dhcp_relay(tier1_id, segment_id, relay_id) nsx_lrp_update.assert_called_once_with( lrp_id, relay_service_uuid=relay_id) def test_get_locale_tier1_services(self): tier1_id = '111' path = 'dummy/path' mock_result = [{'edge_cluster_path': path}, {'test': 'test'}] with mock.patch.object(self.policy_api, "list", return_value={'results': mock_result}): self.assertEqual( self.resourceApi.get_locale_tier1_services(tier1_id), mock_result) def test_get_edge_cluster_by_searching(self): tier1_id = '111' path = 'dummy/path' with mock.patch.object(self.resourceApi, "get_locale_tier1_services", return_value=[{'edge_cluster_path': path}, {'test': 'test'}]): result = self.resourceApi.get_edge_cluster_path_by_searching( tier1_id, tenant=TEST_TENANT) self.assertEqual(path, result) def test_get_edge_cluster(self): tier1_id = '111' path = 'dummy/path' with mock.patch.object(self.policy_api, "get", return_value={'edge_cluster_path': path}): result = self.resourceApi.get_edge_cluster_path( tier1_id, tenant=TEST_TENANT) self.assertEqual(path, result) def test_set_edge_cluster(self): tier1_id = '111' path = 'dummy/path' with 
mock.patch.object(self.policy_api, "create_or_update") as api_call: self.resourceApi.set_edge_cluster_path( tier1_id, path, tenant=TEST_TENANT) expected_def = core_defs.Tier1LocaleServiceDef( tier1_id=tier1_id, service_id=self.resourceApi._locale_service_id(tier1_id), edge_cluster_path=path, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_remove_edge_cluster(self): tier1_id = '111' with mock.patch.object(self.policy_api, "create_or_update") as api_call: self.resourceApi.remove_edge_cluster( tier1_id, tenant=TEST_TENANT) expected_def = core_defs.Tier1LocaleServiceDef( tier1_id=tier1_id, service_id=self.resourceApi._locale_service_id(tier1_id), edge_cluster_path="", tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_create_locale_service(self): tier1_id = '111' with mock.patch.object(self.policy_api, "create_or_update") as api_call: self.resourceApi.create_locale_service( tier1_id, tenant=TEST_TENANT) expected_def = core_defs.Tier1LocaleServiceDef( tier1_id=tier1_id, service_id=self.resourceApi._locale_service_id(tier1_id), tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_delete_locale_service(self): tier1_id = '111' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete_locale_service( tier1_id, tenant=TEST_TENANT) expected_def = core_defs.Tier1LocaleServiceDef( tier1_id=tier1_id, service_id=self.resourceApi._locale_service_id(tier1_id), tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_add_router_interface(self): tier1_id = '111' interface_id = 'seg-if' segment_id = 'seg' ip_addr = '1.1.1.1' prefix_len = '24' ndra_profile = 'slaac' subnet = core_defs.InterfaceSubnet([ip_addr], prefix_len) with mock.patch.object(self.policy_api, "create_or_update") as api_call: self.resourceApi.add_segment_interface( tier1_id, interface_id, segment_id, subnets=[subnet], ipv6_ndra_profile_id=ndra_profile, tenant=TEST_TENANT) expected_def = core_defs.Tier1InterfaceDef( tier1_id=tier1_id, service_id=self.resourceApi._locale_service_id(tier1_id), interface_id=interface_id, segment_id=segment_id, subnets=[subnet], ipv6_ndra_profile_id=ndra_profile, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_add_route_interface_subnet_as_dict(self): tier1_id = '111' interface_id = 'seg-if' segment_id = 'seg' ip_addr = '1.1.1.1' prefix_len = '24' ndra_profile = 'slaac' subnet = {'ip_addresses': ip_addr, 'prefix_len': prefix_len} with mock.patch.object(self.policy_api, "create_or_update") as api_call: self.resourceApi.add_segment_interface( tier1_id, interface_id, segment_id, subnets=[subnet], ipv6_ndra_profile_id=ndra_profile, tenant=TEST_TENANT) expected_def = core_defs.Tier1InterfaceDef( tier1_id=tier1_id, service_id=self.resourceApi._locale_service_id(tier1_id), interface_id=interface_id, segment_id=segment_id, subnets=[subnet], ipv6_ndra_profile_id=ndra_profile, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_add_router_interface_no_ndra(self): tier1_id = '111' interface_id = 'seg-if' segment_id = 'seg' ip_addr = '1.1.1.1' prefix_len = '24' subnet = core_defs.InterfaceSubnet([ip_addr], prefix_len) with mock.patch.object(self.policy_api, "create_or_update") as api_call: self.resourceApi.add_segment_interface( tier1_id, interface_id, segment_id, subnets=[subnet], tenant=TEST_TENANT) expected_def = core_defs.Tier1InterfaceDef( tier1_id=tier1_id, service_id=self.resourceApi._locale_service_id(tier1_id), 
interface_id=interface_id, segment_id=segment_id, subnets=[subnet], tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_remove_router_interface(self): tier1_id = '111' interface_id = 'seg-if' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.remove_segment_interface( tier1_id, interface_id, tenant=TEST_TENANT) expected_def = core_defs.Tier1InterfaceDef( tier1_id=tier1_id, service_id=self.resourceApi._locale_service_id(tier1_id), interface_id=interface_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_add_advertisement_rule(self): tier1_id = '111' rule_name = 'rule_name' rule_action = 'rule_action' rule_pfx_operator = 'GE' rule_adv_types = ['A'] rule_subnets = ['x', 'y', 'z'] with mock.patch.object(self.policy_api, "get", return_value={'id': tier1_id, 'resource_type': 'Tier1'}),\ mock.patch.object(self.policy_api, 'create_or_update') as api_call: self.resourceApi.add_advertisement_rule( tier1_id, rule_name, action=rule_action, prefix_operator=rule_pfx_operator, route_advertisement_types=rule_adv_types, subnets=rule_subnets, tenant=TEST_TENANT) expected_def = core_defs.Tier1Def( tier1_id=tier1_id, route_advertisement_rules=[ core_defs.RouteAdvertisementRule( rule_name, action=rule_action, prefix_operator=rule_pfx_operator, route_advertisement_types=rule_adv_types, subnets=rule_subnets)], tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_remove_advertisement_rule(self): tier1_id = '111' rule_name = 'rule_name' get_retval = { 'id': tier1_id, 'route_advertisement_rules': [{'name': rule_name}]} with mock.patch.object(self.policy_api, "get", return_value=get_retval),\ mock.patch.object(self.policy_api, 'create_or_update') as api_call: self.resourceApi.remove_advertisement_rule( tier1_id, rule_name, tenant=TEST_TENANT) expected_def = core_defs.Tier1Def( tier1_id=tier1_id, route_advertisement_rules=[], tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_update_advertisement_rules(self): tier1_id = '111' old_rule = 'old' new_rule = 'new' get_retval = { 'id': tier1_id, 'route_advertisement_rules': [{'name': old_rule}]} rules = [{'name': new_rule}] with mock.patch.object(self.policy_api, "get", return_value=get_retval),\ mock.patch.object(self.policy_api, 'create_or_update') as api_call: self.resourceApi.update_advertisement_rules( tier1_id, rules, name_prefix=None, tenant=TEST_TENANT) expected_def = core_defs.Tier1Def( tier1_id=tier1_id, route_advertisement_rules=rules, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_update_advertisement_rules_with_replace(self): tier1_id = '111' old_rule1 = 'old1' old_rule2 = 'old2' new_rule = 'new' get_retval = { 'id': tier1_id, 'route_advertisement_rules': [ {'name': old_rule1}, {'name': old_rule2}]} rules = [{'name': new_rule}] with mock.patch.object(self.policy_api, "get", return_value=get_retval),\ mock.patch.object(self.policy_api, 'create_or_update') as api_call: self.resourceApi.update_advertisement_rules( tier1_id, rules, name_prefix='old1', tenant=TEST_TENANT) expected_def = core_defs.Tier1Def( tier1_id=tier1_id, route_advertisement_rules=[ {'name': old_rule2}, {'name': new_rule}], tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_create_with_unsupported_attr(self): name = 'test' description = 'test_version_support' tier0_id = 'tier0' pool_alloc_type = 'LB_SMALL' route_adv = self.resourceApi.build_route_advertisement( lb_vip=True, 
lb_snat=True) with mock.patch.object( self.policy_api, "create_or_update") as api_call, \ mock.patch.object(self.resourceApi, 'version', '0.0.0'): result = self.resourceApi.create_or_overwrite( name, description=description, tier0=tier0_id, force_whitelisting=True, route_advertisement=route_adv, pool_allocation=pool_alloc_type, tenant=TEST_TENANT) expected_def = core_defs.Tier1Def( tier1_id=mock.ANY, name=name, description=description, tier0=tier0_id, force_whitelisting=True, failover_mode=constants.NON_PREEMPTIVE, route_advertisement=route_adv, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) class TestPolicyTier1NoPassthrough(TestPolicyTier1): def setUp(self, *args, **kwargs): super(TestPolicyTier1NoPassthrough, self).setUp( allow_passthrough=False) # No passthrough also means no partial updates self.partial_updates = False def test_update_transport_zone(self): # Will not work without passthrough api tier1_id = '111' tz_uuid = 'dummy_tz' with mock.patch.object(self.resourceApi, "_get_realization_info") as realization: self.resourceApi.update_transport_zone(tier1_id, tz_uuid, tenant=TEST_TENANT) realization.assert_not_called() def test_get_realized_downlink_port(self): # Will not work without passthrough api tier1_id = '111' segment_id = '222' with mock.patch.object(self.resourceApi.policy_api, "get_realized_entities") as realization: actual_id = self.resourceApi._get_realized_downlink_port( tier1_id, segment_id) self.assertIsNone(actual_id) realization.assert_not_called() def test_set_dhcp_relay(self): # Will not work without passthrough api tier1_id = '111' segment_id = '222' relay_id = '444' with mock.patch.object(self.resourceApi.policy_api, "get_realized_entities") as realization: self.resourceApi.set_dhcp_relay(tier1_id, segment_id, relay_id) realization.assert_not_called() class TestPolicyTier0NatRule(NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyTier0NatRule, self).setUp() self.resourceApi = self.policy_lib.tier0_nat_rule def test_create(self): name = 'test' description = 'desc' tier0_id = '111' nat_rule_id = 'rule1' action = constants.NAT_ACTION_SNAT firewall_match = constants.NAT_FIREWALL_MATCH_INTERNAL cidr1 = '1.1.1.1/32' cidr2 = '2.2.2.0/24' enabled = True with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, tier0_id, nat_rule_id=nat_rule_id, description=description, action=action, translated_network=cidr1, source_network=cidr2, firewall_match=firewall_match, tenant=TEST_TENANT, enabled=enabled) expected_def = core_defs.Tier0NatRule( tier0_id=tier0_id, nat_rule_id=nat_rule_id, nat_id=self.resourceApi.DEFAULT_NAT_ID, name=name, description=description, action=action, translated_network=cidr1, source_network=cidr2, firewall_match=firewall_match, tenant=TEST_TENANT, enabled=enabled) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_delete(self): tier0_id = '111' nat_rule_id = 'rule1' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete( tier0_id, nat_rule_id, tenant=TEST_TENANT) expected_def = core_defs.Tier0NatRule( tier0_id=tier0_id, nat_rule_id=nat_rule_id, nat_id=self.resourceApi.DEFAULT_NAT_ID, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): tier0_id = '111' nat_rule_id = 'rule1' with mock.patch.object(self.policy_api, "get") as api_call: mock_t0_nat_rule = mock.Mock() api_call.return_value = mock_t0_nat_rule 
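            # The mocked GET returns an opaque mock object; the assertions
            # below only check that it is passed back unchanged and that the
            # request was built from a Tier0NatRule definition keyed by
            # tier0_id and nat_rule_id.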
result = self.resourceApi.get(tier0_id, nat_rule_id, tenant=TEST_TENANT) expected_def = core_defs.Tier0NatRule( tier0_id=tier0_id, nat_rule_id=nat_rule_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(mock_t0_nat_rule, result) def test_update(self): name = 'test' description = 'desc' tier0_id = '111' nat_rule_id = 'rule1' action = constants.NAT_ACTION_SNAT firewall_match = constants.NAT_FIREWALL_MATCH_EXTERNAL cidr1 = '1.1.1.1/32' cidr2 = '2.2.2.0/24' enabled = True with mock.patch.object(self.policy_api, "create_or_update") as api_call: self.resourceApi.update( tier0_id, nat_rule_id, name=name, description=description, action=action, translated_network=cidr1, firewall_match=firewall_match, source_network=cidr2, tenant=TEST_TENANT, enabled=enabled) expected_def = core_defs.Tier0NatRule( tier0_id=tier0_id, nat_rule_id=nat_rule_id, nat_id=self.resourceApi.DEFAULT_NAT_ID, name=name, description=description, action=action, translated_network=cidr1, firewall_match=firewall_match, source_network=cidr2, tenant=TEST_TENANT, enabled=enabled) self.assert_called_with_def(api_call, expected_def) class TestPolicyTier1NatRule(NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyTier1NatRule, self).setUp() self.resourceApi = self.policy_lib.tier1_nat_rule def test_create(self): name = 'test' description = 'desc' tier1_id = '111' nat_rule_id = 'rule1' action = constants.NAT_ACTION_SNAT firewall_match = constants.NAT_FIREWALL_MATCH_INTERNAL cidr1 = '1.1.1.1/32' cidr2 = '2.2.2.0/24' enabled = True with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, tier1_id, nat_rule_id=nat_rule_id, description=description, action=action, translated_network=cidr1, firewall_match=firewall_match, source_network=cidr2, tenant=TEST_TENANT, enabled=enabled) expected_def = core_defs.Tier1NatRule( tier1_id=tier1_id, nat_rule_id=nat_rule_id, nat_id=self.resourceApi.DEFAULT_NAT_ID, name=name, description=description, action=action, translated_network=cidr1, firewall_match=firewall_match, source_network=cidr2, tenant=TEST_TENANT, enabled=enabled) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_delete(self): tier1_id = '111' nat_rule_id = 'rule1' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete( tier1_id, nat_rule_id, tenant=TEST_TENANT) expected_def = core_defs.Tier1NatRule( tier1_id=tier1_id, nat_rule_id=nat_rule_id, nat_id=self.resourceApi.DEFAULT_NAT_ID, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_update(self): name = 'test' description = 'desc' tier1_id = '111' nat_rule_id = 'rule1' action = constants.NAT_ACTION_SNAT firewall_match = constants.NAT_FIREWALL_MATCH_INTERNAL cidr1 = '1.1.1.1/32' cidr2 = '2.2.2.0/24' enabled = True with mock.patch.object(self.policy_api, "create_or_update") as api_call: self.resourceApi.update( tier1_id, nat_rule_id, name=name, description=description, action=action, translated_network=cidr1, firewall_match=firewall_match, source_network=cidr2, tenant=TEST_TENANT, enabled=enabled) expected_def = core_defs.Tier1NatRule( tier1_id=tier1_id, nat_rule_id=nat_rule_id, nat_id=self.resourceApi.DEFAULT_NAT_ID, name=name, description=description, action=action, translated_network=cidr1, firewall_match=firewall_match, source_network=cidr2, tenant=TEST_TENANT, enabled=enabled) self.assert_called_with_def(api_call, expected_def) class 
TestPolicyTier1StaticRoute(NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyTier1StaticRoute, self).setUp() self.resourceApi = self.policy_lib.tier1_static_route def test_create(self): name = 'test' description = 'desc' tier1_id = '111' static_route_id = '222' network = '1.1.1.1/24' nexthop = '2.2.2.2' with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, tier1_id, static_route_id=static_route_id, description=description, network=network, next_hop=nexthop, tenant=TEST_TENANT) expected_def = core_defs.Tier1StaticRoute( tier1_id=tier1_id, static_route_id=static_route_id, name=name, description=description, network=network, next_hop=nexthop, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_delete(self): tier1_id = '111' static_route_id = '222' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete( tier1_id, static_route_id, tenant=TEST_TENANT) expected_def = core_defs.Tier1StaticRoute( tier1_id=tier1_id, static_route_id=static_route_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): tier1_id = '111' static_route_id = '222' with mock.patch.object(self.policy_api, "get") as api_call: mock_get = mock.Mock() api_call.return_value = mock_get result = self.resourceApi.get( tier1_id, static_route_id, tenant=TEST_TENANT) expected_def = core_defs.Tier1StaticRoute( tier1_id=tier1_id, static_route_id=static_route_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(mock_get, result) class TestPolicyTier0(NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyTier0, self).setUp() self.resourceApi = self.policy_lib.tier0 def test_create(self): name = 'test' description = 'desc' dhcp_config = '111' subnets = ["2.2.2.0/24"] ipv6_profile_id = '222' with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, description=description, dhcp_config=dhcp_config, force_whitelisting=True, default_rule_logging=True, transit_subnets=subnets, ipv6_ndra_profile_id=ipv6_profile_id, tenant=TEST_TENANT) expected_def = core_defs.Tier0Def( tier0_id=mock.ANY, name=name, description=description, dhcp_config=dhcp_config, default_rule_logging=True, force_whitelisting=True, ha_mode=constants.ACTIVE_ACTIVE, failover_mode=constants.NON_PREEMPTIVE, transit_subnets=subnets, ipv6_ndra_profile_id=ipv6_profile_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_delete(self): obj_id = '111' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(obj_id, tenant=TEST_TENANT) expected_def = core_defs.Tier0Def(tier0_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): obj_id = '111' with mock.patch.object(self.policy_api, "get", return_value={'id': obj_id}) as api_call: result = self.resourceApi.get(obj_id, tenant=TEST_TENANT) expected_def = core_defs.Tier0Def(tier0_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(obj_id, result['id']) def test_get_path(self): obj_id = '111' result = self.resourceApi.get_path(obj_id, tenant=TEST_TENANT) self.assertEqual('/%s/tier-0s/%s' % (TEST_TENANT, obj_id), result) def test_get_with_cache(self): """Make sure the cache is used for GET requests""" obj_id = '111' 
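        # Tier-0 routers are treated as cacheable (like transport zones in
        # test_get_with_cache above): two get() calls are expected to issue
        # a single client GET, in contrast to
        # TestPolicyTier1.test_get_with_no_cache.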
with mock.patch.object(self.policy_api.client, "get") as client_get: self.resourceApi.get(obj_id, tenant=TEST_TENANT) self.resourceApi.get(obj_id, tenant=TEST_TENANT) self.assertEqual(1, client_get.call_count) def test_get_by_name(self): name = 'test' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = core_defs.Tier0Def(tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_list(self): with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(tenant=TEST_TENANT) expected_def = core_defs.Tier0Def(tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_update(self): obj_id = '111' name = 'new name' with self.mock_get(obj_id, name), \ self.mock_create_update() as update_call: self.resourceApi.update(obj_id, name=name, tenant=TEST_TENANT) expected_def = core_defs.Tier0Def(tier0_id=obj_id, name=name, tenant=TEST_TENANT) self.assert_called_with_def( update_call, expected_def) def test_get_overlay_transport_zone(self): # Test the passthrough api tier0_id = '111' logical_router_id = 'realized_111' info = {'state': constants.STATE_REALIZED, 'entity_type': 'RealizedLogicalRouter', 'realization_specific_identifier': logical_router_id} pt_mock = self.resourceApi.nsx_api.router.get_tier0_router_overlay_tz with mock.patch.object(self.resourceApi, "_get_realization_info", return_value=info) as realization: result = self.resourceApi.get_overlay_transport_zone( tier0_id, tenant=TEST_TENANT) realization.assert_called_once() pt_mock.assert_called_once_with(logical_router_id) self.assertIsNotNone(result) def test_wait_until_realized(self): tier1_id = '111' logical_router_id = 'realized_111' info = {'state': constants.STATE_UNREALIZED, 'realization_specific_identifier': logical_router_id} with mock.patch.object(self.resourceApi, "_get_realization_info", return_value=info): self.assertRaises(nsxlib_exc.RealizationTimeoutError, self.resourceApi.wait_until_realized, tier1_id, max_attempts=5, sleep=0.1, tenant=TEST_TENANT) def test_get_uplink_ips(self): tier0_id = '111' ip_addr = '5.5.5.5' interface = {'id': '222', 'type': 'EXTERNAL', 'subnets': [{'ip_addresses': [ip_addr]}]} with mock.patch.object(self.resourceApi.policy_api, "list", return_value={'results': [interface]}): uplink_ips = self.resourceApi.get_uplink_ips( tier0_id, tenant=TEST_TENANT) self.assertEqual([ip_addr], uplink_ips) def test_get_transport_zones(self): # Test the passthrough api tier0_id = '111' logical_router_id = 'realized_111' info = {'state': constants.STATE_REALIZED, 'entity_type': 'RealizedLogicalRouter', 'realization_specific_identifier': logical_router_id} pt_mock = self.resourceApi.nsx_api.router.get_tier0_router_tz with mock.patch.object(self.resourceApi, "_get_realization_info", return_value=info) as realization: result = self.resourceApi.get_transport_zones( tier0_id, tenant=TEST_TENANT) realization.assert_called_once() pt_mock.assert_called_once_with(logical_router_id) self.assertIsNotNone(result) def test_get_bgp_config(self): tier0_id = '111' services = {'results': [{'id': 'service1'}]} bgp_config = {"id": "bgp", "enabled": True} with mock.patch.object(self.resourceApi.policy_api, "get", return_value=bgp_config), \ mock.patch.object(self.resourceApi.policy_api, "list", return_value=services): result = 
self.resourceApi.get_bgp_config( tier0_id, tenant=TEST_TENANT) self.assertEqual(result, bgp_config) def test_build_route_redistribution_rule(self): name = "rule_name" types = ["T1_CONNECTED", "T1_SEGMENT"] route_map_path = "/infra/route_map_path" rule = self.resourceApi.build_route_redistribution_rule( name, types, route_map_path) self.assertEqual(name, rule.name) self.assertEqual(types, rule.route_redistribution_types) self.assertEqual(route_map_path, rule.route_map_path) def test_build_route_redistribution_config(self): enabled = True rules = ["redistribution_types"] config = self.resourceApi.build_route_redistribution_config( enabled, rules) self.assertEqual(enabled, config.enabled) self.assertEqual(rules, config.redistribution_rules) def test_get_route_redistribution_config(self): tier0_id = '111' config = 'redistribution_config' with mock.patch.object( self.resourceApi, "get_locale_services", return_value=[{'route_redistribution_config': config}]): result = self.resourceApi.get_route_redistribution_config( tier0_id, tenant=TEST_TENANT) self.assertEqual(config, result) def test_update_route_redistribution_config(self): tier0_id = '111' service_id = '222' config = 'redistribution_config' with mock.patch.object( self.policy_api, "create_or_update") as api_call: self.resourceApi.update_route_redistribution_config( tier0_id, config, service_id, tenant=TEST_TENANT) expected_def = core_defs.Tier0LocaleServiceDef( nsx_version='3.0.0', tier0_id=tier0_id, service_id=service_id, route_redistribution_config=config, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) with mock.patch.object(self.resourceApi, "get_locale_services", return_value=[]): self.assertRaises( nsxlib_exc.ManagerError, self.resourceApi.update_route_redistribution_config, tier0_id, config, tenant=TEST_TENANT) def test_feature_supported(self): with mock.patch.object(self.policy_lib, "get_version", return_value='2.5.0'): self.assertFalse( self.policy_lib.feature_supported( nsx_constants.FEATURE_ROUTE_REDISTRIBUTION_CONFIG)) with mock.patch.object(self.policy_lib, "get_version", return_value='3.0.0'): self.assertTrue( self.policy_lib.feature_supported( nsx_constants.FEATURE_ROUTE_REDISTRIBUTION_CONFIG)) class TestPolicyTier1Segment(NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyTier1Segment, self).setUp() self.resourceApi = self.policy_lib.tier1_segment def test_create(self): name = 'test' description = 'desc' tier1_id = '111' ip_pool_id = 'external-ip-pool' with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, description=description, tier1_id=tier1_id, ip_pool_id=ip_pool_id, tenant=TEST_TENANT) expected_def = core_defs.Tier1SegmentDef( segment_id=mock.ANY, name=name, description=description, tier1_id=tier1_id, ip_pool_id=ip_pool_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_delete(self): tier1_id = '111' segment_id = '111' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(tier1_id, segment_id, tenant=TEST_TENANT) expected_def = core_defs.Tier1SegmentDef( tier1_id=tier1_id, segment_id=segment_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): tier1_id = '111' segment_id = '111' with mock.patch.object(self.policy_api, "get", return_value={'id': segment_id}) as api_call: result = self.resourceApi.get(tier1_id, segment_id, tenant=TEST_TENANT) expected_def = 
core_defs.Tier1SegmentDef( tier1_id=tier1_id, segment_id=segment_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(segment_id, result['id']) def test_list(self): tier1_id = '111' with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(tier1_id=tier1_id, tenant=TEST_TENANT) expected_def = core_defs.Tier1SegmentDef(tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_update(self): tier1_id = '111' segment_id = '111' name = 'new name' with self.mock_get(tier1_id, name), \ self.mock_create_update() as update_call: self.resourceApi.update(segment_id=segment_id, tier1_id=tier1_id, name=name, tenant=TEST_TENANT) expected_def = core_defs.Tier1SegmentDef( tier1_id=tier1_id, segment_id=segment_id, name=name, tenant=TEST_TENANT) self.assert_called_with_def(update_call, expected_def) def test_build_subnet(self): gateway_address = "10.0.0.1/24" dhcp_ranges = None subnet = self.resourceApi.build_subnet( gateway_address=gateway_address, dhcp_ranges=dhcp_ranges) self.assertEqual(gateway_address, subnet.gateway_address) self.assertEqual(dhcp_ranges, subnet.dhcp_ranges) class TestPolicySegment(NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicySegment, self).setUp() self.resourceApi = self.policy_lib.segment def _test_create(self, tier1_id=None, tier0_id=None, mdproxy=None, dhcp_server=None, admin_state=None): name = 'test' description = 'desc' subnets = [core_defs.Subnet(gateway_address="2.2.2.0/24")] kwargs = {'description': description, 'subnets': subnets, 'ip_pool_id': 'external-ip-pool', 'tenant': TEST_TENANT} if tier1_id: kwargs['tier1_id'] = tier1_id if tier0_id: kwargs['tier0_id'] = tier0_id if mdproxy: kwargs['metadata_proxy_id'] = mdproxy if dhcp_server: kwargs['dhcp_server_config_id'] = dhcp_server if admin_state: kwargs['admin_state'] = admin_state with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite(name, **kwargs) if admin_state: kwargs['admin_state'] = 'UP' if admin_state else 'DOWN' expected_def = core_defs.SegmentDef( nsx_version='3.0.0', segment_id=mock.ANY, name=name, **kwargs) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_create_with_t1(self): self._test_create(tier1_id='111') def test_create_with_t0(self): self._test_create(tier0_id='000') def test_create_with_t0_t1_fail(self): self.assertRaises(nsxlib_exc.InvalidInput, self.resourceApi.create_or_overwrite, 'seg-name', tier1_id='111', tier0_id='000') def test_create_with_mdproxy(self): self._test_create(mdproxy='md1') def test_create_with_dhcp_server_config(self): self._test_create(dhcp_server='dhcp1') def test_create_with_admin_state_up(self): self._test_create(admin_state=True) def test_create_with_admin_state_down(self): self._test_create(admin_state=False) def test_delete(self): segment_id = '111' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(segment_id, tenant=TEST_TENANT) expected_def = core_defs.SegmentDef(segment_id=segment_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): segment_id = '111' with mock.patch.object(self.policy_api, "get", return_value={'id': segment_id}) as api_call: result = self.resourceApi.get(segment_id, tenant=TEST_TENANT) expected_def = core_defs.SegmentDef(segment_id=segment_id, tenant=TEST_TENANT) 
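            # assert_called_with_def() is a helper on the base test case: it is
            # expected to compare the definition object handed to the mocked
            # policy API against this expected_def by resource class and
            # attributes rather than by object identity.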
self.assert_called_with_def(api_call, expected_def) self.assertEqual(segment_id, result['id']) def test_list(self): with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(tenant=TEST_TENANT) expected_def = core_defs.SegmentDef(tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_update(self): segment_id = '111' name = 'new name' admin_state = False with self.mock_get(segment_id, name), \ self.mock_create_update() as update_call: self.resourceApi.update(segment_id, name=name, admin_state=admin_state, tenant=TEST_TENANT) expected_def = core_defs.SegmentDef(nsx_version='3.0.0', segment_id=segment_id, name=name, admin_state=admin_state, tenant=TEST_TENANT) self.assert_called_with_def(update_call, expected_def) def test_remove_connectivity_and_subnets(self): segment_id = '111' with mock.patch.object(self.policy_api, "get", return_value={'id': segment_id}) as api_get,\ mock.patch.object(self.policy_api.client, "update") as api_put: self.resourceApi.remove_connectivity_and_subnets( segment_id, tenant=TEST_TENANT) api_get.assert_called_once() api_put.assert_called_once_with( '%s/segments/%s' % (TEST_TENANT, segment_id), {'id': segment_id, 'connectivity_path': None, 'subnets': None}) def test_build_subnet(self): gateway_address = "10.0.0.1/24" dhcp_ranges = None subnet = self.resourceApi.build_subnet( gateway_address=gateway_address, dhcp_ranges=dhcp_ranges) self.assertEqual(gateway_address, subnet.gateway_address) self.assertEqual(dhcp_ranges, subnet.dhcp_ranges) class TestPolicyIpPool(NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyIpPool, self).setUp() self.resourceApi = self.policy_lib.ip_pool def test_create(self): name = 'test' description = 'desc' ip_pool_id = '111' with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, ip_pool_id, description=description, tenant=TEST_TENANT) expected_def = core_defs.IpPoolDef( ip_pool_id=ip_pool_id, name=name, description=description, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(ip_pool_id, result) def test_delete(self): ip_pool_id = '111' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(ip_pool_id, tenant=TEST_TENANT) expected_def = core_defs.IpPoolDef(ip_pool_id=ip_pool_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): ip_pool_id = '111' with mock.patch.object(self.policy_api, "get", return_value={'id': ip_pool_id}) as api_call: result = self.resourceApi.get(ip_pool_id, tenant=TEST_TENANT) expected_def = core_defs.IpPoolDef(ip_pool_id=ip_pool_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(ip_pool_id, result['id']) def test_list(self): with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(tenant=TEST_TENANT) expected_def = core_defs.IpPoolDef(tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_update(self): ip_pool_id = '111' name = 'new name' with self.mock_get(ip_pool_id, name), \ self.mock_create_update() as update_call: self.resourceApi.update(ip_pool_id, name=name, tenant=TEST_TENANT) expected_def = core_defs.IpPoolDef(ip_pool_id=ip_pool_id, name=name, tenant=TEST_TENANT) self.assert_called_with_def(update_call, expected_def) def 
test_allocate_ip(self): ip_pool_id = '111' ip_allocation_id = 'alloc-id' with mock.patch.object(self.policy_api, "create_or_update") as update_call: self.resourceApi.allocate_ip(ip_pool_id, ip_allocation_id, tenant=TEST_TENANT) expected_def = core_defs.IpPoolAllocationDef( ip_pool_id=ip_pool_id, ip_allocation_id=ip_allocation_id, tenant=TEST_TENANT) self.assert_called_with_def(update_call, expected_def) def test_release_ip(self): ip_pool_id = '111' ip_allocation_id = 'alloc-id' with mock.patch.object(self.policy_api, "delete") as delete_call: self.resourceApi.release_ip(ip_pool_id, ip_allocation_id, tenant=TEST_TENANT) expected_def = core_defs.IpPoolAllocationDef( ip_pool_id=ip_pool_id, ip_allocation_id=ip_allocation_id, tenant=TEST_TENANT) self.assert_called_with_def(delete_call, expected_def) def test_allocate_block_subnet(self): ip_pool_id = '111' ip_block_id = 'block-id' size = 256 ip_subnet_id = 'subnet-id' start_ip = '192.168.1.0' with mock.patch.object( self.policy_api, "create_or_update") as api_call, \ mock.patch.object(self.resourceApi, 'version', '3.0.0'): self.resourceApi.allocate_block_subnet( ip_pool_id, ip_block_id, size, ip_subnet_id, tenant=TEST_TENANT, start_ip=start_ip) expected_def = core_defs.IpPoolBlockSubnetDef( nsx_version='3.0.0', ip_pool_id=ip_pool_id, ip_block_id=ip_block_id, ip_subnet_id=ip_subnet_id, size=size, tenant=TEST_TENANT, start_ip=start_ip) self.assert_called_with_def(api_call, expected_def) def test_allocate_block_subnet_with_unsupported_attribute(self): ip_pool_id = '111' ip_block_id = 'block-id' size = 256 ip_subnet_id = 'subnet-id' start_ip = '192.168.1.0' with mock.patch.object( self.policy_api, "create_or_update") as api_call, \ mock.patch.object(self.resourceApi, 'version', '2.5.0'): self.resourceApi.allocate_block_subnet( ip_pool_id, ip_block_id, size, ip_subnet_id, tenant=TEST_TENANT, start_ip=start_ip) expected_def = core_defs.IpPoolBlockSubnetDef( nsx_version='2.5.0', ip_pool_id=ip_pool_id, ip_block_id=ip_block_id, ip_subnet_id=ip_subnet_id, size=size, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_release_block_subnet(self): ip_pool_id = '111' ip_subnet_id = 'subnet-id' with mock.patch.object(self.policy_api, "delete") as delete_call: self.resourceApi.release_block_subnet(ip_pool_id, ip_subnet_id, tenant=TEST_TENANT) expected_def = core_defs.IpPoolBlockSubnetDef( ip_pool_id=ip_pool_id, ip_subnet_id=ip_subnet_id, tenant=TEST_TENANT) self.assert_called_with_def(delete_call, expected_def) def test_list_block_subnets(self): ip_pool_id = 'ip-pool-id' api_results = { 'results': [{'id': 'static_subnet_1', 'resource_type': 'IpAddressPoolStaticSubnet'}, {'id': 'block_subnet_2', 'resource_type': 'IpAddressPoolBlockSubnet'}] } with mock.patch.object( self.policy_api, "list", return_value=api_results) as api_call: result = self.resourceApi.list_block_subnets( ip_pool_id, tenant=TEST_TENANT) expected_def = core_defs.IpPoolBlockSubnetDef( ip_pool_id=ip_pool_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) expected_result = [{'id': 'block_subnet_2', 'resource_type': 'IpAddressPoolBlockSubnet'}] self.assertEqual(result, expected_result) def test_get_ip_subnet_realization_info(self): ip_pool_id = '111' ip_subnet_id = 'subnet-id' result = {'extended_attributes': [{'values': ['5.5.0.0/24'], 'key': 'cidr'}]} with mock.patch.object( self.resourceApi, "_get_realization_info", return_value=result) as api_get: self.resourceApi.get_ip_subnet_realization_info( ip_pool_id, ip_subnet_id, tenant=TEST_TENANT) 
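            # Without wait=True the helper only reads the current realization
            # info once; the wait=True variant exercised next is expected to go
            # through _wait_until_realized and poll instead.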
api_get.assert_called_once() # Test with wait set to True with mock.patch.object( self.resourceApi, "_wait_until_realized", return_value=result) as api_get: self.resourceApi.get_ip_subnet_realization_info( ip_pool_id, ip_subnet_id, tenant=TEST_TENANT, wait=True) api_get.assert_called_once() def test_get_ip_block_subnet_cidr(self): ip_pool_id = '111' ip_subnet_id = 'subnet-id' result = {'extended_attributes': [{'values': ['5.5.0.0/24'], 'key': 'cidr'}]} with mock.patch.object( self.resourceApi, "_get_realization_info", return_value=result) as api_get: cidr = self.resourceApi.get_ip_block_subnet_cidr( ip_pool_id, ip_subnet_id, tenant=TEST_TENANT) self.assertEqual(['5.5.0.0/24'], cidr) api_get.assert_called_once() def test_get_ip_alloc_realization_info(self): ip_pool_id = '111' ip_allocation_id = 'alloc-id' result = {'extended_attributes': [{'values': ['5.5.0.8']}]} with mock.patch.object( self.resourceApi, "_get_realization_info", return_value=result) as api_get: self.resourceApi.get_ip_alloc_realization_info( ip_pool_id, ip_allocation_id, tenant=TEST_TENANT) api_get.assert_called_once() # Test with wait set to True with mock.patch.object( self.resourceApi, "_wait_until_realized", return_value=result) as api_get: self.resourceApi.get_ip_alloc_realization_info( ip_pool_id, ip_allocation_id, tenant=TEST_TENANT, wait=True) api_get.assert_called_once() def test_get_realized_allocated_ip(self): ip_pool_id = '111' ip_allocation_id = 'alloc-id' result = {'extended_attributes': [{'values': ['5.5.0.8']}]} with mock.patch.object( self.resourceApi, "_get_realization_info", return_value=result) as api_get: ip = self.resourceApi.get_realized_allocated_ip( ip_pool_id, ip_allocation_id, tenant=TEST_TENANT) self.assertEqual('5.5.0.8', ip) api_get.assert_called_once() def test_create_or_update_static_subnet(self): ip_pool_id = 'ip-pool-id' ip_subnet_id = 'static-subnet-id' cidr = '10.10.10.0/24' allocation_ranges = [{'start': '10.10.10.2', 'end': '10.10.10.250'}] with mock.patch.object(self.policy_api, "create_or_update") as api_call: self.resourceApi.create_or_update_static_subnet( ip_pool_id, cidr, allocation_ranges, ip_subnet_id, tenant=TEST_TENANT) expected_def = core_defs.IpPoolStaticSubnetDef( ip_pool_id=ip_pool_id, cidr=cidr, allocation_ranges=allocation_ranges, ip_subnet_id=ip_subnet_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_release_static_subnet(self): ip_pool_id = 'ip-pool-id' ip_subnet_id = 'static-subnet-id' with mock.patch.object(self.policy_api, "delete") as delete_call: self.resourceApi.release_static_subnet( ip_pool_id, ip_subnet_id, tenant=TEST_TENANT) expected_def = core_defs.IpPoolStaticSubnetDef( ip_pool_id=ip_pool_id, ip_subnet_id=ip_subnet_id, tenant=TEST_TENANT) self.assert_called_with_def(delete_call, expected_def) def test_list_static_subnet(self): ip_pool_id = 'ip-pool-id' api_results = { 'results': [{'id': 'static_subnet_1', 'resource_type': 'IpAddressPoolStaticSubnet'}, {'id': 'block_subnet_2', 'resource_type': 'IpAddressPoolBlockSubnet'}] } with mock.patch.object( self.policy_api, "list", return_value=api_results) as api_call: result = self.resourceApi.list_static_subnets( ip_pool_id, tenant=TEST_TENANT) expected_def = core_defs.IpPoolStaticSubnetDef( ip_pool_id=ip_pool_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) expected_result = [{'id': 'static_subnet_1', 'resource_type': 'IpAddressPoolStaticSubnet'}] self.assertEqual(result, expected_result) def test_get_static_subnet(self): ip_pool_id = 'ip-pool-id' 
ip_subnet_id = 'static-subnet-id' with mock.patch.object(self.policy_api, "get") as api_call: self.resourceApi.get_static_subnet( ip_pool_id, ip_subnet_id, tenant=TEST_TENANT) expected_def = core_defs.IpPoolStaticSubnetDef( ip_pool_id=ip_pool_id, ip_subnet_id=ip_subnet_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get_realization_info(self): ip_pool_id = '111' with mock.patch.object( self.resourceApi, "_get_realization_info") as api_call: self.resourceApi.get_realization_info( ip_pool_id, tenant=TEST_TENANT) expected_def = core_defs.IpPoolDef( ip_pool_id=ip_pool_id, tenant=TEST_TENANT) self.assert_called_with_def_and_dict(api_call, expected_def, {}) def test_get_static_subnet_realization_info(self): ip_pool_id = 'ip-pool-id' ip_subnet_id = 'static-subnet-id' result = {'extended_attributes': [ {'values': '10.10.10.0/24', 'key': 'cidr'}, {'values': [{'value': '10.10.10.2', 'key': 'start'}, {'value': '10.10.10.250', 'key': 'end'}], 'key': 'allocation_ranges'}]} with mock.patch.object( self.resourceApi, "_get_realization_info", return_value=result) as api_get: self.resourceApi.get_ip_subnet_realization_info( ip_pool_id, ip_subnet_id, tenant=TEST_TENANT, subnet_type=constants.IPPOOL_STATIC_SUBNET) api_get.assert_called_once() # Test with wait set to True with mock.patch.object( self.resourceApi, "_wait_until_realized", return_value=result) as api_get: self.resourceApi.get_ip_subnet_realization_info( ip_pool_id, ip_subnet_id, tenant=TEST_TENANT, wait=True, subnet_type=constants.IPPOOL_STATIC_SUBNET) api_get.assert_called_once() def test_wait_until_realized_fail(self): ip_pool_id = 'p1' info = {'state': constants.STATE_UNREALIZED, 'realization_specific_identifier': ip_pool_id, 'entity_type': 'IpPool'} with mock.patch.object(self.resourceApi, "_get_realization_info", return_value=info): self.assertRaises(nsxlib_exc.RealizationTimeoutError, self.resourceApi.wait_until_realized, ip_pool_id, max_attempts=5, sleep=0.1, tenant=TEST_TENANT) def test_wait_until_realized_error(self): ip_alloc_id = 'ip_alloc_1' error_code = 5109 error_msg = 'Insufficient free IP addresses.' 
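        # The realization info below mimics an NSX policy error response for a
        # failed IP allocation: an ERROR state with an alarm carrying the
        # error_code and error_message that the raised exception should expose.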
info = {'state': constants.STATE_ERROR, 'realization_specific_identifier': ip_alloc_id, 'entity_type': 'AllocationIpAddress', 'alarms': [{ 'message': error_msg, 'error_details': { 'error_code': error_code, 'module_name': 'id-allocation service', 'error_message': error_msg } }]} with mock.patch.object(self.resourceApi, "_get_realization_info", return_value=info): with self.assertRaises(nsxlib_exc.RealizationErrorStateError) as e: self.resourceApi.wait_until_realized( ip_alloc_id, tenant=TEST_TENANT) self.assertTrue(e.exception.msg.endswith(error_msg)) self.assertEqual(e.exception.error_code, error_code) self.assertEqual(e.exception.related_error_codes, []) def test_wait_until_realized_succeed(self): ip_pool_id = 'p1' info = {'state': constants.STATE_REALIZED, 'realization_specific_identifier': ip_pool_id, 'entity_type': 'IpPool'} with mock.patch.object(self.resourceApi, "_get_realization_info", return_value=info): actual_info = self.resourceApi.wait_until_realized( ip_pool_id, max_attempts=5, sleep=0.1, tenant=TEST_TENANT) self.assertEqual(info, actual_info) class TestPolicySegmentPort(NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicySegmentPort, self).setUp() self.resourceApi = self.policy_lib.segment_port def test_feature_supported(self): with mock.patch.object(self.policy_lib, "get_version", return_value='2.5.0'): self.assertFalse( self.policy_lib.feature_supported( nsx_constants.FEATURE_SWITCH_HYPERBUS_MODE)) with mock.patch.object(self.policy_lib, "get_version", return_value='3.0.0'): self.assertTrue( self.policy_lib.feature_supported( nsx_constants.FEATURE_SWITCH_HYPERBUS_MODE)) def test_create(self): name = 'test' description = 'desc' segment_id = "segment" address_bindings = [] attachment_type = "CHILD" vif_id = "vif" app_id = "app" context_id = "context" traffic_tag = 10 allocate_addresses = "BOTH" tags = [{'scope': 'a', 'tag': 'b'}] hyperbus_mode = 'DISABLE' admin_state = True with mock.patch.object( self.policy_api, "create_or_update") as api_call, \ mock.patch.object(self.resourceApi, 'version', '3.0.0'): result = self.resourceApi.create_or_overwrite( name, segment_id, description=description, address_bindings=address_bindings, attachment_type=attachment_type, vif_id=vif_id, app_id=app_id, context_id=context_id, traffic_tag=traffic_tag, allocate_addresses=allocate_addresses, hyperbus_mode=hyperbus_mode, admin_state=admin_state, tags=tags, tenant=TEST_TENANT) expected_def = core_defs.SegmentPortDef( nsx_version='3.0.0', segment_id=segment_id, port_id=mock.ANY, name=name, description=description, address_bindings=address_bindings, attachment_type=attachment_type, vif_id=vif_id, app_id=app_id, context_id=context_id, traffic_tag=traffic_tag, allocate_addresses=allocate_addresses, admin_state=admin_state, tags=tags, tenant=TEST_TENANT, hyperbus_mode=hyperbus_mode) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_create_with_unsupported_attribute(self): name = 'test' description = 'desc' segment_id = "segment" address_bindings = [] attachment_type = "CHILD" vif_id = "vif" app_id = "app" context_id = "context" traffic_tag = 10 allocate_addresses = "BOTH" tags = [{'scope': 'a', 'tag': 'b'}] hyperbus_mode = 'DISABLE' with mock.patch.object( self.policy_api, "create_or_update") as api_call, \ mock.patch.object(self.resourceApi, 'version', '0.0.0'): result = self.resourceApi.create_or_overwrite( name, segment_id, description=description, address_bindings=address_bindings, attachment_type=attachment_type, vif_id=vif_id, 
app_id=app_id, context_id=context_id, traffic_tag=traffic_tag, allocate_addresses=allocate_addresses, tags=tags, tenant=TEST_TENANT, hyperbus_mode=hyperbus_mode) expected_def = core_defs.SegmentPortDef( nsx_version=self.policy_lib.get_version(), segment_id=segment_id, port_id=mock.ANY, name=name, description=description, address_bindings=address_bindings, attachment_type=attachment_type, vif_id=vif_id, app_id=app_id, context_id=context_id, traffic_tag=traffic_tag, allocate_addresses=allocate_addresses, tags=tags, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_attach(self): segment_id = "segment" port_id = "port" attachment_type = "CHILD" vif_id = "vif" app_id = "app" context_id = "context" traffic_tag = 10 allocate_addresses = "BOTH" tags = [{'scope': 'a', 'tag': 'b'}] hyperbus_mode = 'DISABLE' with mock.patch.object( self.policy_api, "create_or_update") as api_call, \ mock.patch.object(self.resourceApi, 'version', '3.0.0'): self.resourceApi.attach( segment_id, port_id, attachment_type=attachment_type, vif_id=vif_id, app_id=app_id, context_id=context_id, traffic_tag=traffic_tag, allocate_addresses=allocate_addresses, hyperbus_mode=hyperbus_mode, tags=tags, tenant=TEST_TENANT) expected_def = core_defs.SegmentPortDef( nsx_version='3.0.0', segment_id=segment_id, port_id=port_id, attachment_type=attachment_type, vif_id=vif_id, app_id=app_id, context_id=context_id, traffic_tag=traffic_tag, allocate_addresses=allocate_addresses, hyperbus_mode=hyperbus_mode, tags=tags, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_detach(self): segment_id = "segment" port_id = "port" tags = [{'scope': 'a', 'tag': 'b'}] with mock.patch.object(self.policy_api, "create_or_update") as api_call: self.resourceApi.detach( segment_id, port_id, tags=tags, tenant=TEST_TENANT) expected_def = core_defs.SegmentPortDef( segment_id=segment_id, port_id=port_id, attachment_type=None, vif_id=None, tags=tags, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) class TestPolicySegmentProfileBase(NsxPolicyLibTestCase): def setUp(self, resource_api_name='segment_security_profile', resource_def=core_defs.SegmentSecurityProfileDef): super(TestPolicySegmentProfileBase, self).setUp() self.resourceApi = getattr(self.policy_lib, resource_api_name) self.resourceDef = resource_def def test_create(self): name = 'test' with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, tenant=TEST_TENANT) expected_def = self.resourceDef( profile_id=mock.ANY, name=name, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_delete(self): profile_id = '111' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(profile_id, tenant=TEST_TENANT) expected_def = self.resourceDef(profile_id=profile_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): profile_id = '111' with mock.patch.object(self.policy_api, "get", return_value={'id': profile_id}) as api_call: result = self.resourceApi.get(profile_id, tenant=TEST_TENANT) expected_def = self.resourceDef(profile_id=profile_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(profile_id, result['id']) def test_get_by_name(self): name = 'test' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = 
self.resourceApi.get_by_name(name, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = self.resourceDef(tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_list(self): with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(tenant=TEST_TENANT) expected_def = self.resourceDef(tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_update(self): profile_id = '111' name = 'new name' with self.mock_get(profile_id, name), \ self.mock_create_update() as update_call: self.resourceApi.update(profile_id, name=name, tenant=TEST_TENANT) expected_def = self.resourceDef(profile_id=profile_id, name=name, tenant=TEST_TENANT) self.assert_called_with_def( update_call, expected_def) class TestPolicyQosProfile(TestPolicySegmentProfileBase): def setUp(self): super(TestPolicyQosProfile, self).setUp( resource_api_name='qos_profile', resource_def=core_defs.QosProfileDef) def test_create_with_params(self): name = 'test' description = 'desc' dscp = self.resourceApi.build_dscp(trusted=False, priority=7) limiter = self.resourceApi.build_ingress_rate_limiter( average_bandwidth=700, enabled=True) with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, description=description, dscp=dscp, shaper_configurations=[limiter], tenant=TEST_TENANT) expected_def = self.resourceDef( profile_id=mock.ANY, name=name, description=description, dscp=dscp, shaper_configurations=[limiter], tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) class TestPolicySpoofguardProfile(TestPolicySegmentProfileBase): def setUp(self): super(TestPolicySpoofguardProfile, self).setUp( resource_api_name='spoofguard_profile', resource_def=core_defs.SpoofguardProfileDef) class TestPolicyIpDiscoveryProfile(TestPolicySegmentProfileBase): def setUp(self): super(TestPolicyIpDiscoveryProfile, self).setUp( resource_api_name='ip_discovery_profile', resource_def=core_defs.IpDiscoveryProfileDef) class TestPolicyMacDiscoveryProfile(TestPolicySegmentProfileBase): def setUp(self): super(TestPolicyMacDiscoveryProfile, self).setUp( resource_api_name='mac_discovery_profile', resource_def=core_defs.MacDiscoveryProfileDef) class TestPolicyWAFProfile(TestPolicySegmentProfileBase): def setUp(self): super(TestPolicyWAFProfile, self).setUp( resource_api_name='waf_profile', resource_def=core_defs.WAFProfileDef) class TestPolicySegmentSecurityProfile(TestPolicySegmentProfileBase): def test_create_with_params(self): name = 'test' with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, bpdu_filter_enable=True, dhcp_client_block_enabled=False, dhcp_client_block_v6_enabled=True, dhcp_server_block_enabled=False, dhcp_server_block_v6_enabled=True, non_ip_traffic_block_enabled=False, ra_guard_enabled=True, rate_limits_enabled=False, tenant=TEST_TENANT) expected_def = self.resourceDef( profile_id=mock.ANY, name=name, bpdu_filter_enable=True, dhcp_client_block_enabled=False, dhcp_client_block_v6_enabled=True, dhcp_server_block_enabled=False, dhcp_server_block_v6_enabled=True, non_ip_traffic_block_enabled=False, ra_guard_enabled=True, rate_limits_enabled=False, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) class TestPolicySegmentSecProfilesBinding(NsxPolicyLibTestCase): def setUp(self, 
resource_api_name='segment_security_profile_maps', resource_def=core_defs.SegmentSecProfilesBindingMapDef): super(TestPolicySegmentSecProfilesBinding, self).setUp() self.resourceApi = getattr(self.policy_lib, resource_api_name) self.resourceDef = resource_def def test_create(self): name = 'test' segment_id = 'seg1' prf1 = '1' prf2 = '2' with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, segment_id, segment_security_profile_id=prf1, spoofguard_profile_id=prf2, tenant=TEST_TENANT) expected_def = self.resourceDef( segment_id=segment_id, map_id=core_resources.DEFAULT_MAP_ID, name=name, segment_security_profile_id=prf1, spoofguard_profile_id=prf2, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_delete(self): segment_id = 'seg1' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(segment_id, tenant=TEST_TENANT) expected_def = self.resourceDef( segment_id=segment_id, map_id=core_resources.DEFAULT_MAP_ID, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): segment_id = 'seg1' with mock.patch.object(self.policy_api, "get", return_value={'id': segment_id}) as api_call: result = self.resourceApi.get(segment_id, tenant=TEST_TENANT) expected_def = self.resourceDef( segment_id=segment_id, map_id=core_resources.DEFAULT_MAP_ID, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(segment_id, result['id']) def test_list(self): segment_id = 'seg1' with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(segment_id, tenant=TEST_TENANT) expected_def = self.resourceDef( segment_id=segment_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_update(self): name = 'new name' segment_id = 'seg1' prf1 = '1' prf2 = '2' with self.mock_get(segment_id, name), \ self.mock_create_update() as update_call: self.resourceApi.update( segment_id=segment_id, name=name, segment_security_profile_id=prf1, spoofguard_profile_id=prf2, tenant=TEST_TENANT) expected_def = self.resourceDef( segment_id=segment_id, map_id=core_resources.DEFAULT_MAP_ID, name=name, segment_security_profile_id=prf1, spoofguard_profile_id=prf2, tenant=TEST_TENANT) self.assert_called_with_def( update_call, expected_def) class TestPolicySegmentPortSecProfilesBinding(NsxPolicyLibTestCase): def setUp(self, resource_api_name='segment_port_security_profiles', resource_def=core_defs.SegmentPortSecProfilesBindingMapDef): super(TestPolicySegmentPortSecProfilesBinding, self).setUp() self.resourceApi = getattr(self.policy_lib, resource_api_name) self.resourceDef = resource_def def test_create(self): name = 'test' segment_id = 'seg1' port_id = 'port1' prf1 = '1' prf2 = '2' with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, segment_id, port_id, segment_security_profile_id=prf1, spoofguard_profile_id=prf2, tenant=TEST_TENANT) expected_def = self.resourceDef( segment_id=segment_id, port_id=port_id, map_id=core_resources.DEFAULT_MAP_ID, name=name, segment_security_profile_id=prf1, spoofguard_profile_id=prf2, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_delete(self): segment_id = 'seg1' port_id = 'port1' with mock.patch.object(self.policy_api, "delete") as api_call: 
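            # These binding-map tests always address the single default map
            # (core_resources.DEFAULT_MAP_ID), so callers only supply the
            # segment and port ids. Illustrative sketch mirroring this test:
            #   policy_lib.segment_port_security_profiles.delete(
            #       'seg1', 'port1', tenant=TEST_TENANT)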
self.resourceApi.delete(segment_id, port_id, tenant=TEST_TENANT) expected_def = self.resourceDef( segment_id=segment_id, port_id=port_id, map_id=core_resources.DEFAULT_MAP_ID, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): segment_id = 'seg1' port_id = 'port1' with mock.patch.object(self.policy_api, "get", return_value={'id': port_id}) as api_call: result = self.resourceApi.get(segment_id, port_id, tenant=TEST_TENANT) expected_def = self.resourceDef( segment_id=segment_id, port_id=port_id, map_id=core_resources.DEFAULT_MAP_ID, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(port_id, result['id']) def test_list(self): segment_id = 'seg1' port_id = 'port1' with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(segment_id, port_id, tenant=TEST_TENANT) expected_def = self.resourceDef( segment_id=segment_id, port_id=port_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_update(self): name = 'new name' segment_id = 'seg1' port_id = 'port1' prf1 = '1' prf2 = '2' with self.mock_get(segment_id, name), \ self.mock_create_update() as update_call: self.resourceApi.update( segment_id=segment_id, port_id=port_id, name=name, segment_security_profile_id=prf1, spoofguard_profile_id=prf2, tenant=TEST_TENANT) expected_def = self.resourceDef( segment_id=segment_id, port_id=port_id, map_id=core_resources.DEFAULT_MAP_ID, name=name, segment_security_profile_id=prf1, spoofguard_profile_id=prf2, tenant=TEST_TENANT) self.assert_called_with_def( update_call, expected_def) class TestPolicySegmentDiscoveryProfilesBinding(NsxPolicyLibTestCase): def setUp( self, resource_api_name='segment_port_discovery_profiles', resource_def=core_defs.SegmentPortDiscoveryProfilesBindingMapDef): super(TestPolicySegmentDiscoveryProfilesBinding, self).setUp() self.resourceApi = getattr(self.policy_lib, resource_api_name) self.resourceDef = resource_def def test_create(self): name = 'test' segment_id = 'seg1' port_id = 'port1' prf1 = '1' prf2 = '2' with mock.patch.object(self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite( name, segment_id, port_id, mac_discovery_profile_id=prf1, ip_discovery_profile_id=prf2, tenant=TEST_TENANT) expected_def = self.resourceDef( segment_id=segment_id, port_id=port_id, map_id=core_resources.DEFAULT_MAP_ID, name=name, mac_discovery_profile_id=prf1, ip_discovery_profile_id=prf2, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_delete(self): segment_id = 'seg1' port_id = 'port1' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(segment_id, port_id, tenant=TEST_TENANT) expected_def = self.resourceDef( segment_id=segment_id, port_id=port_id, map_id=core_resources.DEFAULT_MAP_ID, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): segment_id = 'seg1' port_id = 'port1' with mock.patch.object(self.policy_api, "get", return_value={'id': port_id}) as api_call: result = self.resourceApi.get(segment_id, port_id, tenant=TEST_TENANT) expected_def = self.resourceDef( segment_id=segment_id, port_id=port_id, map_id=core_resources.DEFAULT_MAP_ID, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(port_id, result['id']) def test_list(self): segment_id = 'seg1' port_id = 'port1' with 
mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(segment_id, port_id, tenant=TEST_TENANT) expected_def = self.resourceDef( segment_id=segment_id, port_id=port_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_update(self): name = 'new name' segment_id = 'seg1' port_id = 'port1' prf1 = '1' prf2 = '2' with self.mock_get(segment_id, name), \ self.mock_create_update() as update_call: self.resourceApi.update( segment_id=segment_id, port_id=port_id, name=name, mac_discovery_profile_id=prf1, ip_discovery_profile_id=prf2, tenant=TEST_TENANT) expected_def = self.resourceDef( segment_id=segment_id, port_id=port_id, map_id=core_resources.DEFAULT_MAP_ID, name=name, mac_discovery_profile_id=prf1, ip_discovery_profile_id=prf2, tenant=TEST_TENANT) self.assert_called_with_def( update_call, expected_def) class TestPolicySegmentQosProfilesBinding(NsxPolicyLibTestCase): def setUp( self, resource_api_name='segment_port_qos_profiles', resource_def=core_defs.SegmentPortQoSProfilesBindingMapDef): super(TestPolicySegmentQosProfilesBinding, self).setUp() self.resourceApi = getattr(self.policy_lib, resource_api_name) self.resourceDef = resource_def def test_create(self): name = 'test' segment_id = 'seg1' port_id = 'port1' prf1 = '1' with self.mock_create_update() as api_call: result = self.resourceApi.create_or_overwrite( name, segment_id, port_id, qos_profile_id=prf1, tenant=TEST_TENANT) expected_def = self.resourceDef( segment_id=segment_id, port_id=port_id, map_id=core_resources.DEFAULT_MAP_ID, name=name, qos_profile_id=prf1, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_delete(self): segment_id = 'seg1' port_id = 'port1' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(segment_id, port_id, tenant=TEST_TENANT) expected_def = self.resourceDef( segment_id=segment_id, port_id=port_id, map_id=core_resources.DEFAULT_MAP_ID, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): segment_id = 'seg1' port_id = 'port1' with mock.patch.object(self.policy_api, "get", return_value={'id': segment_id}) as api_call: result = self.resourceApi.get(segment_id, port_id, tenant=TEST_TENANT) expected_def = self.resourceDef( segment_id=segment_id, port_id=port_id, map_id=core_resources.DEFAULT_MAP_ID, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(segment_id, result['id']) def test_list(self): segment_id = 'seg1' port_id = 'port1' with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(segment_id, port_id, tenant=TEST_TENANT) expected_def = self.resourceDef( segment_id=segment_id, port_id=port_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_update(self): name = 'new name' segment_id = 'seg1' port_id = 'port1' prf1 = '1' with self.mock_get(segment_id, name), \ self.mock_create_update() as update_call: self.resourceApi.update( segment_id=segment_id, port_id=port_id, name=name, qos_profile_id=prf1, tenant=TEST_TENANT) expected_def = self.resourceDef( segment_id=segment_id, port_id=port_id, map_id=core_resources.DEFAULT_MAP_ID, name=name, qos_profile_id=prf1, tenant=TEST_TENANT) self.assert_called_with_def( update_call, expected_def) class TestPolicyTier1SegmentPort(NsxPolicyLibTestCase): def 
setUp(self, *args, **kwargs): super(TestPolicyTier1SegmentPort, self).setUp() self.resourceApi = self.policy_lib.tier1_segment_port def test_create(self): name = 'test' tier1_id = 'tier1' description = 'desc' segment_id = "segment" address_bindings = [] attachment_type = "CHILD" vif_id = "vif" app_id = "app" context_id = "context" traffic_tag = 10 allocate_addresses = "BOTH" tags = [{'scope': 'a', 'tag': 'b'}] with self.mock_create_update() as api_call: result = self.resourceApi.create_or_overwrite( name, tier1_id, segment_id, description=description, address_bindings=address_bindings, attachment_type=attachment_type, vif_id=vif_id, app_id=app_id, context_id=context_id, traffic_tag=traffic_tag, allocate_addresses=allocate_addresses, tags=tags, tenant=TEST_TENANT) expected_def = core_defs.Tier1SegmentPortDef( segment_id=segment_id, tier1_id=tier1_id, port_id=mock.ANY, name=name, description=description, address_bindings=address_bindings, attachment_type=attachment_type, vif_id=vif_id, app_id=app_id, context_id=context_id, traffic_tag=traffic_tag, allocate_addresses=allocate_addresses, tags=tags, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_wait_until_realized_fail(self): tier1_id = '111' port_id = 'port-111' segment_id = 'seg-111' logical_port_id = 'realized_port_111' info = {'state': constants.STATE_UNREALIZED, 'realization_specific_identifier': logical_port_id, 'entity_type': 'RealizedLogicalPort'} with mock.patch.object(self.resourceApi, "_get_realization_info", return_value=info): self.assertRaises(nsxlib_exc.RealizationTimeoutError, self.resourceApi.wait_until_realized, tier1_id, segment_id, port_id, max_attempts=5, sleep=0.1, tenant=TEST_TENANT) def test_wait_until_realized_error(self): tier1_id = '111' port_id = 'port-111' segment_id = 'seg-111' info = {'state': constants.STATE_ERROR, 'alarms': [{'message': 'dummy'}], 'entity_type': 'RealizedLogicalPort'} with mock.patch.object(self.resourceApi, "_get_realization_info", return_value=info): self.assertRaises(nsxlib_exc.RealizationErrorStateError, self.resourceApi.wait_until_realized, tier1_id, segment_id, port_id, max_attempts=5, sleep=0.1, tenant=TEST_TENANT) def test_wait_until_realized_succeed(self): tier1_id = '111' port_id = 'port-111' segment_id = 'seg-111' logical_port_id = 'realized_port_111' info = {'state': constants.STATE_REALIZED, 'realization_specific_identifier': logical_port_id, 'entity_type': 'RealizedLogicalPort'} with mock.patch.object(self.resourceApi, "_get_realization_info", return_value=info): actual_info = self.resourceApi.wait_until_realized( tier1_id, segment_id, port_id, max_attempts=5, sleep=0.1, tenant=TEST_TENANT) self.assertEqual(info, actual_info) class TestPolicySegmentDhcpStaticBinding(NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicySegmentDhcpStaticBinding, self).setUp() self.resourceApi = self.policy_lib.segment_dhcp_static_bindings def test_create(self): """Create v4 static bindings""" name = 'test' description = 'desc' segment_id = "segment" ip_address = "1.1.1.1" mac_address = "fa:16:3e:44:56:df" with mock.patch.object( self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite_v4( name, segment_id, description=description, ip_address=ip_address, mac_address=mac_address, tenant=TEST_TENANT) expected_def = core_defs.DhcpV4StaticBindingConfig( segment_id=segment_id, binding_id=mock.ANY, name=name, description=description, ip_address=ip_address, mac_address=mac_address, 
tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_create_v6(self): """Create v6 static bindings""" name = 'test' description = 'desc' segment_id = "segment" ip_address = "2000::01ab" mac_address = "fa:16:3e:44:56:df" with mock.patch.object( self.policy_api, "create_or_update") as api_call: result = self.resourceApi.create_or_overwrite_v6( name, segment_id, description=description, ip_addresses=[ip_address], mac_address=mac_address, tenant=TEST_TENANT) expected_def = core_defs.DhcpV6StaticBindingConfig( segment_id=segment_id, binding_id=mock.ANY, name=name, description=description, ip_addresses=[ip_address], mac_address=mac_address, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_list(self): segment_id = '111' with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(segment_id, tenant=TEST_TENANT) expected_def = core_defs.DhcpV4StaticBindingConfig( segment_id=segment_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_delete(self): segment_id = '111' binding_id = '222' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(segment_id, binding_id, tenant=TEST_TENANT) expected_def = core_defs.DhcpV4StaticBindingConfig( segment_id=segment_id, binding_id=binding_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): segment_id = '111' binding_id = '222' with mock.patch.object(self.policy_api, "get", return_value={'id': binding_id}) as api_call: result = self.resourceApi.get(segment_id, binding_id, tenant=TEST_TENANT) expected_def = core_defs.DhcpV4StaticBindingConfig( segment_id=segment_id, binding_id=binding_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(binding_id, result['id']) class TestPolicyDhcpRelayConfig(NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyDhcpRelayConfig, self).setUp() self.resourceApi = self.policy_lib.dhcp_relay_config def test_create(self): name = 'test' description = 'desc' server_addr = '1.1.1.1' with self.mock_create_update() as api_call: result = self.resourceApi.create_or_overwrite( name, description=description, server_addresses=[server_addr], tenant=TEST_TENANT) expected_def = core_defs.DhcpRelayConfigDef( config_id=mock.ANY, name=name, description=description, server_addresses=[server_addr], tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_delete(self): config_id = '111' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(config_id, tenant=TEST_TENANT) expected_def = core_defs.DhcpRelayConfigDef(config_id=config_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): config_id = '111' with mock.patch.object(self.policy_api, "get", return_value={'id': config_id}) as api_call: result = self.resourceApi.get(config_id, tenant=TEST_TENANT) expected_def = core_defs.DhcpRelayConfigDef(config_id=config_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(config_id, result['id']) def test_list(self): with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(tenant=TEST_TENANT) expected_def = core_defs.DhcpRelayConfigDef(tenant=TEST_TENANT) 
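            # list() is expected to unwrap the 'results' key of the API reply,
            # hence the empty-list assertion that follows.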
self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) class TestPolicyDhcpServerConfig(NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyDhcpServerConfig, self).setUp() self.resourceApi = self.policy_lib.dhcp_server_config def test_create(self): name = 'test' description = 'desc' server_addr = '1.1.1.1' lease_time = 100 edge_cluster_path = 'dummy/path' tags = [{'scope': 'a', 'tag': 'b'}] with self.mock_create_update() as api_call: result = self.resourceApi.create_or_overwrite( name, description=description, server_addresses=[server_addr], edge_cluster_path=edge_cluster_path, lease_time=lease_time, tags=tags, tenant=TEST_TENANT) expected_def = core_defs.DhcpServerConfigDef( config_id=mock.ANY, name=name, description=description, server_addresses=[server_addr], edge_cluster_path=edge_cluster_path, lease_time=lease_time, tags=tags, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_delete(self): config_id = '111' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(config_id, tenant=TEST_TENANT) expected_def = core_defs.DhcpServerConfigDef(config_id=config_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): config_id = '111' with mock.patch.object(self.policy_api, "get", return_value={'id': config_id}) as api_call: result = self.resourceApi.get(config_id, tenant=TEST_TENANT) expected_def = core_defs.DhcpServerConfigDef(config_id=config_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(config_id, result['id']) def test_list(self): with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(tenant=TEST_TENANT) expected_def = core_defs.DhcpServerConfigDef(tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_update(self): name = 'test' description = 'desc' server_addr = '1.1.1.1' lease_time = 100 edge_cluster_path = 'dummy/path' tags = [{'scope': 'a', 'tag': 'b'}] config_id = 'aaa' with self.mock_create_update() as api_call: self.resourceApi.update( config_id, name=name, description=description, server_addresses=[server_addr], edge_cluster_path=edge_cluster_path, lease_time=lease_time, tags=tags, tenant=TEST_TENANT) expected_def = core_defs.DhcpServerConfigDef( config_id=mock.ANY, name=name, description=description, server_addresses=[server_addr], edge_cluster_path=edge_cluster_path, lease_time=lease_time, tags=tags, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) class TestPolicyCertificate(NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyCertificate, self).setUp() self.resourceApi = self.policy_lib.certificate def test_create_with_id(self): name = 'd1' description = 'desc' obj_id = '111' pem_encoded = 'pem_encoded' private_key = 'private_key' passphrase = 'passphrase' key_algo = 'algo' with self.mock_create_update() as api_call: result = self.resourceApi.create_or_overwrite( name, certificate_id=obj_id, description=description, pem_encoded=pem_encoded, private_key=private_key, passphrase=passphrase, key_algo=key_algo, tenant=TEST_TENANT) expected_def = ( core_defs.CertificateDef( certificate_id=obj_id, name=name, description=description, pem_encoded=pem_encoded, private_key=private_key, passphrase=passphrase, key_algo=key_algo, tenant=TEST_TENANT)) self.assert_called_with_def(api_call, expected_def) 
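            # When an explicit certificate_id is passed, create_or_overwrite is
            # expected to return that same id (asserted below); without one, a
            # generated id is returned instead (see test_create_without_id).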
self.assertEqual(obj_id, result) def test_create_without_id(self): name = 'd1' description = 'desc' pem_encoded = 'pem_encoded' with self.mock_create_update() as api_call: result = self.resourceApi.create_or_overwrite( name, description=description, tenant=TEST_TENANT, pem_encoded=pem_encoded) expected_def = ( core_defs.CertificateDef(certificate_id=mock.ANY, name=name, description=description, tenant=TEST_TENANT, pem_encoded=pem_encoded)) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_delete(self): obj_id = '111' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(obj_id, tenant=TEST_TENANT) expected_def = core_defs.CertificateDef( certificate_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): obj_id = '111' with mock.patch.object(self.policy_api, "get", return_value={'id': obj_id}) as api_call: result = self.resourceApi.get(obj_id, tenant=TEST_TENANT) expected_def = core_defs.CertificateDef( certificate_id=obj_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(obj_id, result['id']) def test_get_by_name(self): name = 'd1' with mock.patch.object( self.policy_api, "list", return_value={'results': [{'display_name': name}]}) as api_call: obj = self.resourceApi.get_by_name(name, tenant=TEST_TENANT) self.assertIsNotNone(obj) expected_def = core_defs.CertificateDef(tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_list(self): with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(tenant=TEST_TENANT) expected_def = core_defs.CertificateDef(tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_update(self): obj_id = '111' name = 'new name' description = 'new desc' pem_encoded = 'pem_encoded' private_key = 'private_key' passphrase = '12' key_algo = 'new_algo' with self.mock_get(obj_id, name), \ self.mock_create_update() as update_call: self.resourceApi.update(obj_id, name=name, description=description, tenant=TEST_TENANT, pem_encoded=pem_encoded, private_key=private_key, passphrase=passphrase, key_algo=key_algo) expected_def = core_defs.CertificateDef( certificate_id=obj_id, name=name, description=description, tenant=TEST_TENANT, pem_encoded=pem_encoded, private_key=private_key, passphrase=passphrase, key_algo=key_algo ) self.assert_called_with_def(update_call, expected_def) def test_wait_until_realized_fail(self): cert_id = 'test_cert' info = {'state': constants.STATE_UNREALIZED, 'realization_specific_identifier': cert_id} with mock.patch.object(self.resourceApi, "_get_realization_info", return_value=info): self.assertRaises(nsxlib_exc.RealizationTimeoutError, self.resourceApi.wait_until_realized, cert_id, max_attempts=5, sleep=0.1, tenant=TEST_TENANT) def test_wait_until_realized_succeed(self): cert_id = 'test_cert' info = {'state': constants.STATE_REALIZED, 'realization_specific_identifier': cert_id} with mock.patch.object(self.resourceApi, "_get_realization_info", return_value=info): actual_info = self.resourceApi.wait_until_realized( cert_id, max_attempts=5, sleep=0.1, tenant=TEST_TENANT) self.assertEqual(info, actual_info) class TestPolicyExcludeList(NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyExcludeList, self).setUp() self.resourceApi = self.policy_lib.exclude_list def test_create_or_overwrite(self): members = 
["/infra/domains/default/groups/adit1"] with self.mock_create_update() as api_call: self.resourceApi.create_or_overwrite( members=members, tenant=TEST_TENANT) expected_def = core_defs.ExcludeListDef( members=members, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_delete(self): self.skipTest("The action is not supported by this resource") def test_get(self): with mock.patch.object(self.policy_api, "get") as api_call: self.resourceApi.get(tenant=TEST_TENANT) expected_def = core_defs.ExcludeListDef( tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_list(self): self.skipTest("The action is not supported by this resource") def test_update(self): self.skipTest("The action is not supported by this resource") class TestPolicyTier0RouteMap(NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyTier0RouteMap, self).setUp() self.resourceApi = self.policy_lib.tier0_route_map def test_create(self): name = 'route_map_test' tier0_id = 't0_test' with mock.patch.object(self.policy_api, "create_or_update") as api_call: # test with 'entries' entry = core_defs.RouteMapEntry('DENY') result = self.resourceApi.create_or_overwrite( name, tier0_id, entries=[entry], tenant=TEST_TENANT) expected_def = core_defs.Tier0RouteMapDef( tier0_id=tier0_id, route_map_id=mock.ANY, name=name, entries=[entry], tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_delete(self): tier0_id = 't0_test' route_map_id = 'route_map_test' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(tier0_id, route_map_id, tenant=TEST_TENANT) expected_def = core_defs.Tier0RouteMapDef( tier0_id=tier0_id, route_map_id=route_map_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): tier0_id = 't0_test' route_map_id = 'route_map_test' entries = [] with mock.patch.object(self.policy_api, "get", return_value={'id': route_map_id}) as api_call: result = self.resourceApi.get(tier0_id, route_map_id, tenant=TEST_TENANT) expected_def = core_defs.Tier0RouteMapDef( tier0_id=tier0_id, route_map_id=route_map_id, entries=entries, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(route_map_id, result['id']) def test_list(self): tier0_id = 't0_test' with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(tier0_id=tier0_id, tenant=TEST_TENANT) expected_def = core_defs.Tier0RouteMapDef( tier0_id=tier0_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_update(self): tier0_id = 't0_test' route_map_id = 'route_map_test' name = 'new_name' entries = [] with self.mock_get(tier0_id, name), \ self.mock_create_update() as update_call: self.resourceApi.update(name, tier0_id, route_map_id, entries, tenant=TEST_TENANT) expected_def = core_defs.Tier0RouteMapDef( tier0_id=tier0_id, route_map_id=route_map_id, name=name, entries=entries, tenant=TEST_TENANT) self.assert_called_with_def(update_call, expected_def) def test_build_route_map_entry(self): action = constants.ADV_RULE_PERMIT community_list_matches = mock.ANY prefix_list_matches = ["prefix_list_matches"] entry_set = mock.ANY route_map_entry = self.resourceApi.build_route_map_entry( action, community_list_matches, prefix_list_matches, entry_set) self.assertEqual(action, route_map_entry.action) self.assertEqual(community_list_matches, 
route_map_entry.community_list_matches) self.assertEqual(prefix_list_matches, route_map_entry.prefix_list_matches) self.assertEqual(entry_set, route_map_entry.entry_set) def test_build_route_map_entry_set(self): local_preference = 100 as_path_prepend = mock.ANY community = mock.ANY med = mock.ANY weight = mock.ANY entry_set = self.resourceApi.build_route_map_entry_set( local_preference, as_path_prepend, community, med, weight) self.assertEqual(local_preference, entry_set.local_preference) self.assertEqual(as_path_prepend, entry_set.as_path_prepend) self.assertEqual(community, entry_set.community) self.assertEqual(med, entry_set.med) self.assertEqual(weight, entry_set.weight) def test_build_community_match_criteria(self): criteria = "test_criteria" match_operator = mock.ANY match_criteria = self.resourceApi.build_community_match_criteria( criteria, match_operator) self.assertEqual(criteria, match_criteria.criteria) self.assertEqual(match_operator, match_criteria.match_operator) class TestPolicyTier0PrefixList(NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyTier0PrefixList, self).setUp() self.resourceApi = self.policy_lib.tier0_prefix_list def test_create(self): name = 'prefix_list_test' tier0_id = 't0_test' with mock.patch.object(self.policy_api, "create_or_update") as api_call: # test with 'prefixes' prefix = core_defs.PrefixEntry('network_test') result = self.resourceApi.create_or_overwrite( name, tier0_id, prefixes=[prefix], tenant=TEST_TENANT) expected_def = core_defs.Tier0PrefixListDef( tier0_id=tier0_id, prefix_list_id=mock.ANY, name=name, prefixes=[prefix], tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertIsNotNone(result) def test_delete(self): tier0_id = 't0_test' prefix_list_id = 'prefix_list_test' with mock.patch.object(self.policy_api, "delete") as api_call: self.resourceApi.delete(tier0_id, prefix_list_id, tenant=TEST_TENANT) expected_def = core_defs.Tier0PrefixListDef( tier0_id=tier0_id, prefix_list_id=prefix_list_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_get(self): tier0_id = 't0_test' prefix_list_id = 'prefix_list_test' with mock.patch.object( self.policy_api, "get", return_value={'id': prefix_list_id}) as api_call: result = self.resourceApi.get(tier0_id, prefix_list_id, tenant=TEST_TENANT) expected_def = core_defs.Tier0PrefixListDef( tier0_id=tier0_id, prefix_list_id=prefix_list_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual(prefix_list_id, result['id']) def test_list(self): tier0_id = 't0_test' with mock.patch.object(self.policy_api, "list", return_value={'results': []}) as api_call: result = self.resourceApi.list(tier0_id=tier0_id, tenant=TEST_TENANT) expected_def = core_defs.Tier0PrefixListDef( tier0_id=tier0_id, tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) self.assertEqual([], result) def test_update(self): tier0_id = 't0_test' prefix_list_id = 'prefix_list_test' name = 'new_name' prefixes = [] with self.mock_get(tier0_id, name), \ self.mock_create_update() as update_call: self.resourceApi.update(name, tier0_id, prefix_list_id, prefixes, tenant=TEST_TENANT) expected_def = core_defs.Tier0PrefixListDef( tier0_id=tier0_id, prefix_list_id=prefix_list_id, name=name, prefixes=prefixes, tenant=TEST_TENANT) self.assert_called_with_def(update_call, expected_def) def test_build_prefix_entry(self): network = "network_test" le = mock.ANY ge = mock.ANY action = constants.ADV_RULE_DENY prefix_entry = 
self.resourceApi.build_prefix_entry( network, le, ge, action) self.assertEqual(network, prefix_entry.network) self.assertEqual(le, prefix_entry.le) self.assertEqual(ge, prefix_entry.ge) self.assertEqual(action, prefix_entry.action) class TestNsxSearch(NsxPolicyLibTestCase): def setUp(self): super(TestNsxSearch, self).setUp() self.search_path = 'search/query?query=%s' def test_nsx_search_by_realization(self): """Test search of resources with the specified tag.""" with mock.patch.object(self.policy_lib.client, 'url_get') as search: realized_id = 'xxx' realized_type = 'RealizedLogicalSwitch' query = ('resource_type:GenericPolicyRealizedResource AND ' 'realization_specific_identifier:%s AND ' 'entity_type:%s' % (realized_id, realized_type)) self.policy_lib.search_resource_by_realized_id( realized_id, realized_type) search.assert_called_with(self.search_path % query) class TestPolicyGlobalConfig(NsxPolicyLibTestCase): def setUp(self, *args, **kwargs): super(TestPolicyGlobalConfig, self).setUp() self.resourceApi = self.policy_lib.global_config def test_create_or_overwrite(self): self.skipTest("The action is not supported by this resource") def test_delete(self): self.skipTest("The action is not supported by this resource") def test_get(self): with mock.patch.object(self.policy_api, "get") as api_call: self.resourceApi.get(tenant=TEST_TENANT) expected_def = core_defs.GlobalConfigDef( tenant=TEST_TENANT) self.assert_called_with_def(api_call, expected_def) def test_list(self): self.skipTest("The action is not supported by this resource") def test_update(self): self.skipTest("The action is not supported by this resource") def test_enable_ipv6(self): current_config = {'l3_forwarding_mode': 'IPV4_ONLY'} with mock.patch.object(self.policy_api, "get", return_value=current_config) as api_get,\ mock.patch.object(self.policy_api.client, "update") as api_put: self.resourceApi.enable_ipv6(tenant=TEST_TENANT) api_get.assert_called_once() api_put.assert_called_once_with( "%s/global-config/" % TEST_TENANT, {'l3_forwarding_mode': 'IPV4_AND_IPV6'}) def test_enable_ipv6_no_call(self): current_config = {'l3_forwarding_mode': 'IPV4_AND_IPV6'} with mock.patch.object(self.policy_api, "get", return_value=current_config) as api_get,\ mock.patch.object(self.policy_api.client, "update") as api_put: self.resourceApi.enable_ipv6(tenant=TEST_TENANT) api_get.assert_called_once() api_put.assert_not_called() def test_disable_ipv6(self): current_config = {'l3_forwarding_mode': 'IPV4_AND_IPV6'} with mock.patch.object(self.policy_api, "get", return_value=current_config) as api_get,\ mock.patch.object(self.policy_api.client, "update") as api_put: self.resourceApi.disable_ipv6(tenant=TEST_TENANT) api_get.assert_called_once() api_put.assert_called_once_with( "%s/global-config/" % TEST_TENANT, {'l3_forwarding_mode': 'IPV4_ONLY'}) def test_disable_ipv6_no_call(self): current_config = {'l3_forwarding_mode': 'IPV4_ONLY'} with mock.patch.object(self.policy_api, "get", return_value=current_config) as api_get,\ mock.patch.object(self.policy_api.client, "update") as api_put: self.resourceApi.disable_ipv6(tenant=TEST_TENANT) api_get.assert_called_once() api_put.assert_not_called() vmware-nsxlib-15.0.6/vmware_nsxlib/tests/unit/v3/test_security.py0000664000175000017500000004753613623151571025257 0ustar zuulzuul00000000000000# Copyright (c) 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock import six from oslo_utils import uuidutils from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase from vmware_nsxlib.tests.unit.v3 import test_constants from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3 import nsx_constants as const class TestNsxLibFirewallSection(nsxlib_testcase.NsxLibTestCase): """Tests for vmware_nsxlib.v3.security.NsxLibFirewallSection""" def test_get_logicalport_reference(self): mock_port = '3ed55c9f-f879-4048-bdd3-eded92465252' result = self.nsxlib.firewall_section.get_logicalport_reference( mock_port) expected = { 'target_id': '3ed55c9f-f879-4048-bdd3-eded92465252', 'target_type': 'LogicalPort' } self.assertEqual(expected, result) def test_get_rule_address(self): result = self.nsxlib.firewall_section.get_rule_address( 'target-id', 'display-name') expected = { 'target_display_name': 'display-name', 'target_id': 'target-id', 'is_valid': True, 'target_type': 'IPv4Address' } self.assertEqual(expected, result) def test_get_l4portset_nsservice(self): result = self.nsxlib.firewall_section.get_l4portset_nsservice() expected = { 'service': { 'resource_type': 'L4PortSetNSService', 'source_ports': [], 'destination_ports': [], 'l4_protocol': 'TCP' } } self.assertEqual(expected, result) def test_create_rules_with_protocol(self): with mock.patch("vmware_nsxlib.v3.security.NsxLibFirewallSection" ".add_rules") as add_rules: rule_id = uuidutils.generate_uuid() rule = {'id': rule_id, 'ethertype': 'IPv4', 'protocol': 'ipip', 'direction': 'ingress', 'remote_ip_prefix': None} rules = [rule] section_id = 'section-id' group_id = 'nsgroup-id' target_id = 'dummy' self.nsxlib.firewall_section.create_rules( None, section_id, group_id, False, "ALLOW", rules, {rule_id: target_id}) add_rules.assert_called_once_with([ {'display_name': mock.ANY, 'ip_protocol': 'IPV4', 'direction': 'IN', 'services': [{'service': { 'resource_type': 'IPProtocolNSService', 'protocol_number': 4}}], 'disabled': False, 'sources': [{'target_id': target_id, 'target_type': 'NSGroup'}], 'destinations': [{'target_id': group_id, 'target_type': 'NSGroup'}], 'logged': False, 'action': 'ALLOW'}], section_id) def test_create_rules_ingress_with_port(self): with mock.patch("vmware_nsxlib.v3.security.NsxLibFirewallSection" ".add_rules") as add_rules: rule_id = uuidutils.generate_uuid() rule = {'id': rule_id, 'ethertype': 'IPv4', 'protocol': 'tcp', 'direction': 'ingress', 'port_range_min': 80, 'port_range_max': 80, 'remote_ip_prefix': None} rules = [rule] section_id = 'section-id' group_id = 'nsgroup-id' target_id = 'dummy' self.nsxlib.firewall_section.create_rules( None, section_id, group_id, False, "ALLOW", rules, {rule_id: target_id}) add_rules.assert_called_once_with([ {'display_name': mock.ANY, 'ip_protocol': 'IPV4', 'direction': 'IN', 'services': [{'service': { 'l4_protocol': 'TCP', 'destination_ports': ['80'], 'source_ports': [], 'resource_type': 'L4PortSetNSService'}}], 'disabled': False, 'sources': [{'target_id': target_id, 'target_type': 'NSGroup'}], 'destinations': [{'target_id': group_id, 'target_type': 'NSGroup'}], 'logged': False, 'action': 'ALLOW'}], section_id) def 
test_create_rules_egress_with_port(self): with mock.patch("vmware_nsxlib.v3.security.NsxLibFirewallSection" ".add_rules") as add_rules: rule_id = uuidutils.generate_uuid() rule = {'id': rule_id, 'ethertype': 'IPv4', 'protocol': 'tcp', 'direction': 'egress', 'port_range_min': 80, 'port_range_max': 80, 'remote_ip_prefix': None} rules = [rule] section_id = 'section-id' group_id = 'nsgroup-id' target_id = 'dummy' self.nsxlib.firewall_section.create_rules( None, section_id, group_id, False, "ALLOW", rules, {rule_id: target_id}) add_rules.assert_called_once_with([ {'display_name': mock.ANY, 'ip_protocol': 'IPV4', 'direction': 'OUT', 'services': [{'service': { 'l4_protocol': 'TCP', 'destination_ports': ['80'], 'source_ports': [], 'resource_type': 'L4PortSetNSService'}}], 'disabled': False, 'destinations': [{'target_id': target_id, 'target_type': 'NSGroup'}], 'sources': [{'target_id': group_id, 'target_type': 'NSGroup'}], 'logged': False, 'action': 'ALLOW'}], section_id) def test_create_rule_with_illegal_protocol(self): rule_id = uuidutils.generate_uuid() rule = {'id': rule_id, 'ethertype': 'IPv4', 'protocol': 'bad', 'direction': 'ingress', 'remote_ip_prefix': None} rules = [rule] self.assertRaises(nsxlib_exc.InvalidInput, self.nsxlib.firewall_section.create_section_rules, 'section-id', 'nsgroup-id', False, "ALLOW", rules, {rule_id: 'dummy'}) def test_create_rule_with_icmp(self): nsx_ver = ["2.3.0", "2.4.0"] for nsx_ver in nsx_ver: with mock.patch("vmware_nsxlib.v3.security.NsxLibFirewallSection" ".add_rules") as add_rules: with mock.patch("vmware_nsxlib.v3.NsxLib.get_version", return_value=nsx_ver): rule_id = uuidutils.generate_uuid() rule = {'id': rule_id, 'ethertype': 'IPv4', 'protocol': 'icmp', 'direction': 'egress', 'port_range_min': 33, 'port_range_max': 0, 'remote_ip_prefix': None} rules = [rule] section_id = 'section-id' group_id = 'nsgroup-id' target_id = 'dummy' self.nsxlib.firewall_section.create_rules( None, section_id, group_id, False, "ALLOW", rules, {rule_id: target_id}) add_rules.assert_called_once_with([ {'display_name': mock.ANY, 'ip_protocol': 'IPV4', 'direction': 'OUT', 'services': [{'service': { 'protocol': 'ICMPv4', 'icmp_type': 33, 'icmp_code': 0, 'resource_type': 'ICMPTypeNSService'}}], 'disabled': False, 'destinations': [{'target_id': target_id, 'target_type': 'NSGroup'}], 'sources': [{'target_id': group_id, 'target_type': 'NSGroup'}], 'logged': False, 'action': 'ALLOW'}], section_id) def test_create_rule_with_illegal_icmp(self): rule_id = uuidutils.generate_uuid() rule = {'id': rule_id, 'ethertype': 'IPv4', 'protocol': 'icmp', 'direction': 'egress', 'port_range_min': 2, 'port_range_max': 3, 'remote_ip_prefix': None} rules = [rule] section_id = 'section-id' group_id = 'nsgroup-id' target_id = 'dummy' with mock.patch("vmware_nsxlib.v3.NsxLib.get_version", return_value="2.3.0"): self.assertRaises(nsxlib_exc.InvalidInput, self.nsxlib.firewall_section.create_rules, None, section_id, group_id, False, "ALLOW", rules, {rule_id: target_id}) with mock.patch("vmware_nsxlib.v3.NsxLib.get_version", return_value="2.4.0"): self.assertRaises(nsxlib_exc.InvalidInput, self.nsxlib.firewall_section.create_rules, None, section_id, group_id, False, "ALLOW", rules, {rule_id: target_id}) def test_create_rule_with_illegal_icmp_2_4(self): rule_id = uuidutils.generate_uuid() rule = {'id': rule_id, 'ethertype': 'IPv4', 'protocol': 'icmp', 'direction': 'egress', 'port_range_min': 9, 'port_range_max': 16, 'remote_ip_prefix': None} rules = [rule] section_id = 'section-id' group_id = 'nsgroup-id' 
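        # This type/code pair (9, 16) is asserted to be rejected with
        # InvalidInput when the backend reports version 2.4.0 (see the
        # assertRaises below).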
target_id = 'dummy' with mock.patch("vmware_nsxlib.v3.NsxLib.get_version", return_value="2.4.0"): self.assertRaises(nsxlib_exc.InvalidInput, self.nsxlib.firewall_section.create_rules, None, section_id, group_id, False, "ALLOW", rules, {rule_id: target_id}) def test_create_with_rules(self): expected_body = { 'display_name': 'display-name', 'description': 'section-description', 'stateful': True, 'section_type': "LAYER3", 'applied_tos': [], 'rules': [{ 'display_name': 'rule-name', 'direction': 'IN_OUT', 'ip_protocol': "IPV4_IPV6", 'action': "ALLOW", 'logged': False, 'disabled': False, 'sources': [], 'destinations': [], 'services': [] }], 'tags': [] } with mock.patch.object(self.nsxlib.client, 'create') as create: rule = self.nsxlib.firewall_section.get_rule_dict('rule-name') self.nsxlib.firewall_section.create_with_rules( 'display-name', 'section-description', rules=[rule]) resource = 'firewall/sections?operation=insert_bottom' \ '&action=create_with_rules' create.assert_called_with(resource, expected_body, headers=None) def test_get_excludelist(self): with mock.patch.object(self.nsxlib.client, 'list') as clist: self.nsxlib.firewall_section.get_excludelist() clist.assert_called_with('firewall/excludelist') def test_update(self): fws_tags = [{"scope": "name", "tag": "new_name"}] with mock.patch.object(self.nsxlib.client, 'update') as update: with mock.patch.object(self.nsxlib.client, 'get') as get: get.return_value = {} self.nsxlib.firewall_section.update('fw_section_id', tags_update=fws_tags) resource = 'firewall/sections/%s' % 'fw_section_id' data = {'tags': fws_tags} update.assert_called_with(resource, data, headers=None) class TestNsxLibIPSet(nsxlib_testcase.NsxClientTestCase): """Tests for vmware_nsxlib.v3.security.NsxLibIPSet""" def test_get_ipset_reference(self): mock_ip_set = uuidutils.generate_uuid() result = self.nsxlib.ip_set.get_ipset_reference( mock_ip_set) expected = { 'target_id': mock_ip_set, 'target_type': const.IP_SET } self.assertEqual(expected, result) def test_create_ip_set(self): fake_ip_set = test_constants.FAKE_IP_SET.copy() data = { 'display_name': fake_ip_set['display_name'], 'ip_addresses': fake_ip_set['ip_addresses'], 'description': 'ipset-desc', 'tags': [] } with mock.patch.object(self.nsxlib.client, 'create') as create: self.nsxlib.ip_set.create( fake_ip_set['display_name'], 'ipset-desc', ip_addresses=fake_ip_set['ip_addresses']) resource = 'ip-sets' create.assert_called_with(resource, data) def test_delete_ip_set(self): with mock.patch.object(self.nsxlib.client, 'delete') as delete: fake_ip_set = test_constants.FAKE_IP_SET.copy() self.nsxlib.ip_set.delete(fake_ip_set['id']) delete.assert_called_with('ip-sets/%s' % fake_ip_set['id']) def test_update_ip_set(self): fake_ip_set = test_constants.FAKE_IP_SET.copy() new_ip_addresses = ['10.0.0.0'] data = { 'id': fake_ip_set['id'], 'display_name': fake_ip_set['display_name'], 'ip_addresses': new_ip_addresses, 'resource_type': 'IPSet' } with mock.patch.object(self.nsxlib.client, 'get', return_value=fake_ip_set): with mock.patch.object(self.nsxlib.client, 'update') as update: self.nsxlib.ip_set.update( fake_ip_set['id'], ip_addresses=new_ip_addresses) resource = 'ip-sets/%s' % fake_ip_set['id'] update.assert_called_with(resource, data, headers=None) def test_update_ip_set_empty_ip_addresses(self): fake_ip_set = test_constants.FAKE_IP_SET.copy() new_ip_addresses = [] data = { 'id': fake_ip_set['id'], 'display_name': fake_ip_set['display_name'], 'ip_addresses': new_ip_addresses, 'resource_type': 'IPSet' } with 
mock.patch.object(self.nsxlib.client, 'get', return_value=fake_ip_set): with mock.patch.object(self.nsxlib.client, 'update') as update: self.nsxlib.ip_set.update( fake_ip_set['id'], ip_addresses=new_ip_addresses) resource = 'ip-sets/%s' % fake_ip_set['id'] update.assert_called_with(resource, data, headers=None) def test_update_ip_set_callback(self): def update_payload_cbk(revised_payload, payload): payload['ip_addresses'] = (revised_payload['ip_addresses'] + payload['ip_addresses']) fake_ip_set = test_constants.FAKE_IP_SET.copy() new_ip_addresses = ['10.0.0.0'] updated_ip_addresses = fake_ip_set['ip_addresses'] + new_ip_addresses data = { 'id': fake_ip_set['id'], 'display_name': fake_ip_set['display_name'], 'ip_addresses': updated_ip_addresses, 'resource_type': 'IPSet' } with mock.patch.object(self.nsxlib.client, 'get', return_value=fake_ip_set): with mock.patch.object(self.nsxlib.client, 'update') as update: self.nsxlib.ip_set.update( fake_ip_set['id'], ip_addresses=new_ip_addresses, update_payload_cbk=update_payload_cbk) resource = 'ip-sets/%s' % fake_ip_set['id'] update.assert_called_with(resource, data, headers=None) class TestNsxLibNSGroup(nsxlib_testcase.NsxClientTestCase): """Tests for vmware_nsxlib.v3.security.NsxLibNSGroup""" def test_get_nsgroup_complex_expression(self): port_tags = {'app': 'foo', 'project': 'myproject'} port_exp = [self.nsxlib.ns_group.get_port_tag_expression(k, v) for k, v in six.iteritems(port_tags)] complex_exp = self.nsxlib.ns_group.get_nsgroup_complex_expression( expressions=port_exp) expected_exp = {'resource_type': const.NSGROUP_COMPLEX_EXP, 'expressions': port_exp} self.assertEqual(expected_exp, complex_exp) def test_update(self): nsg_tags = [{"scope": "name", "tag": "new_name"}] membership_criteria = [] with mock.patch.object(self.nsxlib.client, 'update') as update: with mock.patch.object(self.nsxlib.client, 'get') as get: get.return_value = {} self.nsxlib.ns_group.update( 'nsgroupid', tags_update=nsg_tags, membership_criteria=membership_criteria) resource = 'ns-groups/nsgroupid' data = {'tags': nsg_tags, 'membership_criteria': membership_criteria} update.assert_called_with(resource, data, headers=None) def test_update_nsgroup_and_section(self): security_group = { 'name': 'name', 'id': uuidutils.generate_uuid(), 'description': None, 'logging': False} nsgroup_id = uuidutils.generate_uuid() section_id = uuidutils.generate_uuid() log_sg_allowed_traffic = True with mock.patch.object(self.nsxlib.client, 'update') as update_mock,\ mock.patch.object(self.nsxlib.client, 'get') as get_mock: self.nsxlib.ns_group.update_nsgroup_and_section( security_group, nsgroup_id, section_id, log_sg_allowed_traffic) # updating the nsgroup and the section self.assertEqual(2, update_mock.call_count) # getting the rules, and get before each update self.assertEqual(3, get_mock.call_count) def test_update_lport_nsgroups(self): nsgroup_id1 = uuidutils.generate_uuid() nsgroup_id2 = uuidutils.generate_uuid() lport_id = uuidutils.generate_uuid() original_nsgroups = [nsgroup_id1] updated_nsgroups = [nsgroup_id2] with mock.patch('vmware_nsxlib.v3.security.NsxLibNsGroup.' 'remove_member') as remove_mock,\ mock.patch('vmware_nsxlib.v3.security.NsxLibNsGroup.' 
'add_members') as add_mock: self.nsxlib.ns_group.update_lport_nsgroups( lport_id, original_nsgroups, updated_nsgroups) add_mock.assert_called_once_with(nsgroup_id2, 'LogicalPort', [lport_id]) remove_mock.assert_called_once_with(nsgroup_id1, 'LogicalPort', lport_id) vmware-nsxlib-15.0.6/vmware_nsxlib/tests/unit/v3/test_cluster.py0000664000175000017500000004425613623151571025065 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import unittest import mock from requests import exceptions as requests_exceptions from requests import models import six.moves.urllib.parse as urlparse from vmware_nsxlib.tests.unit.v3 import mocks from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase from vmware_nsxlib import v3 from vmware_nsxlib.v3 import client from vmware_nsxlib.v3 import client_cert from vmware_nsxlib.v3 import cluster from vmware_nsxlib.v3 import exceptions as nsxlib_exc def _validate_conn_up(*args, **kwargs): return def _validate_conn_down(*args, **kwargs): raise requests_exceptions.ConnectionError() def get_sess_create_resp(): sess_create_response = models.Response() sess_create_response.status_code = 200 sess_create_response.headers = {'Set-Cookie': 'JSESSIONID=abc;'} return sess_create_response class RequestsHTTPProviderTestCase(unittest.TestCase): def test_new_connection(self): mock_api = mock.Mock() mock_api.nsxlib_config = mock.Mock() mock_api.nsxlib_config.username = 'nsxuser' mock_api.nsxlib_config.password = 'nsxpassword' mock_api.nsxlib_config.retries = 100 mock_api.nsxlib_config.insecure = True mock_api.nsxlib_config.token_provider = None mock_api.nsxlib_config.ca_file = None mock_api.nsxlib_config.http_timeout = 99 mock_api.nsxlib_config.conn_idle_timeout = 39 mock_api.nsxlib_config.client_cert_provider = None provider = cluster.NSXRequestsHTTPProvider() with mock.patch.object(cluster.TimeoutSession, 'request', return_value=get_sess_create_resp()): session = provider.new_connection( mock_api, cluster.Provider('9.8.7.6', 'https://9.8.7.6', 'nsxuser', 'nsxpassword', None)) self.assertEqual(('nsxuser', 'nsxpassword'), session.auth) self.assertFalse(session.verify) self.assertIsNone(session.cert) self.assertEqual(100, session.adapters['https://'].max_retries.total) self.assertEqual(99, session.timeout) def test_new_connection_with_client_auth(self): mock_api = mock.Mock() mock_api.nsxlib_config = mock.Mock() mock_api.nsxlib_config.retries = 100 mock_api.nsxlib_config.insecure = True mock_api.nsxlib_config.ca_file = None mock_api.nsxlib_config.http_timeout = 99 mock_api.nsxlib_config.conn_idle_timeout = 39 cert_provider_inst = client_cert.ClientCertProvider( '/etc/cert.pem') mock_api.nsxlib_config.client_cert_provider = cert_provider_inst provider = cluster.NSXRequestsHTTPProvider() with mock.patch.object(cluster.TimeoutSession, 'request', return_value=get_sess_create_resp()): session = provider.new_connection( mock_api, cluster.Provider('9.8.7.6', 'https://9.8.7.6', None, None, None)) self.assertIsNone(session.auth) 
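            # With a client certificate provider configured, no basic-auth
            # credentials are set on the session; the configured cert
            # provider is attached instead (asserted on
            # session.cert_provider below).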
self.assertFalse(session.verify) self.assertEqual(cert_provider_inst, session.cert_provider) self.assertEqual(99, session.timeout) @mock.patch("vmware_nsxlib.v3.cluster.NSXRequestsHTTPProvider." "get_default_headers") def test_new_connection_with_token_provider(self, mock_get_def_headers): mock_api = mock.Mock() mock_api.nsxlib_config = mock.Mock() mock_api.nsxlib_config.retries = 100 mock_api.nsxlib_config.insecure = True mock_api.nsxlib_config.ca_file = None mock_api.nsxlib_config.http_timeout = 99 mock_api.nsxlib_config.conn_idle_timeout = 39 mock_api.nsxlib_config.client_cert_provider = None token_provider_inst = mock.Mock() mock_api.nsxlib_config.token_provider = token_provider_inst mock_api.nsxlib_config.allow_overwrite_header = False provider = cluster.NSXRequestsHTTPProvider() cluster_provider = cluster.Provider('9.8.7.6', 'https://9.8.7.6', 'nsxuser', 'nsxpassword', None) with mock.patch.object(cluster.TimeoutSession, 'request', return_value=get_sess_create_resp()): session = provider.new_connection(mock_api, cluster_provider) self.assertIsNone(session.auth) self.assertFalse(session.verify) self.assertIsNone(session.cert) self.assertEqual(100, session.adapters['https://'].max_retries.total) self.assertEqual(99, session.timeout) mock_get_def_headers.assert_called_once_with( mock.ANY, cluster_provider, False, token_provider_inst) @mock.patch("vmware_nsxlib.v3.cluster.NSXHTTPAdapter.__init__") def test_new_connection_with_ca_file(self, mock_adaptor_init): mock_api = mock.Mock() mock_api.nsxlib_config = mock.Mock() mock_api.nsxlib_config.retries = 100 mock_api.nsxlib_config.insecure = False mock_adaptor_init.return_value = None provider = cluster.NSXRequestsHTTPProvider() with mock.patch.object(cluster.TimeoutSession, 'request', return_value=get_sess_create_resp()): session = provider.new_connection( mock_api, cluster.Provider('9.8.7.6', 'https://9.8.7.6', None, None, "ca_file")) self.assertEqual("ca_file", session.verify) mock_adaptor_init.assert_called_once_with( pool_connections=1, pool_maxsize=1, max_retries=100, pool_block=False, thumbprint=None) @mock.patch("vmware_nsxlib.v3.cluster.NSXHTTPAdapter.__init__") def test_new_connection_with_thumbprint(self, mock_adaptor_init): mock_api = mock.Mock() mock_api.nsxlib_config = mock.Mock() mock_api.nsxlib_config.retries = 100 mock_api.nsxlib_config.insecure = False mock_adaptor_init.return_value = None provider = cluster.NSXRequestsHTTPProvider() with mock.patch.object(cluster.TimeoutSession, 'request', return_value=get_sess_create_resp()): session = provider.new_connection( mock_api, cluster.Provider('9.8.7.6', 'https://9.8.7.6', None, None, None, "thumbprint")) self.assertIsNone(session.verify) mock_adaptor_init.assert_called_once_with( pool_connections=1, pool_maxsize=1, max_retries=100, pool_block=False, thumbprint="thumbprint") def test_validate_connection_keep_alive(self): mock_conn = mocks.MockRequestSessionApi() mock_conn.default_headers = {} mock_ep = mock.Mock() mock_ep.provider.url = 'https://1.2.3.4' mock_cluster = mock.Mock() mock_cluster.nsxlib_config = mock.Mock() mock_cluster.nsxlib_config.url_base = 'abc' mock_cluster.nsxlib_config.keepalive_section = 'transport-zones' provider = cluster.NSXRequestsHTTPProvider() with mock.patch.object(client.JSONRESTClient, "get", return_value={'result_count': 0}): self.assertRaises(nsxlib_exc.ResourceNotFound, provider.validate_connection, mock_cluster, mock_ep, mock_conn) with mock.patch.object(client.JSONRESTClient, "get", return_value={'result_count': 1}): 
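            # A keepalive query that returns at least one result means the
            # endpoint is treated as healthy, so no exception is expected
            # here (the zero-result branch above raises ResourceNotFound).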
provider.validate_connection(mock_cluster, mock_ep, mock_conn) def _validate_con_mocks(self, nsx_version): nsxlib_config = nsxlib_testcase.get_default_nsxlib_config() nsxlib = v3.NsxLib(nsxlib_config) nsxlib.nsx_version = nsx_version mock_conn = mocks.MockRequestSessionApi() mock_conn.default_headers = {} mock_ep = mock.Mock() mock_ep.provider.url = 'https://1.2.3.4' conf = mock.Mock() conf.url_base = 'abc' conf.keepalive_section = 'transport-zones' conf.validate_connection_method = nsxlib.validate_connection_method mock_cluster = mock.Mock() mock_cluster.nsxlib_config = conf return (mock_cluster, mock_ep, mock_conn) def test_validate_connection_method_v1(self): mock_cluster, mock_ep, mock_conn = self._validate_con_mocks('2.3.0') provider = cluster.NSXRequestsHTTPProvider() with mock.patch.object(client.JSONRESTClient, "get", return_value={'application_status': 'DOWN'}): self.assertRaises(nsxlib_exc.ResourceNotFound, provider.validate_connection, mock_cluster, mock_ep, mock_conn) with mock.patch.object(client.JSONRESTClient, "get", return_value={'application_status': 'WORKING'}): provider.validate_connection(mock_cluster, mock_ep, mock_conn) def test_validate_connection_method_v2(self): mock_cluster, mock_ep, mock_conn = self._validate_con_mocks('2.4.0') provider = cluster.NSXRequestsHTTPProvider() with mock.patch.object(client.JSONRESTClient, "get", return_value={'healthy': False}): self.assertRaises(nsxlib_exc.ResourceNotFound, provider.validate_connection, mock_cluster, mock_ep, mock_conn) with mock.patch.object(client.JSONRESTClient, "get", return_value={'healthy': True}): provider.validate_connection(mock_cluster, mock_ep, mock_conn) class NSXHTTPAdapterTestCase(nsxlib_testcase.NsxClientTestCase): @mock.patch("requests.adapters.HTTPAdapter.init_poolmanager") def test_init_poolmanager(self, mock_init_poolmanager): cluster.NSXHTTPAdapter(thumbprint="thumbprint") mock_init_poolmanager.assert_called_once_with( mock.ANY, mock.ANY, block=mock.ANY, assert_fingerprint="thumbprint") class NsxV3ClusteredAPITestCase(nsxlib_testcase.NsxClientTestCase): def _assert_providers(self, cluster_api, provider_tuples): self.assertEqual(len(cluster_api.providers), len(provider_tuples)) def _assert_provider(pid, purl): for provider in cluster_api.providers: if provider.id == pid and provider.url == purl: return self.fail("Provider: %s not found" % pid) for provider_tuple in provider_tuples: _assert_provider(provider_tuple[0], provider_tuple[1]) def test_conf_providers_no_scheme(self): conf_managers = ['8.9.10.11', '9.10.11.12:4433'] api = self.new_mocked_cluster(conf_managers, _validate_conn_up) self._assert_providers( api, [(p, "https://%s" % p) for p in conf_managers]) def test_conf_providers_with_scheme(self): conf_managers = ['http://8.9.10.11:8080', 'https://9.10.11.12:4433'] api = self.new_mocked_cluster(conf_managers, _validate_conn_up) self._assert_providers( api, [(urlparse.urlparse(p).netloc, p) for p in conf_managers]) def test_http_retries(self): api = self.mock_nsx_clustered_api(retries=9) with api.endpoints['1.2.3.4'].pool.item() as session: self.assertEqual( session.adapters['https://'].max_retries.total, 9) def test_conns_per_pool(self): conf_managers = ['8.9.10.11', '9.10.11.12:4433'] api = self.new_mocked_cluster( conf_managers, _validate_conn_up, concurrent_connections=11) for ep_id, ep in api.endpoints.items(): self.assertEqual(ep.pool.max_size, 11) def test_timeouts(self): api = self.mock_nsx_clustered_api(http_read_timeout=37, http_timeout=7) api.get('logical-ports') mock_call = 
api.recorded_calls.method_calls[0] name, args, kwargs = mock_call self.assertEqual(kwargs['timeout'], (7, 37)) # Repeat the above tests with client cert present # in NsxLib initialization class NsxV3ClusteredAPIWithClientCertTestCase(NsxV3ClusteredAPITestCase): def use_client_cert_auth(self): return True class ClusteredAPITestCase(nsxlib_testcase.NsxClientTestCase): def _test_health(self, validate_fn, expected_health): conf_managers = ['8.9.10.11', '9.10.11.12'] api = self.new_mocked_cluster(conf_managers, validate_fn) self.assertEqual(expected_health, api.health) def test_orange_health(self): def _validate(cluster_api, endpoint, conn): if endpoint.provider.id == '8.9.10.11': raise Exception() self._test_health(_validate, cluster.ClusterHealth.ORANGE) def test_green_health(self): self._test_health(_validate_conn_up, cluster.ClusterHealth.GREEN) def test_red_health(self): self._test_health(_validate_conn_down, cluster.ClusterHealth.RED) def test_cluster_validate_with_exception(self): conf_managers = ['8.9.10.11', '9.10.11.12', '10.11.12.13'] api = self.new_mocked_cluster(conf_managers, _validate_conn_down) self.assertEqual(3, len(api.endpoints)) self.assertRaises(nsxlib_exc.ServiceClusterUnavailable, api.get, 'api/v1/transport-zones') def test_cluster_proxy_stale_revision(self): def stale_revision(): raise nsxlib_exc.StaleRevision(manager='1.1.1.1', operation='whatever') api = self.mock_nsx_clustered_api(session_response=stale_revision) self.assertRaises(nsxlib_exc.StaleRevision, api.get, 'api/v1/transport-zones') def test_cluster_proxy_connection_establish_error(self): def connect_timeout(): raise requests_exceptions.ConnectTimeout() api = self.mock_nsx_clustered_api(session_response=connect_timeout) api._validate = mock.Mock() self.assertRaises(nsxlib_exc.ServiceClusterUnavailable, api.get, 'api/v1/transport-zones') def test_cluster_proxy_connection_aborted(self): def connect_timeout(): raise requests_exceptions.ConnectionError("Connection Aborted") def all_good(): pass # First call will cause connection aborted error, but next one # should work api = self.mock_nsx_clustered_api(session_response=[connect_timeout, all_good]) api._validate = mock.Mock() self.assertEqual(cluster.ClusterHealth.GREEN, api.health) def test_cluster_round_robin_servicing(self): conf_managers = ['8.9.10.11', '9.10.11.12', '10.11.12.13'] api = self.mock_nsx_clustered_api(nsx_api_managers=conf_managers) api._validate = mock.Mock() eps = list(api._endpoints.values()) def _get_schedule(num_eps): return [api._select_endpoint() for i in range(num_eps)] self.assertEqual(_get_schedule(3), eps) self.assertEqual(_get_schedule(6), [eps[0], eps[1], eps[2], eps[0], eps[1], eps[2]]) eps[0]._state = cluster.EndpointState.DOWN self.assertEqual(_get_schedule(4), [eps[1], eps[2], eps[1], eps[2]]) eps[1]._state = cluster.EndpointState.DOWN self.assertEqual(_get_schedule(2), [eps[2], eps[2]]) eps[0]._state = cluster.EndpointState.UP self.assertEqual(_get_schedule(4), [eps[0], eps[2], eps[0], eps[2]]) def test_cluster_select_endpoint(self): conf_managers = ['8.9.10.11', '9.10.11.12', '10.11.12.13'] max_attempts = 3 api = self.mock_nsx_clustered_api(nsx_api_managers=conf_managers, max_attempts=max_attempts) api._validate = mock.Mock() eps = list(api._endpoints.values()) # all up - select the first one self.assertEqual(api._select_endpoint(), eps[0]) # run again - select the 2nd self.assertEqual(api._select_endpoint(), eps[1]) # all down - return None eps[0]._state = cluster.EndpointState.DOWN eps[1]._state = cluster.EndpointState.DOWN 
eps[2]._state = cluster.EndpointState.DOWN self.assertEqual(api._select_endpoint(), None) # up till now the validate method should not have been called self.assertEqual(api._validate.call_count, 0) # set up the retries flag, and check that validate was called # until retries have been exhausted api.nsxlib_config.cluster_unavailable_retry = True self.assertEqual(api._select_endpoint(), None) self.assertEqual(api._validate.call_count, api.nsxlib_config.max_attempts * len(eps)) # simulate the case where 1 endpoint finally goes up self.validate_count = 0 self.max_validate = max_attempts - 1 def _mock_validate(ep): if self.validate_count >= self.max_validate: ep._state = cluster.EndpointState.UP self.validate_count += 1 api._validate = _mock_validate self.assertEqual(api._select_endpoint(), eps[(self.max_validate - 1) % len(eps)]) self.assertEqual(self.validate_count, self.max_validate + 1) def test_reinitialize_cluster(self): with mock.patch.object(cluster.TimeoutSession, 'request', return_value=get_sess_create_resp()): api = self.mock_nsx_clustered_api() # just make sure this api is defined, and does not crash api._reinit_cluster() vmware-nsxlib-15.0.6/vmware_nsxlib/tests/unit/v3/test_cluster_management.py0000664000175000017500000000201313623151571027242 0ustar zuulzuul00000000000000# Copyright 2019 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import mock from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase class TestNsxLibClusterManagement(nsxlib_testcase.NsxClientTestCase): def test_get_restore_status(self): cluster_api = self.nsxlib.cluster_management with mock.patch.object(self.nsxlib.client, 'get') as get: cluster_api.get_restore_status() get.assert_called_with('cluster/restore/status') vmware-nsxlib-15.0.6/vmware_nsxlib/tests/unit/v3/test_router.py0000664000175000017500000002122513623151571024713 0ustar zuulzuul00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
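# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original archive): the cluster
# management test above shows the pattern used throughout these unit tests --
# patch the underlying REST client and assert only on the request path the
# resource API builds. FakeRestoreAPI and demo_assert_request_path are
# hypothetical names used to demonstrate that pattern in isolation.
# ---------------------------------------------------------------------------
import mock  # the archive's tests use the external 'mock' package as well


class FakeRestoreAPI(object):
    """Hypothetical stand-in for an nsxlib resource API wrapper."""

    def __init__(self, client):
        self.client = client

    def get_restore_status(self):
        # Build the REST path and delegate the HTTP call to the client.
        return self.client.get('cluster/restore/status')


def demo_assert_request_path():
    client = mock.Mock()
    api = FakeRestoreAPI(client)
    api.get_restore_status()
    # Assert only the URI that the API constructed, not the HTTP layer.
    client.get.assert_called_with('cluster/restore/status')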
# import copy import mock from oslo_utils import uuidutils from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase from vmware_nsxlib.tests.unit.v3 import test_constants from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3 import nsx_constants class TestRouter(nsxlib_testcase.NsxClientTestCase): def test_validate_tier0(self): tier0_groups_dict = {} tier0_uuid = uuidutils.generate_uuid() rtr = {'edge_cluster_id': test_constants.FAKE_EDGE_CLUSTER_ID} with mock.patch.object(self.nsxlib.router._router_client, 'get', return_value=rtr),\ mock.patch.object( self.nsxlib.edge_cluster, 'get', return_value=test_constants.FAKE_EDGE_CLUSTER): self.nsxlib.router.validate_tier0(tier0_groups_dict, tier0_uuid) self.assertEqual( tier0_groups_dict[tier0_uuid]['edge_cluster_uuid'], test_constants.FAKE_EDGE_CLUSTER_ID) self.assertEqual( tier0_groups_dict[tier0_uuid]['member_index_list'], [0]) def test_validate_tier0_fail(self): tier0_groups_dict = {} tier0_uuid = uuidutils.generate_uuid() edge_cluster = copy.copy(test_constants.FAKE_EDGE_CLUSTER) edge_cluster['members'] = [] with mock.patch.object(self.nsxlib.router._router_client, 'get'),\ mock.patch.object(self.nsxlib.edge_cluster, 'get', return_value=edge_cluster): self.assertRaises( nsxlib_exc.NsxLibInvalidInput, self.nsxlib.router.validate_tier0, tier0_groups_dict, tier0_uuid) def test_add_router_link_port(self): tags = [{'scope': 'a', 'tag': 'b'}] tier0_uuid = uuidutils.generate_uuid() tier1_uuid = uuidutils.generate_uuid() with mock.patch.object(self.nsxlib.router._router_port_client, 'create') as port_create: tier0_link_port = mock.MagicMock() tier1_link_port = mock.MagicMock() port_create.side_effect = [tier0_link_port, tier1_link_port] self.assertEqual( (tier0_link_port, tier1_link_port), self.nsxlib.router.add_router_link_port( tier1_uuid, tier0_uuid, tags)) self.assertEqual(port_create.call_count, 2) def test_remove_router_link_port(self): tier1_uuid = uuidutils.generate_uuid() with mock.patch.object( self.nsxlib.router._router_port_client, 'get_tier1_link_port', return_value=test_constants.FAKE_ROUTER_LINKT1_PORT) as port_get,\ mock.patch.object(self.nsxlib.router._router_port_client, 'delete') as port_delete: self.nsxlib.router.remove_router_link_port(tier1_uuid) self.assertEqual(port_get.call_count, 1) self.assertEqual(port_delete.call_count, 2) def test_add_centralized_service_port(self): logical_router_id = uuidutils.generate_uuid() logical_port_id = uuidutils.generate_uuid() display_name = mock.Mock() tags = mock.Mock() address_groups = mock.Mock() port = mock.Mock() with mock.patch.object( self.nsxlib.router._router_port_client, 'create', return_value=port) as create_port: csp = self.nsxlib.router.add_centralized_service_port( logical_router_id, display_name=display_name, tags=tags, logical_port_id=logical_port_id, address_groups=address_groups) create_port.assert_called_once_with( logical_router_id, display_name=display_name, tags=tags, logical_port_id=logical_port_id, address_groups=address_groups, resource_type=nsx_constants.LROUTERPORT_CENTRALIZED) self.assertEqual(csp, port) def test_create_logical_router_intf_port_by_ls_id(self): logical_router_id = uuidutils.generate_uuid() display_name = 'dummy' tags = [] ls_id = uuidutils.generate_uuid() logical_switch_port_id = uuidutils.generate_uuid() address_groups = [] with mock.patch.object( self.nsxlib.router._router_port_client, "get_by_lswitch_id", side_effect=nsxlib_exc.ResourceNotFound()) as get_port,\ mock.patch.object(self.nsxlib.router._router_port_client, 
"create") as create_port: self.nsxlib.router.create_logical_router_intf_port_by_ls_id( logical_router_id, display_name, tags, ls_id, logical_switch_port_id, address_groups) get_port.assert_called_once_with(ls_id) create_port.assert_called_once_with( logical_router_id, display_name, tags, nsx_constants.LROUTERPORT_DOWNLINK, logical_switch_port_id, address_groups, urpf_mode=None, relay_service_uuid=None) def test_add_fip_nat_rules(self): with mock.patch.object(self.nsxlib.logical_router, "add_nat_rule") as add_rule: self.nsxlib.router.add_fip_nat_rules( test_constants.FAKE_ROUTER_UUID, '1.1.1.1', '2.2.2.2') self.assertEqual(add_rule.call_count, 2) def test_get_tier0_router_tz(self): tier0_uuid = uuidutils.generate_uuid() self.nsxlib.feature_supported = mock.MagicMock() self.nsxlib.feature_supported.return_value = False with mock.patch.object(self.nsxlib.router._router_client, 'get', return_value=test_constants.FAKE_TIERO_ROUTER),\ mock.patch.object(self.nsxlib.edge_cluster, 'get', return_value=test_constants.FAKE_EDGE_CLUSTER),\ mock.patch.object(self.nsxlib.transport_node, 'get', return_value=test_constants.FAKE_TRANS_NODE): tzs = self.nsxlib.router.get_tier0_router_tz(tier0_uuid) self.assertEqual(tzs, [test_constants.FAKE_TZ_UUID]) def test_get_tier0_router_overlay_tz(self): tier0_uuid = uuidutils.generate_uuid() self.nsxlib.feature_supported = mock.MagicMock() self.nsxlib.feature_supported.return_value = False with mock.patch.object(self.nsxlib.router._router_client, 'get', return_value=test_constants.FAKE_TIERO_ROUTER),\ mock.patch.object(self.nsxlib.edge_cluster, 'get', return_value=test_constants.FAKE_EDGE_CLUSTER),\ mock.patch.object(self.nsxlib.transport_node, 'get', return_value=test_constants.FAKE_TRANS_NODE),\ mock.patch.object(self.nsxlib.transport_zone, 'get_transport_type', return_value="OVERLAY"): tz = self.nsxlib.router.get_tier0_router_overlay_tz(tier0_uuid) self.assertEqual(tz, test_constants.FAKE_TZ_UUID) def test_get_tier0_router_overlay_tz_via_advanced_config(self): tier0_uuid = uuidutils.generate_uuid() with mock.patch.object(self.nsxlib.router._router_client, 'get', return_value=test_constants.FAKE_TIERO_ROUTER): tz = self.nsxlib.router.get_tier0_router_overlay_tz(tier0_uuid) self.assertEqual(tz, test_constants.FAKE_TZ_UUID) def test_get_connected_t0_transit_net(self): t1_uuid = uuidutils.generate_uuid() transit_net = '1.1.1.0' link_port = { 'subnets': [{'ip_addresses': [transit_net], 'prefix_length': '31'}]} with mock.patch.object(self.nsxlib.router._router_port_client, 'get_tier1_link_port', return_value=link_port) as get_port: net = self.nsxlib.router.get_connected_t0_transit_net(t1_uuid) get_port.assert_called_with(t1_uuid) self.assertEqual('%s' % (transit_net), net) vmware-nsxlib-15.0.6/vmware_nsxlib/tests/unit/v3/test_resources.py0000664000175000017500000032015213623151571025406 0ustar zuulzuul00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import copy import eventlet import mock from oslo_serialization import jsonutils from oslo_utils import uuidutils from vmware_nsxlib.tests.unit.v3 import mocks from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase from vmware_nsxlib.tests.unit.v3 import test_client from vmware_nsxlib.tests.unit.v3 import test_constants from vmware_nsxlib.v3 import core_resources from vmware_nsxlib.v3 import exceptions from vmware_nsxlib.v3 import nsx_constants from vmware_nsxlib.v3 import resources from vmware_nsxlib.v3 import utils class BaseTestResource(nsxlib_testcase.NsxClientTestCase): """Base class for resources tests Contains tests for the simple get/list/delete apis and an api to get the mocked resource """ def setUp(self, resource=None): self.resource = resource super(BaseTestResource, self).setUp() def get_mocked_resource(self, mock_validate=True, response=None, response_repeat=1): session_response = None if response: session_response = mocks.MockRequestsResponse( 200, jsonutils.dumps(response)) if response_repeat > 1: session_response = [session_response] * response_repeat return self.mocked_resource( self.resource, mock_validate=mock_validate, session_response=session_response) def test_get_resource(self): if not self.resource: return mocked_resource = self.get_mocked_resource() fake_uuid = uuidutils.generate_uuid() mocked_resource.get(fake_uuid) test_client.assert_json_call( 'get', mocked_resource, 'https://1.2.3.4/api/v1/%s/%s' % (mocked_resource.uri_segment, fake_uuid), headers=self.default_headers()) def test_list_all(self): if not self.resource: return mocked_resource = self.get_mocked_resource() mocked_resource.list() test_client.assert_json_call( 'get', mocked_resource, 'https://1.2.3.4/api/v1/%s' % mocked_resource.uri_segment, headers=self.default_headers()) def test_delete_resource(self, extra_params=None): if not self.resource: return mocked_resource = self.get_mocked_resource() fake_uuid = uuidutils.generate_uuid() mocked_resource.delete(fake_uuid) uri = 'https://1.2.3.4/api/v1/%s/%s' % (mocked_resource.uri_segment, fake_uuid) if extra_params: uri = uri + '?' 
+ extra_params test_client.assert_json_call( 'delete', mocked_resource, uri, headers=self.default_headers()) class TestSwitchingProfileTestCase(BaseTestResource): def setUp(self): self.types = resources.SwitchingProfileTypes super(TestSwitchingProfileTestCase, self).setUp( resources.SwitchingProfile) def test_switching_profile_create(self): mocked_resource = self.get_mocked_resource() mocked_resource.create(self.types.PORT_MIRRORING, 'pm-profile', 'port mirror prof') test_client.assert_json_call( 'post', mocked_resource, 'https://1.2.3.4/api/v1/switching-profiles', data=jsonutils.dumps({ 'resource_type': self.types.PORT_MIRRORING, 'display_name': 'pm-profile', 'description': 'port mirror prof' }, sort_keys=True), headers=self.default_headers()) def test_switching_profile_update(self): tags = [ { 'scope': 'os-project-id', 'tag': 'project-1' }, { 'scope': 'os-api-version', 'tag': '2.1.1.0' } ] mocked_resource = self.get_mocked_resource() fake_uuid = uuidutils.generate_uuid() mocked_resource.update( fake_uuid, self.types.PORT_MIRRORING, tags=tags) test_client.assert_json_call( 'put', mocked_resource, 'https://1.2.3.4/api/v1/switching-profiles/%s' % fake_uuid, data=jsonutils.dumps({ 'resource_type': self.types.PORT_MIRRORING, 'tags': tags }, sort_keys=True), headers=self.default_headers()) def test_spoofgaurd_profile_create(self): tags = [ { 'scope': 'os-project-id', 'tag': 'project-1' }, { 'scope': 'os-api-version', 'tag': '2.1.1.0' } ] mocked_resource = self.get_mocked_resource() mocked_resource.create_spoofguard_profile( 'plugin-spoof', 'spoofguard-for-plugin', whitelist_ports=True, tags=tags) test_client.assert_json_call( 'post', mocked_resource, 'https://1.2.3.4/api/v1/switching-profiles', data=jsonutils.dumps({ 'resource_type': self.types.SPOOF_GUARD, 'display_name': 'plugin-spoof', 'description': 'spoofguard-for-plugin', 'white_list_providers': ['LPORT_BINDINGS'], 'tags': tags }, sort_keys=True), headers=self.default_headers()) def test_create_dhcp_profile(self): tags = [ { 'scope': 'os-project-id', 'tag': 'project-1' }, { 'scope': 'os-api-version', 'tag': '2.1.1.0' } ] mocked_resource = self.get_mocked_resource() mocked_resource.create_dhcp_profile( 'plugin-dhcp', 'dhcp-for-plugin', tags=tags) test_client.assert_json_call( 'post', mocked_resource, 'https://1.2.3.4/api/v1/switching-profiles', data=jsonutils.dumps({ 'bpdu_filter': { 'enabled': True, 'white_list': [] }, 'resource_type': self.types.SWITCH_SECURITY, 'display_name': 'plugin-dhcp', 'description': 'dhcp-for-plugin', 'tags': tags, 'dhcp_filter': { 'client_block_enabled': True, 'server_block_enabled': False }, 'rate_limits': { 'enabled': False, 'rx_broadcast': 0, 'tx_broadcast': 0, 'rx_multicast': 0, 'tx_multicast': 0 }, 'block_non_ip_traffic': True }, sort_keys=True), headers=self.default_headers()) def test_create_mac_learning_profile(self): tags = [ { 'scope': 'os-project-id', 'tag': 'project-1' }, { 'scope': 'os-api-version', 'tag': '2.1.1.0' } ] mocked_resource = self.get_mocked_resource() mocked_resource.create_mac_learning_profile( 'plugin-mac-learning', 'mac-learning-for-plugin', tags=tags) test_client.assert_json_call( 'post', mocked_resource, 'https://1.2.3.4/api/v1/switching-profiles', data=jsonutils.dumps({ 'mac_learning': { 'enabled': True, 'unicast_flooding_allowed': True, }, 'resource_type': self.types.MAC_LEARNING, 'display_name': 'plugin-mac-learning', 'description': 'mac-learning-for-plugin', 'tags': tags, 'mac_change_allowed': True, }, sort_keys=True), headers=self.default_headers()) def 
test_create_mac_learning_disabled_profile(self): tags = [ { 'scope': 'os-project-id', 'tag': 'project-1' }, { 'scope': 'os-api-version', 'tag': '2.1.1.0' } ] mocked_resource = self.get_mocked_resource() mocked_resource.create_mac_learning_profile( 'plugin-mac-learning', 'mac-learning-for-plugin', mac_learning_enabled=False, tags=tags) test_client.assert_json_call( 'post', mocked_resource, 'https://1.2.3.4/api/v1/switching-profiles', data=jsonutils.dumps({ 'mac_learning': { 'enabled': False, 'unicast_flooding_allowed': False, }, 'resource_type': self.types.MAC_LEARNING, 'display_name': 'plugin-mac-learning', 'description': 'mac-learning-for-plugin', 'tags': tags, 'mac_change_allowed': True, }, sort_keys=True), headers=self.default_headers()) def test_find_by_display_name(self): resp_resources = { 'results': [ {'display_name': 'resource-1'}, {'display_name': 'resource-2'}, {'display_name': 'resource-3'} ] } mocked_resource = self.get_mocked_resource(response=resp_resources, response_repeat=3) self.assertEqual([{'display_name': 'resource-1'}], mocked_resource.find_by_display_name('resource-1')) self.assertEqual([{'display_name': 'resource-2'}], mocked_resource.find_by_display_name('resource-2')) self.assertEqual([{'display_name': 'resource-3'}], mocked_resource.find_by_display_name('resource-3')) resp_resources = { 'results': [ {'display_name': 'resource-1'}, {'display_name': 'resource-1'}, {'display_name': 'resource-1'} ] } mocked_resource = self.get_mocked_resource(response=resp_resources) self.assertEqual(resp_resources['results'], mocked_resource.find_by_display_name('resource-1')) def test_list_all(self): mocked_resource = self.get_mocked_resource() mocked_resource.list() test_client.assert_json_call( 'get', mocked_resource, 'https://1.2.3.4/api/v1/switching-profiles/' '?include_system_owned=True', data=None, headers=self.default_headers()) class LogicalPortTestCase(BaseTestResource): def setUp(self): super(LogicalPortTestCase, self).setUp(resources.LogicalPort) def _get_profile_dicts(self, fake_port): fake_profile_dicts = [] for profile_id in fake_port['switching_profile_ids']: fake_profile_dicts.append({'resource_type': profile_id['key'], 'id': profile_id['value']}) return fake_profile_dicts def _get_extra_config_dicts(self, fake_port): fake_extra_config_dicts = fake_port['extra_configs'] return fake_extra_config_dicts def _get_pktcls_bindings(self): fake_pkt_classifiers = [] fake_binding_repr = [] for i in range(0, 3): ip = "9.10.11.%s" % i mac = "00:0c:29:35:4a:%sc" % i fake_pkt_classifiers.append(resources.PacketAddressClassifier( ip, mac, None)) fake_binding_repr.append({ 'ip_address': ip, 'mac_address': mac }) return fake_pkt_classifiers, fake_binding_repr def test_create_logical_port(self): """Test creating a port. 
returns the correct response and 200 status """ fake_port = test_constants.FAKE_PORT.copy() profile_dicts = self._get_profile_dicts(fake_port) pkt_classifiers, binding_repr = self._get_pktcls_bindings() mocked_resource = self.get_mocked_resource() description = 'dummy' switch_profile = resources.SwitchingProfile mocked_resource.create( fake_port['logical_switch_id'], fake_port['attachment']['id'], address_bindings=pkt_classifiers, switch_profile_ids=switch_profile.build_switch_profile_ids( mock.Mock(), *profile_dicts), description=description) resp_body = { 'logical_switch_id': fake_port['logical_switch_id'], 'switching_profile_ids': fake_port['switching_profile_ids'], 'attachment': { 'attachment_type': 'VIF', 'id': fake_port['attachment']['id'] }, 'admin_state': 'UP', 'address_bindings': binding_repr, 'description': description } test_client.assert_json_call( 'post', mocked_resource, 'https://1.2.3.4/api/v1/logical-ports', data=jsonutils.dumps(resp_body, sort_keys=True), headers=self.default_headers()) def test_create_logical_port_with_attachtype_cif(self): """Test creating a port returns the correct response and 200 status """ fake_port = copy.deepcopy(test_constants.FAKE_CONTAINER_PORT) profile_dicts = self._get_profile_dicts(fake_port) pkt_classifiers, binding_repr = self._get_pktcls_bindings() fake_port['address_bindings'] = binding_repr mocked_resource = self.get_mocked_resource() switch_profile = resources.SwitchingProfile fake_port_ctx = fake_port['attachment']['context'] fake_container_host_vif_id = fake_port_ctx['container_host_vif_id'] mocked_resource.create( fake_port['logical_switch_id'], fake_port['attachment']['id'], parent_vif_id=fake_container_host_vif_id, traffic_tag=fake_port_ctx['vlan_tag'], address_bindings=pkt_classifiers, switch_profile_ids=switch_profile.build_switch_profile_ids( mock.Mock(), *profile_dicts), vif_type=fake_port_ctx['vif_type'], app_id=fake_port_ctx['app_id'], allocate_addresses=fake_port_ctx['allocate_addresses']) resp_body = { 'logical_switch_id': fake_port['logical_switch_id'], 'switching_profile_ids': fake_port['switching_profile_ids'], 'attachment': { 'attachment_type': 'VIF', 'id': fake_port['attachment']['id'], 'context': { 'resource_type': 'VifAttachmentContext', 'allocate_addresses': 'Both', 'parent_vif_id': fake_container_host_vif_id, 'traffic_tag': fake_port_ctx['vlan_tag'], 'app_id': fake_port_ctx['app_id'], 'vif_type': 'CHILD', } }, 'admin_state': 'UP', 'address_bindings': fake_port['address_bindings'] } test_client.assert_json_call( 'post', mocked_resource, 'https://1.2.3.4/api/v1/logical-ports', data=jsonutils.dumps(resp_body, sort_keys=True), headers=self.default_headers()) def test_create_logical_port_admin_down(self): """Test creating port with admin_state down.""" fake_port = test_constants.FAKE_PORT fake_port['admin_state'] = "DOWN" mocked_resource = self.get_mocked_resource(response=fake_port) result = mocked_resource.create( test_constants.FAKE_PORT['logical_switch_id'], test_constants.FAKE_PORT['attachment']['id'], tags={}, admin_state=False) self.assertEqual(fake_port, result) def test_create_logical_port_with_tn_uuid(self): """Test creating port with transport_node_uuid.""" fake_port = copy.deepcopy(test_constants.FAKE_CONTAINER_PORT) fake_port['parent_vif_id'] = None fake_port_ctx = fake_port['attachment']['context'] fake_port_ctx['vif_type'] = 'INDEPENDENT' fake_port_ctx['transport_node_uuid'] = test_constants.FAKE_TN_UUID profile_dicts = self._get_profile_dicts(fake_port) pkt_classifiers, binding_repr = 
            self._get_pktcls_bindings()
        fake_port['address_bindings'] = binding_repr
        mocked_resource = self.get_mocked_resource()
        switch_profile = resources.SwitchingProfile
        mocked_resource.create(
            fake_port['logical_switch_id'],
            fake_port['attachment']['id'],
            traffic_tag=fake_port_ctx['vlan_tag'],
            address_bindings=pkt_classifiers,
            switch_profile_ids=switch_profile.build_switch_profile_ids(
                mock.Mock(), *profile_dicts),
            vif_type=fake_port_ctx['vif_type'],
            app_id=fake_port_ctx['app_id'],
            allocate_addresses=fake_port_ctx['allocate_addresses'],
            tn_uuid=fake_port_ctx['transport_node_uuid'])

        resp_body = {
            'logical_switch_id': fake_port['logical_switch_id'],
            'switching_profile_ids': fake_port['switching_profile_ids'],
            'attachment': {
                'attachment_type': 'VIF',
                'id': fake_port['attachment']['id'],
                'context': {
                    'resource_type': 'VifAttachmentContext',
                    'allocate_addresses': 'Both',
                    'app_id': fake_port_ctx['app_id'],
                    'vif_type': 'INDEPENDENT',
                    'transport_node_uuid': test_constants.FAKE_TN_UUID,
                }
            },
            'admin_state': 'UP',
            'address_bindings': fake_port['address_bindings']
        }
        test_client.assert_json_call(
            'post', mocked_resource,
            'https://1.2.3.4/api/v1/logical-ports',
            data=jsonutils.dumps(resp_body, sort_keys=True),
            headers=self.default_headers())

    def test_delete_resource(self):
        """Test deleting port."""
        super(LogicalPortTestCase, self).test_delete_resource(
            extra_params='detach=true')

    def test_get_logical_port_by_attachment(self):
        """Test getting a port by attachment type and id."""
        mocked_resource = self.get_mocked_resource()
        attachment_type = nsx_constants.ATTACHMENT_DHCP
        attachment_id = '1234'
        mocked_resource.get_by_attachment(attachment_type, attachment_id)
        test_client.assert_json_call(
            'get', mocked_resource,
            "https://1.2.3.4/api/v1/logical-ports/?attachment_type=%s"
            "&attachment_id=%s" % (attachment_type, attachment_id),
            headers=self.default_headers())

    def test_get_logical_port_by_switch(self):
        """Test getting ports by logical switch id."""
        ls_id = '111'
        mocked_resource = self.get_mocked_resource()
        mocked_resource.get_by_logical_switch(ls_id)
        test_client.assert_json_call(
            'get', mocked_resource,
            "https://1.2.3.4/api/v1/logical-ports/?logical_switch_id"
            "=%s" % ls_id,
            headers=self.default_headers())

    def test_clear_port_bindings(self):
        fake_port = copy.copy(test_constants.FAKE_PORT)
        fake_port['address_bindings'] = ['a', 'b']
        mocked_resource = self.get_mocked_resource()

        def get_fake_port(*args, **kwargs):
            return copy.copy(fake_port)
        mocked_resource.client.get = get_fake_port

        mocked_resource.update(
            fake_port['id'], fake_port['attachment']['id'],
            address_bindings=[])

        fake_port['address_bindings'] = []
        test_client.assert_json_call(
            'put', mocked_resource,
            'https://1.2.3.4/api/v1/logical-ports/%s' % fake_port['id'],
            data=jsonutils.dumps(fake_port, sort_keys=True),
            headers=self.default_headers())

    def test_create_logical_port_fail(self):
        """Test the failure of port creation."""
        fake_port = test_constants.FAKE_PORT.copy()
        profile_dicts = self._get_profile_dicts(fake_port)
        pkt_classifiers, binding_repr = self._get_pktcls_bindings()
        fake_port['address_bindings'] = binding_repr
        mocked_resource = self.get_mocked_resource(mock_validate=False)
        switch_profile = resources.SwitchingProfile
        try:
            mocked_resource.create(
                fake_port['logical_switch_id'],
                fake_port['attachment']['id'],
                address_bindings=pkt_classifiers,
                switch_profile_ids=switch_profile.build_switch_profile_ids(
                    mock.Mock(), *profile_dicts))
        except exceptions.ManagerError as e:
            self.assertIn(nsxlib_testcase.NSX_MANAGER, e.msg)

    def test_update_logical_port_no_addr_binding(self):
        fake_port =
copy.deepcopy(test_constants.FAKE_CONTAINER_PORT) mocked_resource = self.get_mocked_resource() new_name = 'updated_port' new_desc = 'updated' fake_port_ctx = fake_port['attachment']['context'] fake_container_host_vif_id = fake_port_ctx['container_host_vif_id'] def get_fake_port(*args, **kwargs): return copy.copy(fake_port) mocked_resource.client.get = get_fake_port mocked_resource.update( fake_port['id'], fake_port['attachment']['id'], name=new_name, description=new_desc, parent_vif_id=fake_container_host_vif_id, traffic_tag=fake_port_ctx['vlan_tag'], vif_type=fake_port_ctx['vif_type'], app_id=fake_port_ctx['app_id'], allocate_addresses=fake_port_ctx['allocate_addresses']) fake_port['display_name'] = new_name fake_port['description'] = new_desc fake_port['attachment'] = { 'attachment_type': 'VIF', 'id': fake_port['attachment']['id'], 'context': { 'resource_type': 'VifAttachmentContext', 'allocate_addresses': 'Both', 'parent_vif_id': fake_container_host_vif_id, 'traffic_tag': fake_port_ctx['vlan_tag'], 'app_id': fake_port_ctx['app_id'], 'vif_type': 'CHILD', } } test_client.assert_json_call( 'put', mocked_resource, 'https://1.2.3.4/api/v1/logical-ports/%s' % fake_port['id'], data=jsonutils.dumps(fake_port, sort_keys=True), headers=self.default_headers()) def test_update_logical_port_with_addr_binding(self): fake_port = copy.deepcopy(test_constants.FAKE_CONTAINER_PORT) mocked_resource = self.get_mocked_resource() new_name = 'updated_port' new_desc = 'updated' fake_port_ctx = fake_port['attachment']['context'] fake_container_host_vif_id = fake_port_ctx['container_host_vif_id'] pkt_classifiers, binding_repr = self._get_pktcls_bindings() def get_fake_port(*args, **kwargs): return copy.copy(fake_port) mocked_resource.client.get = get_fake_port mocked_resource.update( fake_port['id'], fake_port['attachment']['id'], name=new_name, description=new_desc, parent_vif_id=fake_container_host_vif_id, traffic_tag=fake_port_ctx['vlan_tag'], vif_type=fake_port_ctx['vif_type'], app_id=fake_port_ctx['app_id'], allocate_addresses=fake_port_ctx['allocate_addresses'], address_bindings=pkt_classifiers) fake_port['display_name'] = new_name fake_port['description'] = new_desc fake_port['attachment'] = { 'attachment_type': 'VIF', 'id': fake_port['attachment']['id'], 'context': { 'resource_type': 'VifAttachmentContext', 'allocate_addresses': 'Both', 'parent_vif_id': fake_container_host_vif_id, 'traffic_tag': fake_port_ctx['vlan_tag'], 'app_id': fake_port_ctx['app_id'], 'vif_type': 'CHILD', } } fake_port['address_bindings'] = binding_repr test_client.assert_json_call( 'put', mocked_resource, 'https://1.2.3.4/api/v1/logical-ports/%s' % fake_port['id'], data=jsonutils.dumps(fake_port, sort_keys=True), headers=self.default_headers()) def test_update_logical_port_with_extra_configs(self): fake_port = copy.deepcopy(test_constants.FAKE_CONTAINER_PORT) mocked_resource = self.get_mocked_resource() new_name = 'updated_port' new_desc = 'updated' fake_port_ctx = fake_port['attachment']['context'] fake_container_host_vif_id = fake_port_ctx['container_host_vif_id'] extra_configs = self._get_extra_config_dicts(fake_port) def get_fake_port(*args, **kwargs): return copy.copy(fake_port) mocked_resource.client.get = get_fake_port mocked_resource.update( fake_port['id'], fake_port['attachment']['id'], name=new_name, description=new_desc, parent_vif_id=fake_container_host_vif_id, traffic_tag=fake_port_ctx['vlan_tag'], vif_type=fake_port_ctx['vif_type'], app_id=fake_port_ctx['app_id'], 
allocate_addresses=fake_port_ctx['allocate_addresses'], extra_configs=extra_configs) fake_port['display_name'] = new_name fake_port['description'] = new_desc fake_port['attachment'] = { 'attachment_type': 'VIF', 'id': fake_port['attachment']['id'], 'context': { 'resource_type': 'VifAttachmentContext', 'allocate_addresses': 'Both', 'parent_vif_id': fake_container_host_vif_id, 'traffic_tag': fake_port_ctx['vlan_tag'], 'app_id': fake_port_ctx['app_id'], 'vif_type': 'CHILD', } } test_client.assert_json_call( 'put', mocked_resource, 'https://1.2.3.4/api/v1/logical-ports/%s' % fake_port['id'], data=jsonutils.dumps(fake_port, sort_keys=True), headers=self.default_headers()) def test_update_port_tags_update_add(self): """Test adding / removing tags using tags_update attribute""" fake_port = copy.deepcopy(test_constants.FAKE_PORT) orig_tags = [{'scope': 'a1', 'tag': 'b1'}, {'scope': 'a2', 'tag': 'b2'}] fake_port['tags'] = orig_tags # Add a new tag tags_update = [{'scope': 'a3', 'tag': 'b3'}] mocked_resource = self.get_mocked_resource() def get_fake_port(*args, **kwargs): return copy.copy(fake_port) mocked_resource.client.get = get_fake_port mocked_resource.update( fake_port['id'], fake_port['attachment']['id'], tags_update=tags_update) # update expected result: fake_port['tags'] = orig_tags + tags_update test_client.assert_json_call( 'put', mocked_resource, 'https://1.2.3.4/api/v1/logical-ports/%s' % fake_port['id'], data=jsonutils.dumps(fake_port, sort_keys=True), headers=self.default_headers()) def test_update_port_tags_update_remove(self): """Test adding / removing tags using tags_update attribute""" fake_port = copy.deepcopy(test_constants.FAKE_PORT) orig_tags = [{'scope': 'a1', 'tag': 'b1'}, {'scope': 'a2', 'tag': 'b2'}] fake_port['tags'] = orig_tags # Add a new tag tags_update = [{'scope': 'a1', 'tag': None}] mocked_resource = self.get_mocked_resource() def get_fake_port(*args, **kwargs): return copy.copy(fake_port) mocked_resource.client.get = get_fake_port mocked_resource.update( fake_port['id'], fake_port['attachment']['id'], tags_update=tags_update) # update expected result: fake_port['tags'] = [{'scope': 'a2', 'tag': 'b2'}] test_client.assert_json_call( 'put', mocked_resource, 'https://1.2.3.4/api/v1/logical-ports/%s' % fake_port['id'], data=jsonutils.dumps(fake_port, sort_keys=True), headers=self.default_headers()) def test_update_port_tags(self): """Test modifying tags using tags attribute""" fake_port = copy.deepcopy(test_constants.FAKE_PORT) orig_tags = [{'scope': 'a1', 'tag': 'b1'}, {'scope': 'a2', 'tag': 'b2'}] fake_port['tags'] = orig_tags # Add a new tag new_tags = [{'scope': 'a3', 'tag': 'b3'}] mocked_resource = self.get_mocked_resource() def get_fake_port(*args, **kwargs): return copy.copy(fake_port) mocked_resource.client.get = get_fake_port mocked_resource.update( fake_port['id'], fake_port['attachment']['id'], tags=new_tags) # update expected result: fake_port['tags'] = new_tags test_client.assert_json_call( 'put', mocked_resource, 'https://1.2.3.4/api/v1/logical-ports/%s' % fake_port['id'], data=jsonutils.dumps(fake_port, sort_keys=True), headers=self.default_headers()) def test_update_port_with_force_true(self): """Test modifying tags using tags attribute with force true""" fake_port = copy.deepcopy(test_constants.FAKE_PORT) orig_tags = [{'scope': 'a1', 'tag': 'b1'}, {'scope': 'a2', 'tag': 'b2'}] fake_port['tags'] = orig_tags # Add a new tag new_tags = [{'scope': 'a3', 'tag': 'b3'}] mocked_resource = self.get_mocked_resource() def get_fake_port(*args, **kwargs): return 
copy.copy(fake_port) mocked_resource.client.get = get_fake_port mocked_resource.update( fake_port['id'], fake_port['attachment']['id'], tags=new_tags, force=True) # update expected result: fake_port['tags'] = new_tags headers = self.default_headers() headers['X-Allow-Overwrite'] = 'true' test_client.assert_json_call( 'put', mocked_resource, 'https://1.2.3.4/api/v1/logical-ports/%s' % fake_port['id'], data=jsonutils.dumps(fake_port, sort_keys=True), headers=headers) class LogicalRouterTestCase(BaseTestResource): def setUp(self): super(LogicalRouterTestCase, self).setUp( core_resources.NsxLibLogicalRouter) def test_create_logical_router_v1_1(self): """Test creating a router returns the correct response and 201 status. """ fake_router = test_constants.FAKE_ROUTER.copy() router = self.get_mocked_resource() tier0_router = True description = 'dummy' tz_id = 'tz_id' failover_mode = 'PREEMPTIVE' allocation_pool = { 'allocation_pool_type': 'LoadBalancerAllocationPool', 'allocation_size': 'SMALL' } with mock.patch("vmware_nsxlib.v3.NsxLib.get_version", return_value='1.1.0'): router.create(fake_router['display_name'], None, None, tier0_router, description=description, transport_zone_id=tz_id, allocation_pool=allocation_pool, failover_mode=failover_mode) data = { 'display_name': fake_router['display_name'], 'router_type': 'TIER0' if tier0_router else 'TIER1', 'tags': None, 'description': description, 'advanced_config': {'transport_zone_id': tz_id}, 'failover_mode': failover_mode, 'allocation_profile': { 'allocation_pool': allocation_pool } } test_client.assert_json_call( 'post', router, 'https://1.2.3.4/api/v1/logical-routers', data=jsonutils.dumps(data, sort_keys=True), headers=self.default_headers()) def test_create_logical_router(self): """Test creating a router returns the correct response and 201 status. 
""" fake_router = test_constants.FAKE_ROUTER.copy() router = self.get_mocked_resource() tier0_router = True description = 'dummy' tz_id = 'tz_id' allocation_pool = { 'allocation_pool_type': 'LoadBalancerAllocationPool', 'allocation_size': 'SMALL' } enable_standby_relocation = True with mock.patch("vmware_nsxlib.v3.NsxLib.get_version", return_value='2.4.0'): router.create(fake_router['display_name'], None, None, tier0_router, description=description, transport_zone_id=tz_id, allocation_pool=allocation_pool, enable_standby_relocation=enable_standby_relocation) data = { 'display_name': fake_router['display_name'], 'router_type': 'TIER0' if tier0_router else 'TIER1', 'tags': None, 'description': description, 'advanced_config': {'transport_zone_id': tz_id}, 'allocation_profile': { 'allocation_pool': allocation_pool, 'enable_standby_relocation': enable_standby_relocation } } test_client.assert_json_call( 'post', router, 'https://1.2.3.4/api/v1/logical-routers', data=jsonutils.dumps(data, sort_keys=True), headers=self.default_headers()) def test_update_logical_router(self): fake_router = test_constants.FAKE_ROUTER.copy() router = self.get_mocked_resource() uuid = fake_router['id'] name = 'dummy' description = 'dummy' edge_cluster_id = 'ec_id' tz_id = 'tz_id' enable_standby_relocation = True with mock.patch.object(router.client, 'get', return_value=fake_router),\ mock.patch("vmware_nsxlib.v3.NsxLib.get_version", return_value='2.4.0'): router.update(uuid, display_name=name, description=description, edge_cluster_id=edge_cluster_id, transport_zone_id=tz_id, enable_standby_relocation=enable_standby_relocation) fake_router["display_name"] = name fake_router["description"] = description fake_router["edge_cluster_id"] = edge_cluster_id fake_router["advanced_config"]['transport_zone_id'] = tz_id test_client.assert_json_call( 'put', router, 'https://1.2.3.4/api/v1/logical-routers/%s' % uuid, data=jsonutils.dumps(fake_router, sort_keys=True), headers=self.default_headers()) def test_force_delete_logical_router(self): """Test force deleting router""" router = self.get_mocked_resource() uuid = test_constants.FAKE_ROUTER['id'] router.delete(uuid, True) test_client.assert_json_call( 'delete', router, 'https://1.2.3.4/api/v1/logical-routers/%s?force=True' % uuid, headers=self.default_headers()) def test_list_logical_router_by_type(self): router = self.get_mocked_resource() router_type = 'TIER0' router.list(router_type=router_type) test_client.assert_json_call( 'get', router, 'https://1.2.3.4/api/v1/logical-routers?router_type=%s' % router_type) def test_get_logical_router_fw_section(self): fake_router = test_constants.FAKE_ROUTER.copy() router = self.get_mocked_resource() section_id = router.get_firewall_section_id( test_constants.FAKE_ROUTER_UUID, router_body=fake_router) self.assertEqual(test_constants.FAKE_ROUTER_FW_SEC_UUID, section_id) def _test_nat_rule_create(self, nsx_version, add_bypas_arg=True, action='SNAT', expect_failure=False): router = self.get_mocked_resource() translated_net = '1.1.1.1' priority = 10 display_name = 'fake_name' data = { 'action': action, 'display_name': display_name, 'enabled': True, 'translated_network': translated_net, 'rule_priority': priority } if add_bypas_arg: # Expect nat_pass to be sent to the backend data['nat_pass'] = False # Ignoring 'bypass_firewall' with version 1.1 with mock.patch("vmware_nsxlib.v3.NsxLib.get_version", return_value=nsx_version): try: router.add_nat_rule(test_constants.FAKE_ROUTER_UUID, action=action, translated_network=translated_net, 
                                    rule_priority=priority,
                                    bypass_firewall=False,
                                    display_name=display_name)
            except exceptions.InvalidInput as e:
                if expect_failure:
                    return
                else:
                    self.fail("Failed to create NAT rule: %s" % e)
        test_client.assert_json_call(
            'post', router,
            ('https://1.2.3.4/api/v1/logical-routers/%s/nat/rules' %
             test_constants.FAKE_ROUTER_UUID),
            data=jsonutils.dumps(data, sort_keys=True),
            headers=self.default_headers())

    def test_nat_rule_create_v1(self):
        # 'bypass_firewall' is ignored with version 1.1
        self._test_nat_rule_create('1.1.0', add_bypas_arg=False)

    def test_nat_rule_create_v2(self):
        # 'bypass_firewall' is sent to the backend with version 2.0
        self._test_nat_rule_create('2.0.0')

    def test_nat_rule_create_v22_NO_DNAT(self):
        # NO_DNAT is supported from 2.2 & up
        self._test_nat_rule_create('2.2.0', action='NO_DNAT')

    def test_nat_rule_create_v2_NO_DNAT(self):
        # NO_DNAT is supported from 2.2 & up
        self._test_nat_rule_create('2.0.0', action='NO_DNAT',
                                   expect_failure=True)

    def test_nat_rule_create_invalid(self):
        # An unknown NAT action should be rejected
        self._test_nat_rule_create('2.0.0', action='INVALID',
                                   expect_failure=True)

    def test_nat_rule_list(self):
        router = self.get_mocked_resource()
        router.list_nat_rules(test_constants.FAKE_ROUTER_UUID)
        test_client.assert_json_call(
            'get', router,
            ('https://1.2.3.4/api/v1/logical-routers/%s/nat/rules' %
             test_constants.FAKE_ROUTER_UUID),
            headers=self.default_headers())

    def test_nat_rule_update(self):
        router = self.get_mocked_resource()
        rule_id = '123'
        with mock.patch.object(router.client, 'get',
                               return_value={'id': rule_id}):
            router.update_nat_rule(test_constants.FAKE_ROUTER_UUID,
                                   rule_id, nat_pass=False)
        data = {'id': rule_id, 'nat_pass': False}
        test_client.assert_json_call(
            'put', router,
            ('https://1.2.3.4/api/v1/logical-routers/%s/nat/rules/%s' %
             (test_constants.FAKE_ROUTER_UUID, rule_id)),
            data=jsonutils.dumps(data, sort_keys=True),
            headers=self.default_headers())

    def test_delete_nat_rule_by_gw(self):
        router = self.get_mocked_resource()
        rule_id = '123'
        router_id = test_constants.FAKE_ROUTER_UUID
        gw_ip = '3.3.3.3'
        existing_rules = [{
            'translated_network': gw_ip,
            'logical_router_id': router_id,
            'id': rule_id,
            'action': 'SNAT',
            'resource_type': 'NatRule'}]
        with mock.patch.object(router.client, 'list',
                               return_value={'results': existing_rules}):
            router.delete_nat_rule_by_values(router_id,
                                             translated_network=gw_ip)
        test_client.assert_json_call(
            'delete', router,
            ('https://1.2.3.4/api/v1/logical-routers/%s/nat/rules/%s' %
             (router_id, rule_id)),
            headers=self.default_headers())

    def test_delete_nat_rule_by_gw_and_source(self):
        router = self.get_mocked_resource()
        rule_id = '123'
        router_id = test_constants.FAKE_ROUTER_UUID
        gw_ip = '3.3.3.3'
        source_net = '4.4.4.4'
        existing_rules = [{
            'translated_network': gw_ip,
            'logical_router_id': router_id,
            'id': rule_id,
            'match_source_network': source_net,
            'action': 'SNAT',
            'resource_type': 'NatRule'}]
        with mock.patch.object(router.client, 'list',
                               return_value={'results': existing_rules}):
            router.delete_nat_rule_by_values(router_id,
                                             translated_network=gw_ip,
                                             match_source_network=source_net)
        test_client.assert_json_call(
            'delete', router,
            ('https://1.2.3.4/api/v1/logical-routers/%s/nat/rules/%s' %
             (router_id, rule_id)),
            headers=self.default_headers())

    def test_change_edge_firewall(self):
        router = self.get_mocked_resource()
        router_id = test_constants.FAKE_ROUTER_UUID
        router.change_edge_firewall_status(router_id,
                                           nsx_constants.FW_DISABLE)
        test_client.assert_json_call(
            'post', router,
            ('https://1.2.3.4/api/v1/firewall/status/logical_routers/%s'
             '?action=%s' %
(router_id, nsx_constants.FW_DISABLE)), headers=self.default_headers()) def test_add_static_route(self): router = self.get_mocked_resource() router_id = test_constants.FAKE_ROUTER_UUID dest_cidr = '10.0.0.1/24' next_hop = '1.1.1.1' tags = [{'mock_tags'}] expected_payload = {'network': dest_cidr, 'next_hops': [{'ip_address': next_hop}], 'tags': tags} router.add_static_route(router_id, dest_cidr, next_hop, tags=tags) test_client.assert_json_call( 'post', router, ('https://1.2.3.4/api/v1/logical-routers/%s/routing/' 'static-routes' % router_id), data=jsonutils.dumps(expected_payload, sort_keys=True), headers=self.default_headers()) def test_update_advertisement(self): router = self.get_mocked_resource() router_id = test_constants.FAKE_ROUTER_UUID data = {'advertise_nat_routes': 'a', 'advertise_nsx_connected_routes': 'b', 'advertise_static_routes': False, 'enabled': True, 'advertise_lb_vip': False, 'advertise_lb_snat_ip': False} with mock.patch("vmware_nsxlib.v3.NsxLib.get_version", return_value='2.1.0'), \ mock.patch.object(router.client, 'get', return_value={}): router.update_advertisement( router_id, **data) test_client.assert_json_call( 'put', router, ('https://1.2.3.4/api/v1/logical-routers/%s/routing/' 'advertisement' % router_id), data=jsonutils.dumps(data, sort_keys=True), headers=self.default_headers()) def test_update_advertisement_no_lb(self): router = self.get_mocked_resource() router_id = test_constants.FAKE_ROUTER_UUID data = {'advertise_nat_routes': 'a', 'advertise_nsx_connected_routes': 'b', 'advertise_static_routes': False, 'enabled': True} with mock.patch("vmware_nsxlib.v3.NsxLib.get_version", return_value='1.1.0'), \ mock.patch.object(router.client, 'get', return_value={}): # lb args will be ignored on this nsx version router.update_advertisement( router_id, advertise_lb_vip=False, advertise_lb_snat_ip=False, **data) test_client.assert_json_call( 'put', router, ('https://1.2.3.4/api/v1/logical-routers/%s/routing/' 'advertisement' % router_id), data=jsonutils.dumps(data, sort_keys=True), headers=self.default_headers()) def test_update_advertisement_rules(self): router = self.get_mocked_resource() router_id = test_constants.FAKE_ROUTER_UUID rules = [{"action": "ALLOW", "networks": ["44.0.0.0/20"], "display_name": "rule1"}, {"action": "ALLOW", "networks": ["6.60.0.0/20"], "display_name": "rule2"}] with mock.patch.object(router.client, 'get', return_value={}): router.update_advertisement_rules(router_id, rules) test_client.assert_json_call( 'put', router, ('https://1.2.3.4/api/v1/logical-routers/%s/routing/' 'advertisement/rules' % router_id), data=jsonutils.dumps({'rules': rules}, sort_keys=True), headers=self.default_headers()) def test_update_advertisement_rules_force(self): router = self.get_mocked_resource() router_id = test_constants.FAKE_ROUTER_UUID rules = [{"action": "ALLOW", "networks": ["44.0.0.0/20"], "display_name": "rule1"}, {"action": "ALLOW", "networks": ["6.60.0.0/20"], "display_name": "rule2"}] headers = self.default_headers() expected_headers = headers.copy() expected_headers['X-Allow-Overwrite'] = 'true' with mock.patch.object(router.client, 'get', return_value={}): router.update_advertisement_rules(router_id, rules, force=True) test_client.assert_json_call( 'put', router, ('https://1.2.3.4/api/v1/logical-routers/%s/routing/' 'advertisement/rules' % router_id), data=jsonutils.dumps({'rules': rules}, sort_keys=True), headers=expected_headers) def test_update_advertisement_rules_with_replace(self): router = self.get_mocked_resource() router_id = 
test_constants.FAKE_ROUTER_UUID rule_name_prefix = 'test' orig_rules = [{"action": "ALLOW", "networks": ["44.0.0.0/20"], "display_name": "%s rule1" % rule_name_prefix}, {"action": "ALLOW", "networks": ["6.60.0.0/20"], "display_name": "keep rule2"}] new_rules = [{"action": "ALLOW", "networks": ["99.0.0.0/20"], "display_name": "test rule3"}] expected_rules = [orig_rules[1], new_rules[0]] with mock.patch.object(router.client, 'get', return_value={'rules': orig_rules}): router.update_advertisement_rules(router_id, new_rules, name_prefix=rule_name_prefix) test_client.assert_json_call( 'put', router, ('https://1.2.3.4/api/v1/logical-routers/%s/routing/' 'advertisement/rules' % router_id), data=jsonutils.dumps({'rules': expected_rules}, sort_keys=True), headers=self.default_headers()) def test_get_advertisement_rules(self): router = self.get_mocked_resource() router_id = test_constants.FAKE_ROUTER_UUID router.get_advertisement_rules(router_id) test_client.assert_json_call( 'get', router, ('https://1.2.3.4/api/v1/logical-routers/%s/routing/' 'advertisement/rules' % router_id), headers=self.default_headers()) def test_get_debug_info(self): router = self.get_mocked_resource() router_id = test_constants.FAKE_ROUTER_UUID router.get_debug_info(router_id) test_client.assert_json_call( 'get', router, ('https://1.2.3.4/api/v1/logical-routers/%s/' 'debug-info?format=text' % router_id), headers=self.default_headers()) def test_get_transportzone_id_empty(self): # Tier0 router may fail to provide TZ id if it # is not yet connected with any Tier1 router router = self.get_mocked_resource() router_id = test_constants.FAKE_ROUTER_UUID faked_responds = { 'componentInfo': [{ 'componentType': nsx_constants.ROUTER_TYPE_TIER0_DR, 'transportZoneId': None }] } with mock.patch.object(router.client, 'get', return_value=faked_responds): res = router.get_transportzone_id(router_id) self.assertIsNone(res) def _test_get_transportzone_id(self, router_type): router = self.get_mocked_resource() router_id = test_constants.FAKE_ROUTER_UUID faked_responds = { 'componentInfo': [{ 'componentType': router_type, 'transportZoneId': ['faked_id'] }] } with mock.patch.object(router.client, 'get', return_value=faked_responds): res = router.get_transportzone_id(router_id) self.assertEqual('faked_id', res) def test_get_transportzone_id_from_t0(self): self._test_get_transportzone_id(nsx_constants.ROUTER_TYPE_TIER0_DR) def test_get_transportzone_id_from_t1(self): self._test_get_transportzone_id(nsx_constants.ROUTER_TYPE_TIER1_DR) def test_get_redistribution(self): router = self.get_mocked_resource() router_id = test_constants.FAKE_ROUTER_UUID router.get_redistribution(router_id) test_client.assert_json_call( 'get', router, ('https://1.2.3.4/api/v1/logical-routers/%s/routing/' 'redistribution' % router_id), headers=self.default_headers()) def test_get_redistribution_rules(self): router = self.get_mocked_resource() router_id = test_constants.FAKE_ROUTER_UUID router.get_redistribution_rules(router_id) test_client.assert_json_call( 'get', router, ('https://1.2.3.4/api/v1/logical-routers/%s/routing/' 'redistribution/rules' % router_id), headers=self.default_headers()) def test_update_redistribution_rules(self): router = self.get_mocked_resource() router_id = test_constants.FAKE_ROUTER_UUID rules = mock.Mock() with mock.patch.object(router.client, 'get', return_value={}): router.update_redistribution_rules(router_id, rules) test_client.assert_json_call( 'put', router, ('https://1.2.3.4/api/v1/logical-routers/%s/routing/' 'redistribution/rules' % 
router_id), data=jsonutils.dumps({'rules': rules}), headers=self.default_headers()) def test_get_bgp_config(self): router = self.get_mocked_resource() router_id = test_constants.FAKE_ROUTER_UUID router.get_bgp_config(router_id) test_client.assert_json_call( 'get', router, ('https://1.2.3.4/api/v1/logical-routers/%s/routing/bgp' % router_id), headers=self.default_headers()) def test_get_route_map(self): router = self.get_mocked_resource() router_id = test_constants.FAKE_ROUTER_UUID route_map_id = 'fake_route_map' router.get_route_map(router_id, route_map_id) test_client.assert_json_call( 'get', router, ('https://1.2.3.4/api/v1/logical-routers/%s/routing/route-maps/%s' % (router_id, route_map_id)), headers=self.default_headers()) def test_get_ip_prefix_list(self): router = self.get_mocked_resource() router_id = test_constants.FAKE_ROUTER_UUID ip_prefix_list_id = 'fake_ip_prefix_list' router.get_ip_prefix_list(router_id, ip_prefix_list_id) test_client.assert_json_call( 'get', router, ('https://1.2.3.4/api/v1/logical-routers/%s/routing/' 'ip-prefix-lists/%s' % (router_id, ip_prefix_list_id)), headers=self.default_headers()) class LogicalRouterPortTestCase(BaseTestResource): def setUp(self): super(LogicalRouterPortTestCase, self).setUp( resources.LogicalRouterPort) def test_create_logical_router_port(self): """Test creating a router port. returns the correct response and 201 status """ fake_router_port = test_constants.FAKE_ROUTER_PORT.copy() fake_relay_uuid = uuidutils.generate_uuid() lrport = self.get_mocked_resource() data = { 'display_name': fake_router_port['display_name'], 'logical_router_id': fake_router_port['logical_router_id'], 'resource_type': fake_router_port['resource_type'], 'tags': [], 'service_bindings': [{'service_id': { 'target_type': 'LogicalService', 'target_id': fake_relay_uuid}}], 'linked_logical_switch_port_id': {'target_id': None} } with mock.patch("vmware_nsxlib.v3.NsxLib.get_version", return_value='2.0.0'): lrport.create(fake_router_port['logical_router_id'], fake_router_port['display_name'], None, fake_router_port['resource_type'], None, None, None, relay_service_uuid=fake_relay_uuid) test_client.assert_json_call( 'post', lrport, 'https://1.2.3.4/api/v1/logical-router-ports', data=jsonutils.dumps(data, sort_keys=True), headers=self.default_headers()) def test_logical_router_port_max_attempts(self): """Test a router port api has the configured retries.""" lrport = self.get_mocked_resource() self.assertEqual(nsxlib_testcase.NSX_MAX_ATTEMPTS, lrport.client.max_attempts) def test_update_logical_router_port(self): fake_router_port = test_constants.FAKE_ROUTER_PORT.copy() uuid = fake_router_port['id'] fake_relay_uuid = uuidutils.generate_uuid() lrport = self.get_mocked_resource() with mock.patch.object(lrport.client, 'get', return_value=fake_router_port),\ mock.patch("vmware_nsxlib.v3.NsxLib.get_version", return_value='2.0.0'): lrport.update(uuid, relay_service_uuid=fake_relay_uuid) fake_router_port['service_bindings'] = [{'service_id': { 'target_type': 'LogicalService', 'target_id': fake_relay_uuid}}] test_client.assert_json_call( 'put', lrport, 'https://1.2.3.4/api/v1/logical-router-ports/%s' % uuid, data=jsonutils.dumps(fake_router_port, sort_keys=True), headers=self.default_headers()) def test_get_logical_router_port_by_router_id(self): """Test getting a router port by router id.""" fake_router_port = test_constants.FAKE_ROUTER_PORT.copy() resp_resources = {'results': [fake_router_port]} lrport = self.get_mocked_resource(response=resp_resources) router_id = 
            fake_router_port['logical_router_id']
        result = lrport.get_by_router_id(router_id)
        self.assertEqual(fake_router_port, result[0])
        test_client.assert_json_call(
            'get', lrport,
            'https://1.2.3.4/api/v1/logical-router-ports/?'
            'logical_router_id=%s' % router_id,
            headers=self.default_headers())

    def test_get_logical_router_port_by_switch_id(self):
        """Test getting a router port by switch id."""
        fake_router_port = test_constants.FAKE_ROUTER_PORT.copy()
        resp_resources = {
            'result_count': 1,
            'results': [fake_router_port]
        }
        lrport = self.get_mocked_resource(response=resp_resources)
        switch_id = test_constants.FAKE_SWITCH_UUID
        lrport.get_by_lswitch_id(switch_id)
        test_client.assert_json_call(
            'get', lrport,
            'https://1.2.3.4/api/v1/logical-router-ports/?'
            'logical_switch_id=%s' % switch_id,
            headers=self.default_headers())

    def test_get_tier1_link_port(self):
        """Test getting the Tier1 router link port by router id."""
        router_id = test_constants.FAKE_ROUTER_PORT['logical_router_id']
        # No ports found - raise an exception
        lrport = self.get_mocked_resource(response={'results': []})
        self.assertRaises(exceptions.ResourceNotFound,
                          lrport.get_tier1_link_port, router_id)
        # Non uplink ports found - raise an exception
        lrport = self.get_mocked_resource(response={'results': [
            test_constants.FAKE_ROUTER_PORT]})
        self.assertRaises(exceptions.ResourceNotFound,
                          lrport.get_tier1_link_port, router_id)
        # uplink port exists
        lrport = self.get_mocked_resource(response={'results': [
            test_constants.FAKE_ROUTER_LINKT1_PORT]})
        result = lrport.get_tier1_link_port(router_id)
        self.assertEqual(test_constants.FAKE_ROUTER_LINKT1_PORT, result)

    def test_get_tier0_uplink_port(self):
        """Test getting a Tier0 router uplink port by router id."""
        router_id = test_constants.FAKE_ROUTER_PORT['logical_router_id']
        # No ports found - return None
        lrport = self.get_mocked_resource(response={'results': []})
        result = lrport.get_tier0_uplink_port(router_id)
        self.assertIsNone(result)
        # Non uplink ports found - return None
        lrport = self.get_mocked_resource(response={'results': [
            test_constants.FAKE_ROUTER_LINKT1_PORT]})
        result = lrport.get_tier0_uplink_port(router_id)
        self.assertIsNone(result)
        # uplink port exists
        lrport = self.get_mocked_resource(response={'results': [
            test_constants.FAKE_ROUTER_PORT]})
        result = lrport.get_tier0_uplink_port(router_id)
        self.assertEqual(test_constants.FAKE_ROUTER_PORT, result)

    def test_get_tier0_uplink_port_ips(self):
        """Test getting the Tier0 router uplink port IPs by router id."""
        router_id = test_constants.FAKE_ROUTER_PORT['logical_router_id']
        # No ports found - return empty list
        lrport = self.get_mocked_resource(response={'results': []})
        result = lrport.get_tier0_uplink_ips(router_id)
        self.assertEqual(0, len(result))
        # uplink port exists, return ips
        lrport = self.get_mocked_resource(response={'results': [
            test_constants.FAKE_ROUTER_PORT]})
        result = lrport.get_tier0_uplink_ips(router_id)
        self.assertEqual(1, len(result))
        self.assertEqual('172.20.1.60', result[0])


class IpPoolTestCase(BaseTestResource):

    def setUp(self):
        super(IpPoolTestCase, self).setUp(resources.IpPool)

    def test_create_ip_pool_all_args(self):
        """Test creating an IP pool

        returns the correct response and 201 status
        """
        pool = self.get_mocked_resource()

        display_name = 'dummy'
        gateway_ip = '1.1.1.1'
        ranges = [{'start': '2.2.2.0', 'end': '2.2.2.255'},
                  {'start': '3.2.2.0', 'end': '3.2.2.255'}]
        cidr = '2.2.2.0/24'
        description = 'desc'
        dns_nameserver = '7.7.7.7'
        pool.create(cidr, allocation_ranges=ranges,
                    display_name=display_name,
                    gateway_ip=gateway_ip,
description=description, dns_nameservers=[dns_nameserver]) data = { 'display_name': display_name, 'description': description, 'subnets': [{ 'gateway_ip': gateway_ip, 'allocation_ranges': ranges, 'cidr': cidr, 'dns_nameservers': [dns_nameserver] }] } test_client.assert_json_call( 'post', pool, 'https://1.2.3.4/api/v1/pools/ip-pools', data=jsonutils.dumps(data, sort_keys=True), headers=self.default_headers()) def test_create_ip_pool_minimal_args(self): pool = self.get_mocked_resource() ranges = [{'start': '2.2.2.0', 'end': '2.2.2.255'}, {'start': '3.2.2.0', 'end': '3.2.2.255'}] cidr = '2.2.2.0/24' pool.create(cidr, allocation_ranges=ranges) data = { 'subnets': [{ 'allocation_ranges': ranges, 'cidr': cidr, }] } test_client.assert_json_call( 'post', pool, 'https://1.2.3.4/api/v1/pools/ip-pools', data=jsonutils.dumps(data, sort_keys=True), headers=self.default_headers()) def test_create_ip_pool_no_ranges_with_gateway(self): pool = self.get_mocked_resource() cidr = '2.2.2.0/30' gateway_ip = '2.2.2.1' pool.create(cidr, allocation_ranges=None, gateway_ip=gateway_ip) exp_ranges = [{'start': '2.2.2.0', 'end': '2.2.2.0'}, {'start': '2.2.2.2', 'end': '2.2.2.3'}] data = { 'subnets': [{ 'gateway_ip': gateway_ip, 'allocation_ranges': exp_ranges, 'cidr': cidr, }] } test_client.assert_json_call( 'post', pool, 'https://1.2.3.4/api/v1/pools/ip-pools', data=jsonutils.dumps(data, sort_keys=True), headers=self.default_headers()) def test_create_ip_pool_no_ranges_no_gateway(self): pool = self.get_mocked_resource() cidr = '2.2.2.0/30' pool.create(cidr, allocation_ranges=None) exp_ranges = [{'start': '2.2.2.0', 'end': '2.2.2.3'}] data = { 'subnets': [{ 'allocation_ranges': exp_ranges, 'cidr': cidr, }] } test_client.assert_json_call( 'post', pool, 'https://1.2.3.4/api/v1/pools/ip-pools', data=jsonutils.dumps(data, sort_keys=True), headers=self.default_headers()) def test_create_ip_pool_no_cidr(self): pool = self.get_mocked_resource() gateway_ip = '1.1.1.1' ranges = [{'start': '2.2.2.0', 'end': '2.2.2.255'}, {'start': '3.2.2.0', 'end': '3.2.2.255'}] cidr = None try: pool.create(cidr, allocation_ranges=ranges, gateway_ip=gateway_ip) except exceptions.InvalidInput: # This call should fail pass else: self.fail("shouldn't happen") def test_delete_ip_pool(self): fake_ip_pool = test_constants.FAKE_IP_POOL.copy() pool = self.get_mocked_resource(response=fake_ip_pool) uuid = fake_ip_pool['id'] pool.delete(uuid) test_client.assert_json_call( 'delete', pool, 'https://1.2.3.4/api/v1/pools/ip-pools/%s' % uuid, headers=self.default_headers()) def test_force_delete_ip_pool(self): fake_ip_pool = test_constants.FAKE_IP_POOL.copy() pool = self.get_mocked_resource(response=fake_ip_pool) uuid = fake_ip_pool['id'] pool.delete(uuid, force=True) test_client.assert_json_call( 'delete', pool, 'https://1.2.3.4/api/v1/pools/ip-pools/%s?force=True' % uuid, headers=self.default_headers()) def test_update_ip_pool_name(self): fake_ip_pool = test_constants.FAKE_IP_POOL.copy() pool = self.get_mocked_resource(response=fake_ip_pool) uuid = fake_ip_pool['id'] new_name = 'new_name' pool.update(uuid, display_name=new_name) fake_ip_pool['display_name'] = new_name test_client.assert_json_call( 'put', pool, 'https://1.2.3.4/api/v1/pools/ip-pools/%s' % uuid, data=jsonutils.dumps(fake_ip_pool, sort_keys=True), headers=self.default_headers()) def test_update_ip_pool_gateway(self): fake_ip_pool = test_constants.FAKE_IP_POOL.copy() pool = self.get_mocked_resource(response=fake_ip_pool) uuid = fake_ip_pool['id'] new_gateway = '1.0.0.1' pool.update(uuid, 
                    gateway_ip=new_gateway)
        fake_ip_pool["subnets"][0]['gateway_ip'] = new_gateway
        test_client.assert_json_call(
            'put', pool,
            'https://1.2.3.4/api/v1/pools/ip-pools/%s' % uuid,
            data=jsonutils.dumps(fake_ip_pool, sort_keys=True),
            headers=self.default_headers())

    def test_update_ip_pool_delete_gateway(self):
        fake_ip_pool = test_constants.FAKE_IP_POOL.copy()
        pool = self.get_mocked_resource(response=fake_ip_pool)
        uuid = fake_ip_pool['id']
        pool.update(uuid, gateway_ip=None)
        del fake_ip_pool["subnets"][0]['gateway_ip']
        test_client.assert_json_call(
            'put', pool,
            'https://1.2.3.4/api/v1/pools/ip-pools/%s' % uuid,
            data=jsonutils.dumps(fake_ip_pool, sort_keys=True),
            headers=self.default_headers())

    def test_allocate_ip_from_pool(self):
        pool = self.get_mocked_resource()
        uuid = test_constants.FAKE_IP_POOL['id']
        addr = '1.1.1.1'
        pool.allocate(uuid, ip_addr=addr)
        data = {'allocation_id': addr}
        test_client.assert_json_call(
            'post', pool,
            'https://1.2.3.4/api/v1/pools/ip-pools/%s?action=ALLOCATE' % uuid,
            data=jsonutils.dumps(data, sort_keys=True),
            headers=self.default_headers())

    def test_release_ip_to_pool(self):
        pool = self.get_mocked_resource()
        uuid = test_constants.FAKE_IP_POOL['id']
        addr = '1.1.1.1'
        pool.release(uuid, addr)
        data = {'allocation_id': addr}
        test_client.assert_json_call(
            'post', pool,
            'https://1.2.3.4/api/v1/pools/ip-pools/%s?action=RELEASE' % uuid,
            data=jsonutils.dumps(data, sort_keys=True),
            headers=self.default_headers())

    def test_get_ip_pool_allocations(self):
        """Test getting the allocations of an IP pool."""
        fake_ip_pool = test_constants.FAKE_IP_POOL.copy()
        pool = self.get_mocked_resource(response=fake_ip_pool)
        uuid = fake_ip_pool['id']
        result = pool.get_allocations(uuid)
        self.assertEqual(fake_ip_pool, result)
        test_client.assert_json_call(
            'get', pool,
            'https://1.2.3.4/api/v1/pools/ip-pools/%s/allocations' % uuid,
            headers=self.default_headers())


class TestNsxSearch(nsxlib_testcase.NsxClientTestCase):

    def setUp(self):
        super(TestNsxSearch, self).setUp()
        self.search_path = 'search?query=%s'
        self.mock = mock.patch("vmware_nsxlib.v3.NsxLib.get_version",
                               return_value=self.get_nsxlib_version())
        self.mock.start()

    def tearDown(self):
        self.mock.stop()
        super(TestNsxSearch, self).tearDown()

    @staticmethod
    def get_nsxlib_version():
        return '2.5.0'

    def test_nsx_search_tags(self):
        """Test search of resources with the specified tag."""
        with mock.patch.object(self.nsxlib.client, 'url_get') as search:
            user_tags = [{'scope': 'user', 'tag': 'k8s'}]
            query = self.nsxlib._build_query(tags=user_tags)
            self.nsxlib.search_by_tags(tags=user_tags)
            search.assert_called_with(self.search_path % query)

    def test_nsx_search_tags_scope_only(self):
        """Test search of resources with a scope-only tag."""
        with mock.patch.object(self.nsxlib.client, 'url_get') as search:
            user_tags = [{'scope': 'user'}]
            query = self.nsxlib._build_query(tags=user_tags)
            self.nsxlib.search_by_tags(tags=user_tags)
            search.assert_called_with(self.search_path % query)

    def test_nsx_search_tags_tag_only(self):
        """Test search of resources with a tag value only."""
        with mock.patch.object(self.nsxlib.client, 'url_get') as search:
            user_tags = [{'tag': 'k8s'}]
            query = self.nsxlib._build_query(tags=user_tags)
            self.nsxlib.search_by_tags(tags=user_tags)
            search.assert_called_with(self.search_path % query)

    def test_nsx_search_tags_with_extra_attribute(self):
        """Test search of resource with specified tags and one attribute."""
        with mock.patch.object(self.nsxlib.client, 'url_get') as search:
            user_tags = [{'tag': 'k8s'}]
            query = "%s AND %s" %
(self.nsxlib._build_query(tags=user_tags), 'marked_for_delete:False') self.nsxlib.search_by_tags(tags=user_tags, marked_for_delete=False) search.assert_called_with(self.search_path % query) def test_nsx_search_tags_with_multi_attributes(self): """Test search of resource with tags and multiple attributes.""" with mock.patch.object(self.nsxlib.client, 'url_get') as search: user_tags = [{'tag': 'k8s'}] query = "%s AND %s" % (self.nsxlib._build_query(tags=user_tags), 'tea:boo AND coffee:False') self.nsxlib.search_by_tags( tags=user_tags, tea='boo', coffee=False) search.assert_called_with(self.search_path % query) def test_nsx_search_by_resouce_type_and_attributes(self): with mock.patch.object(self.nsxlib.client, 'url_get') as search: resource_type = 'HorseWithNoName' attributes = {'color': 'mauve'} self.nsxlib.search_resource_by_attributes(resource_type, **attributes) exp_query = 'resource_type:%s AND color:%s' % ( resource_type, attributes['color']) search.assert_called_with( self.search_path % exp_query) def test_nsx_search_by_resouce_type_only(self): with mock.patch.object(self.nsxlib.client, 'url_get') as search: resource_type = 'HorseWithNoName' self.nsxlib.search_resource_by_attributes(resource_type) exp_query = 'resource_type:%s' % resource_type search.assert_called_with( self.search_path % exp_query) def test_nsx_search_no_resource_type_fails(self): self.assertRaises(exceptions.NsxSearchInvalidQuery, self.nsxlib.search_resource_by_attributes, None, attributes={'meh': 'whatever'}) def test_nsx_search_resource_by_attributes_cursor_page_size(self): with mock.patch.object(self.nsxlib.client, 'url_get') as search: resource_type = 'HorseWithNoName' attributes = {'color': 'mauve'} self.nsxlib.search_resource_by_attributes( resource_type, cursor=50, page_size=100, **attributes) exp_query = 'resource_type:%s AND color:%s' % ( resource_type, attributes['color']) search.assert_called_with( (self.search_path + '&cursor=50&page_size=100') % exp_query) def test_nsx_search_tags_tag_and_scope(self): """Test search of resources with the specified tag.""" with mock.patch.object(self.nsxlib.client, 'url_get') as search: user_tags = [{'tag': 'k8s'}, {'scope': 'user'}] query = self.nsxlib._build_query(tags=user_tags) self.nsxlib.search_by_tags(tags=user_tags) search.assert_called_with(self.search_path % query) def test_nsx_search_tags_and_resource_type(self): """Test search of specified resource with the specified tag.""" with mock.patch.object(self.nsxlib.client, 'url_get') as search: user_tags = [{'scope': 'user', 'tag': 'k8s'}] res_type = 'LogicalPort' query = self.nsxlib._build_query(tags=user_tags) # Add resource_type to the query query = "resource_type:%s AND %s" % (res_type, query) self.nsxlib.search_by_tags(tags=user_tags, resource_type=res_type) search.assert_called_with(self.search_path % query) def test_nsx_search_tags_and_cursor(self): """Test search of resources with the specified tag and cursor.""" with mock.patch.object(self.nsxlib.client, 'url_get') as search: user_tags = [{'scope': 'user', 'tag': 'k8s'}] query = self.nsxlib._build_query(tags=user_tags) self.nsxlib.search_by_tags(tags=user_tags, cursor=50) search.assert_called_with( (self.search_path + '&cursor=50') % query) def test_nsx_search_tags_and_page_size(self): """Test search of resources with the specified tag and page size.""" with mock.patch.object(self.nsxlib.client, 'url_get') as search: user_tags = [{'scope': 'user', 'tag': 'k8s'}] query = self.nsxlib._build_query(tags=user_tags) self.nsxlib.search_by_tags(tags=user_tags, 
page_size=100) search.assert_called_with( (self.search_path + '&page_size=100') % query) def test_nsx_search_invalid_query_fail(self): """Test search query failure for missing tag argument.""" self.assertRaises(exceptions.NsxSearchInvalidQuery, self.nsxlib.search_by_tags, tags=None, resource_type=None) def test_nsx_search_invalid_tags_fail(self): """Test search of resources with the invalid tag.""" user_tags = [{'scope': 'user', 'invalid_tag_key': 'k8s'}] self.assertRaises(exceptions.NsxSearchInvalidQuery, self.nsxlib._build_query, tags=user_tags) def test_nsx_search_all_by_tags(self): """Test search all of resources with the specified tag.""" with mock.patch.object(self.nsxlib.client, 'url_get') as search: search.side_effect = [ {"cursor": "2", "result_count": 3, "results": [{"id": "s1"}, {"id": "s2"}]}, {"cursor": "3", "result_count": 3, "results": [{"id": "s3"}]}] user_tags = [{'scope': 'user', 'tag': 'k8s'}] query = self.nsxlib._build_query(tags=user_tags) results = self.nsxlib.search_all_by_tags(tags=user_tags) search.assert_has_calls([ mock.call(self.search_path % query), mock.call((self.search_path + '&cursor=2') % query)]) self.assertEqual(3, len(results)) def test_get_id_by_resource_and_tag(self): id = 'test' scope = 'user' tag = 'k8s' res_type = 'LogicalPort' results = {'result_count': 1, 'results': [{'id': id}]} with mock.patch.object(self.nsxlib.client, 'url_get', return_value=results): actual_id = self.nsxlib.get_id_by_resource_and_tag( res_type, scope, tag) self.assertEqual(id, actual_id) def test_get_id_by_resource_and_tag_not_found(self): scope = 'user' tag = 'k8s' res_type = 'LogicalPort' results = {'result_count': 0, 'results': []} with mock.patch.object(self.nsxlib.client, 'url_get', return_value=results): self.assertRaises(exceptions.ResourceNotFound, self.nsxlib.get_id_by_resource_and_tag, res_type, scope, tag, alert_not_found=True) def test_get_id_by_resource_and_tag_multiple(self): scope = 'user' tag = 'k8s' res_type = 'LogicalPort' results = {'result_count': 2, 'results': [{'id': '1'}, {'id': '2'}]} with mock.patch.object(self.nsxlib.client, 'url_get', return_value=results): self.assertRaises(exceptions.ManagerError, self.nsxlib.get_id_by_resource_and_tag, res_type, scope, tag, alert_multiple=True) class TestNsxSearchNew(TestNsxSearch): def setUp(self): super(TestNsxSearchNew, self).setUp() self.search_path = 'search/query?query=%s' @staticmethod def get_nsxlib_version(): return '3.0.0' class TransportZone(BaseTestResource): def setUp(self): super(TransportZone, self).setUp(core_resources.NsxLibTransportZone) def test_get_transport_zone_type(self): fake_tz = test_constants.FAKE_TZ.copy() tz = self.get_mocked_resource() with mock.patch.object(tz.client, 'url_get', return_value=fake_tz): tz_type = tz.get_transport_type(fake_tz['id']) self.assertEqual(tz.TRANSPORT_TYPE_OVERLAY, tz_type) # call it again to test it when cached tz_type = tz.get_transport_type(fake_tz['id']) self.assertEqual(tz.TRANSPORT_TYPE_OVERLAY, tz_type) def test_get_host_switch_mode(self): fake_tz = test_constants.FAKE_TZ.copy() tz = self.get_mocked_resource() with mock.patch.object(tz.client, 'url_get', return_value=fake_tz): tz_mode = tz.get_host_switch_mode(fake_tz['id']) self.assertEqual(tz.HOST_SWITCH_MODE_STANDARD, tz_mode) class TransportNode(BaseTestResource): def setUp(self): super(TransportNode, self).setUp(core_resources.NsxLibTransportNode) def test_get_transport_zones(self): fake_tn = test_constants.FAKE_TN.copy() tn = self.get_mocked_resource() self.nsxlib.feature_supported = 
mock.MagicMock() with mock.patch.object(tn.client, 'url_get', return_value=fake_tn): self.nsxlib.feature_supported.side_effect = [False, True] tzs = tn.get_transport_zones(fake_tn['id']) self.assertEqual([test_constants.FAKE_TZ_UUID], tzs) tzs = tn.get_transport_zones(fake_tn['id']) self.assertEqual([test_constants.FAKE_TZ_EP_UUID], tzs) class MetadataProxy(BaseTestResource): def setUp(self): super(MetadataProxy, self).setUp(core_resources.NsxLibMetadataProxy) def test_update_metadata_proxy(self): fake_md = test_constants.FAKE_MD.copy() md = self.get_mocked_resource() new_url = "http://2.2.2.20:3500/xyz" new_secret = 'abc' new_edge = uuidutils.generate_uuid() with mock.patch.object(md.client, 'url_get', return_value=fake_md): md.update(fake_md['id'], server_url=new_url, secret=new_secret, edge_cluster_id=new_edge) fake_md.update({'metadata_server_url': new_url, 'secret': new_secret, 'edge_cluster_id': new_edge}) test_client.assert_json_call( 'put', md, 'https://1.2.3.4/api/v1/md-proxies/%s' % fake_md['id'], data=jsonutils.dumps(fake_md, sort_keys=True), headers=self.default_headers()) def test_get_md_proxy_status(self): """Test getting proxy_status.""" mocked_resource = self.get_mocked_resource() attachment_id = 'd84ba3b8-9201-4591-8264-aad289e762ee' logical_switch_id = 'e11803a2-2d3e-452b-a834-aeb94940d272' mocked_resource.get_md_proxy_status(attachment_id, logical_switch_id) test_client.assert_json_call( 'get', mocked_resource, "https://1.2.3.4/api/v1/md-proxies/%s/%s/status" % (attachment_id, logical_switch_id), headers=self.default_headers()) class NsxLibSwitchTestCase(BaseTestResource): def setUp(self): super(NsxLibSwitchTestCase, self).setUp( core_resources.NsxLibLogicalSwitch) self._tz_id = uuidutils.generate_uuid() def _create_body(self, display_name="fake_name", admin_state=nsx_constants.ADMIN_STATE_UP, vlan_id=None, description=None, trunk_vlan=None): body = { "transport_zone_id": self._tz_id, "replication_mode": "MTEP", "display_name": display_name, "tags": [], "admin_state": admin_state } if vlan_id: body['vlan'] = vlan_id if description is not None: body['description'] = description if trunk_vlan: body['vlan_trunk_spec'] = { 'vlan_ranges': [{'start': trunk_vlan[0], 'end': trunk_vlan[1]}]} return body def test_create_logical_switch(self): """Test creating a switch returns the correct response and 200 status """ desc = 'dummy' ls = self.get_mocked_resource() ls.create(mocks.FAKE_NAME, self._tz_id, [], description=desc) data = self._create_body(description=desc) test_client.assert_json_call( 'post', ls, 'https://1.2.3.4/api/v1/logical-switches', data=jsonutils.dumps(data, sort_keys=True), headers=self.default_headers()) def test_create_logical_switch_admin_down(self): """Test creating switch with admin_state down""" ls = self.get_mocked_resource() ls.create(mocks.FAKE_NAME, self._tz_id, [], admin_state=False) data = self._create_body(admin_state=nsx_constants.ADMIN_STATE_DOWN) test_client.assert_json_call( 'post', ls, 'https://1.2.3.4/api/v1/logical-switches', data=jsonutils.dumps(data, sort_keys=True), headers=self.default_headers()) def test_create_logical_switch_vlan(self): """Test creating switch with provider:network_type VLAN""" ls = self.get_mocked_resource() vlan_id = '123' ls.create(mocks.FAKE_NAME, self._tz_id, [], vlan_id=vlan_id) data = self._create_body(vlan_id=vlan_id) test_client.assert_json_call( 'post', ls, 'https://1.2.3.4/api/v1/logical-switches', data=jsonutils.dumps(data, sort_keys=True), headers=self.default_headers()) def 
test_create_logical_switch_trunk(self): """Test creating switch with trunk vlan""" ls = self.get_mocked_resource() trunk_vlan = [10, 20] with mock.patch("vmware_nsxlib.v3.NsxLib.get_version", return_value='2.2.0'): ls.create(mocks.FAKE_NAME, self._tz_id, [], trunk_vlan_range=trunk_vlan) data = self._create_body(trunk_vlan=trunk_vlan) test_client.assert_json_call( 'post', ls, 'https://1.2.3.4/api/v1/logical-switches', data=jsonutils.dumps(data, sort_keys=True), headers=self.default_headers()) def test_create_logical_switch_trunk_not_supported(self): """Test creating switch with trunk vlan without the support""" ls = self.get_mocked_resource() trunk_vlan = [10, 20] with mock.patch("vmware_nsxlib.v3.NsxLib.get_version", return_value='2.0.0'): self.assertRaises(exceptions.InvalidInput, ls.create, mocks.FAKE_NAME, self._tz_id, [], trunk_vlan_range=trunk_vlan) def test_create_logical_switch_trunk_with_vlan(self): """Test creating switch with trunk vlan and vlan tag""" ls = self.get_mocked_resource() trunk_vlan = [10, 20] with mock.patch("vmware_nsxlib.v3.NsxLib.get_version", return_value='2.2.0'): self.assertRaises(exceptions.InvalidInput, ls.create, mocks.FAKE_NAME, self._tz_id, [], trunk_vlan_range=trunk_vlan, vlan_id='111') def test_create_logical_switch_illegal_trunk(self): """Test creating switch with illegal trunk vlan""" ls = self.get_mocked_resource() trunk_vlan = [10] with mock.patch("vmware_nsxlib.v3.NsxLib.get_version", return_value='2.2.0'): self.assertRaises(exceptions.InvalidInput, ls.create, mocks.FAKE_NAME, self._tz_id, [], trunk_vlan_range=trunk_vlan) def test_create_logical_switch_illegal_name(self): """Test creating switch with illegal name that will be escaped""" ls = self.get_mocked_resource() ls.create(mocks.FAKE_NAME + ';|=,~@', self._tz_id, []) data = self._create_body(display_name=mocks.FAKE_NAME + '......') test_client.assert_json_call( 'post', ls, 'https://1.2.3.4/api/v1/logical-switches', data=jsonutils.dumps(data, sort_keys=True), headers=self.default_headers()) def test_delete_resource(self): """Test deleting switch""" super(NsxLibSwitchTestCase, self).test_delete_resource( extra_params='detach=true&cascade=true') class NsxLibPortMirrorTestCase(BaseTestResource): def setUp(self): super(NsxLibPortMirrorTestCase, self).setUp( core_resources.NsxLibPortMirror) class NsxLibBridgeEndpointTestCase(BaseTestResource): def setUp(self): super(NsxLibBridgeEndpointTestCase, self).setUp( core_resources.NsxLibBridgeEndpoint) class NsxLibBridgeEndpointProfileTestCase(BaseTestResource): def setUp(self): super(NsxLibBridgeEndpointProfileTestCase, self).setUp( core_resources.NsxLibBridgeEndpointProfile) class NsxLibEdgeClusterTestCase(BaseTestResource): def setUp(self): super(NsxLibEdgeClusterTestCase, self).setUp( core_resources.NsxLibEdgeCluster) class NsxLibDhcpProfileTestCase(BaseTestResource): def setUp(self): super(NsxLibDhcpProfileTestCase, self).setUp( core_resources.NsxLibDhcpProfile) class NsxLibDhcpRelayServiceTestCase(BaseTestResource): def setUp(self): super(NsxLibDhcpRelayServiceTestCase, self).setUp( core_resources.NsxLibDhcpRelayService) def test_server_ips(self): fake_srv = test_constants.FAKE_RELAY_SERVICE.copy() relay_service = self.get_mocked_resource() with mock.patch.object(relay_service.client, 'url_get', return_value=fake_srv), \ mock.patch.object(self.nsxlib.client, 'url_get', return_value=test_constants.FAKE_RELAY_PROFILE): server_ips = relay_service.get_server_ips(fake_srv['id']) self.assertEqual(1, len(server_ips)) 
self.assertEqual(test_constants.FAKE_RELAY_SERVER, server_ips[0]) class NsxLibDhcpRelayProfileTestCase(BaseTestResource): def setUp(self): super(NsxLibDhcpRelayProfileTestCase, self).setUp( core_resources.NsxLibDhcpRelayProfile) def test_server_ips(self): fake_prf = test_constants.FAKE_RELAY_PROFILE.copy() relay_profile = self.get_mocked_resource() with mock.patch.object(relay_profile.client, 'url_get', return_value=fake_prf): server_ips = relay_profile.get_server_ips(fake_prf['id']) self.assertEqual(1, len(server_ips)) self.assertEqual(test_constants.FAKE_RELAY_SERVER, server_ips[0]) class NsxLibBridgeClusterTestCase(BaseTestResource): def setUp(self): super(NsxLibBridgeClusterTestCase, self).setUp( core_resources.NsxLibBridgeCluster) class NsxLibIpBlockSubnetTestCase(BaseTestResource): def setUp(self): super(NsxLibIpBlockSubnetTestCase, self).setUp( core_resources.NsxLibIpBlockSubnet) def test_list_all(self): if not self.resource: return mocked_resource = self.get_mocked_resource() block_id = '7' mocked_resource.list(block_id) test_client.assert_json_call( 'get', mocked_resource, 'https://1.2.3.4/api/v1/%s?block_id=%s' % (mocked_resource.uri_segment, block_id), headers=self.default_headers()) class NsxLibIpBlockTestCase(BaseTestResource): def setUp(self): super(NsxLibIpBlockTestCase, self).setUp( core_resources.NsxLibIpBlock) class NsxLibFabricVirtualInterfaceTestCase(BaseTestResource): def setUp(self): super(NsxLibFabricVirtualInterfaceTestCase, self).setUp( core_resources.NsxLibFabricVirtualInterface) def test_get_by_owner_vm_id(self): mocked_resource = self.get_mocked_resource() vm_id = uuidutils.generate_uuid() mocked_resource.get_by_owner_vm_id(vm_id) test_client.assert_json_call( 'get', mocked_resource, 'https://1.2.3.4/api/v1/%s?owner_vm_id=%s' % (mocked_resource.uri_segment, vm_id), headers=self.default_headers()) class NsxLibFabricVirtualMachineTestCase(BaseTestResource): def setUp(self): super(NsxLibFabricVirtualMachineTestCase, self).setUp( core_resources.NsxLibFabricVirtualMachine) def test_get_by_display_name(self): mocked_resource = self.get_mocked_resource() display_name = 'some-vm-name' mocked_resource.get_by_display_name(display_name) test_client.assert_json_call( 'get', mocked_resource, 'https://1.2.3.4/api/v1/%s?display_name=%s' % (mocked_resource.uri_segment, display_name), headers=self.default_headers()) class LogicalDhcpServerTestCase(BaseTestResource): def setUp(self): super(LogicalDhcpServerTestCase, self).setUp( resources.LogicalDhcpServer) def test_update_empty_dhcp_server(self): mocked_resource = self.get_mocked_resource() server_uuid = 'server-uuid' ip = '1.1.1.1' with mock.patch.object(mocked_resource.client, "get", return_value={}): mocked_resource.update(server_uuid, server_ip=ip) body = {'ipv4_dhcp_server': {'dhcp_server_ip': ip}} test_client.assert_json_call( 'put', mocked_resource, 'https://1.2.3.4/api/v1/%s/%s' % (mocked_resource.uri_segment, server_uuid), data=jsonutils.dumps(body, sort_keys=True), headers=self.default_headers()) def test_update_dhcp_server_new_val(self): mocked_resource = self.get_mocked_resource() server_uuid = 'server-uuid' ip = '1.1.1.1' domain_name = 'dummy' existing_server = {'ipv4_dhcp_server': {'domain_name': domain_name}} # add the server ip with mock.patch.object(mocked_resource.client, "get", return_value=existing_server): mocked_resource.update(server_uuid, server_ip=ip) existing_server['ipv4_dhcp_server']['dhcp_server_ip'] = ip test_client.assert_json_call( 'put', mocked_resource, 'https://1.2.3.4/api/v1/%s/%s' % 
(mocked_resource.uri_segment, server_uuid), data=jsonutils.dumps(existing_server, sort_keys=True), headers=self.default_headers()) def test_update_dhcp_server_replace_val(self): mocked_resource = self.get_mocked_resource() server_uuid = 'server-uuid' ip = '1.1.1.1' domain_name = 'dummy' existing_server = {'ipv4_dhcp_server': {'domain_name': domain_name, 'dhcp_server_ip': ip}} # replace the server ip new_ip = '2.2.2.2' with mock.patch.object(mocked_resource.client, "get", return_value=existing_server): mocked_resource.update(server_uuid, server_ip=new_ip) existing_server['ipv4_dhcp_server']['dhcp_server_ip'] = new_ip test_client.assert_json_call( 'put', mocked_resource, 'https://1.2.3.4/api/v1/%s/%s' % (mocked_resource.uri_segment, server_uuid), data=jsonutils.dumps(existing_server, sort_keys=True), headers=self.default_headers()) def test_create_binding(self): mocked_resource = self.get_mocked_resource() server_uuid = 'server-uuid' mac = 'aa:bb:cc:dd:ee:ff' ip = '1.1.1.1' host = 'host' mocked_resource.create_binding(server_uuid, mac, ip, hostname=host) body = { 'mac_address': mac, 'ip_address': ip, 'host_name': host, } test_client.assert_json_call( 'post', mocked_resource, 'https://1.2.3.4/api/v1/%s/%s/static-bindings' % (mocked_resource.uri_segment, server_uuid), data=jsonutils.dumps(body, sort_keys=True), headers=self.default_headers()) def test_get_binding(self): mocked_resource = self.get_mocked_resource() server_uuid = 'server-uuid' binding_uuid = 'binding-uuid' mocked_resource.get_binding(server_uuid, binding_uuid) test_client.assert_json_call( 'get', mocked_resource, 'https://1.2.3.4/api/v1/%s/%s/static-bindings/%s' % (mocked_resource.uri_segment, server_uuid, binding_uuid), headers=self.default_headers()) def test_update_binding(self): mocked_resource = self.get_mocked_resource() server_uuid = 'server-uuid' binding_uuid = 'binding-uuid' mac = 'aa:bb:cc:dd:ee:ff' new_mac = 'dd:bb:cc:dd:ee:ff' ip = '1.1.1.1' host = 'host' body = { 'mac_address': mac, 'ip_address': ip, 'host_name': host, } with mock.patch.object(mocked_resource.client, "get", return_value=body): mocked_resource.update_binding(server_uuid, binding_uuid, mac_address=new_mac) body['mac_address'] = new_mac test_client.assert_json_call( 'put', mocked_resource, 'https://1.2.3.4/api/v1/%s/%s/static-bindings/%s' % (mocked_resource.uri_segment, server_uuid, binding_uuid), data=jsonutils.dumps(body, sort_keys=True), headers=self.default_headers()) class NodeHttpServicePropertiesTestCase(BaseTestResource): def setUp(self): super(NodeHttpServicePropertiesTestCase, self).setUp( resources.NodeHttpServiceProperties) def test_get_resource(self): self.skipTest("The action is not supported by this resource") def test_list_all(self): self.skipTest("The action is not supported by this resource") def test_delete_resource(self): self.skipTest("The action is not supported by this resource") def test_get_rate_limit(self): mocked_resource = self.get_mocked_resource() rate_limit = 40 body = {'service_properties': {'client_api_rate_limit': rate_limit}} with mock.patch("vmware_nsxlib.v3.NsxLib.get_version", return_value='2.2.0'),\ mock.patch.object(mocked_resource.client, "url_get", return_value=body): result = mocked_resource.get_rate_limit() self.assertEqual(rate_limit, result) def test_update_rate_limit(self): mocked_resource = self.get_mocked_resource() old_rate_limit = 40 new_rate_limit = 50 body = {'service_properties': { 'client_api_rate_limit': old_rate_limit}} with mock.patch("vmware_nsxlib.v3.NsxLib.get_version", 
return_value='2.2.0'),\ mock.patch.object(mocked_resource.client, "url_get", return_value=body): mocked_resource.update_rate_limit(new_rate_limit) body['service_properties'][ 'client_api_rate_limit'] = new_rate_limit test_client.assert_json_call( 'put', mocked_resource, 'https://1.2.3.4/api/v1/node/services/http', data=jsonutils.dumps(body, sort_keys=True), headers=self.default_headers()) test_client.assert_json_call( 'post', mocked_resource, 'https://1.2.3.4/api/v1/node/services/http?action=restart', headers=self.default_headers()) class TestNsxlibClusterNodesConfigTestCase(BaseTestResource): def setUp(self): super(TestNsxlibClusterNodesConfigTestCase, self).setUp( resources.NsxlibClusterNodesConfig) def test_delete_resource(self): self.skipTest("The action is not supported by this resource") def test_get_managers_ips(self): mocked_resource = self.get_mocked_resource() body = {'results': test_constants.FAKE_CLUSTER_NODES_CONFIG} with mock.patch.object(mocked_resource.client, "url_get", return_value=body): result = mocked_resource.get_managers_ips() self.assertEqual([test_constants.FAKE_MANAGER_IP1, test_constants.FAKE_MANAGER_IP2], result) class NsxlibHostSwitchProfilesTestCase(BaseTestResource): def setUp(self): super(NsxlibHostSwitchProfilesTestCase, self).setUp( resources.NsxlibHostSwitchProfiles) class InventoryTestCase(BaseTestResource): CONTAINER_CLUSTER = "k8s-cluster-1" def setUp(self): super(InventoryTestCase, self).setUp(resources.Inventory) def test_get_resource(self): mocked_resource = self.get_mocked_resource() mocked_resource.get('ContainerCluster', self.CONTAINER_CLUSTER) base_url = 'https://1.2.3.4/api/v1/fabric/container-clusters' surfix = '/%s' % self.CONTAINER_CLUSTER test_client.assert_json_call( 'get', mocked_resource, base_url + surfix, headers=self.default_headers()) def test_list_all(self): mocked_resource = self.get_mocked_resource() mocked_resource.list( self.CONTAINER_CLUSTER, 'ContainerApplication') base_url = 'https://1.2.3.4/api/v1/fabric/container-applications' surfix = '?container_cluster_id=%s' % self.CONTAINER_CLUSTER test_client.assert_json_call( 'get', mocked_resource, base_url + surfix, headers=self.default_headers()) def test_delete_resource(self, extra_params=None): mocked_resource = self.get_mocked_resource() mocked_resource.delete('ContainerCluster', self.CONTAINER_CLUSTER) base_url = 'https://1.2.3.4/api/v1/fabric/container-clusters' surfix = '/%s' % self.CONTAINER_CLUSTER test_client.assert_json_call( 'delete', mocked_resource, base_url + surfix, headers=self.default_headers()) def test_create_resource(self): mocked_resource = self.get_mocked_resource() body = {'resource_type': 'ContainerCluster', 'name': 'k8s-1', 'external_id': 'id-1', 'cluster_type': 'Kubernetes', 'infrastructure': {'infra_type': 'vSphere'}} mocked_resource.create('ContainerCluster', body) base_url = 'https://1.2.3.4/api/v1/fabric/container-clusters' test_client.assert_json_call( 'post', mocked_resource, base_url, data=jsonutils.dumps(body, sort_keys=True), headers=self.default_headers()) def test_update(self): mocked_resource = self.get_mocked_resource() body = {} update_dict = {'external_id': '1234', 'resource_type': 'Application', 'name': 'service-1', 'labels': [{'key': 'key-1', 'value': 'value-1'}]} mocked_resource.update( self.CONTAINER_CLUSTER, [('CREATE', update_dict)]) item = {} item["object_update_type"] = 'CREATE' item["container_object"] = update_dict body = {"container_inventory_objects": [item]} base_url = 'https://1.2.3.4/api/v1/inventory/container/' surfix = 
'%s?action=updates' % self.CONTAINER_CLUSTER test_client.assert_json_call( 'post', mocked_resource, base_url + surfix, data=jsonutils.dumps(body, sort_keys=True), headers=self.default_headers()) class DummyCachedResource(utils.NsxLibApiBase): @property def uri_segment(self): return 'XXX' @property def resource_type(self): return 'xxx' @property def use_cache_for_get(self): return True @property def cache_timeout(self): return 2 class ResourceCache(BaseTestResource): def setUp(self): super(ResourceCache, self).setUp(DummyCachedResource) def test_get_with_cache(self): mocked_resource = self.get_mocked_resource() fake_uuid = uuidutils.generate_uuid() # first call -> goes to the client mocked_resource.get(fake_uuid) self.assertEqual(1, test_client.mock_calls_count( 'get', mocked_resource)) # second call -> goes to cache mocked_resource.get(fake_uuid) self.assertEqual(1, test_client.mock_calls_count( 'get', mocked_resource)) # a different call -> goes to the client fake_uuid2 = uuidutils.generate_uuid() mocked_resource.get(fake_uuid2) self.assertEqual(2, test_client.mock_calls_count( 'get', mocked_resource)) # third call -> still goes to cache mocked_resource.get(fake_uuid) self.assertEqual(2, test_client.mock_calls_count( 'get', mocked_resource)) # after timeout -> goes to the client eventlet.sleep(2) mocked_resource.get(fake_uuid) self.assertEqual(3, test_client.mock_calls_count( 'get', mocked_resource)) # after delete -> goes to the client mocked_resource.delete(fake_uuid) mocked_resource.get(fake_uuid) self.assertEqual(4, test_client.mock_calls_count( 'get', mocked_resource)) # And from cache again mocked_resource.get(fake_uuid) self.assertEqual(4, test_client.mock_calls_count( 'get', mocked_resource)) # Update the entry. The get inside the update is from # the client too, because it must be current) mocked_resource._update_with_retry(fake_uuid, {}) self.assertEqual(5, test_client.mock_calls_count( 'get', mocked_resource)) # after update -> goes to client mocked_resource.get(fake_uuid) self.assertEqual(6, test_client.mock_calls_count( 'get', mocked_resource)) class SystemHealthTestCase(BaseTestResource): def setUp(self): super(SystemHealthTestCase, self).setUp(resources.SystemHealth) def test_create_ncp_status(self): mocked_resource = self.get_mocked_resource() cluster_id = "b8b089f-338c-5c65-98bd-a5642ae2aa00" status = "HEALTHY" mocked_resource.create_ncp_status(cluster_id, status) body = {'cluster_id': cluster_id, 'status': status} base_url = ('https://1.2.3.4/api/v1/systemhealth/container-cluster/' 'ncp/status') test_client.assert_json_call( 'post', mocked_resource, base_url, data=jsonutils.dumps(body, sort_keys=True), headers=self.default_headers()) vmware-nsxlib-15.0.6/setup.py0000664000175000017500000000200613623151571016137 0ustar zuulzuul00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools # In python < 2.7.4, a lazy loading of package `pbr` will break # setuptools if some other modules registered functions in `atexit`. # solution from: http://bugs.python.org/issue15881#msg170215 try: import multiprocessing # noqa except ImportError: pass setuptools.setup( setup_requires=['pbr>=2.0.0'], pbr=True) vmware-nsxlib-15.0.6/.coveragerc0000664000175000017500000000020513623151571016545 0ustar zuulzuul00000000000000[run] branch = True source = vmware_nsxlib omit = vmware_nsxlib/tests/* [report] ignore_errors = True vmware-nsxlib-15.0.6/requirements.txt0000664000175000017500000000104513623151571017713 0ustar zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. pbr>=4.0.0 # Apache-2.0 decorator>=4.3.0 # BSD eventlet>=0.24.1 # MIT netaddr>=0.7.18 # BSD tenacity>=5.0.1 # Apache-2.0 six>=1.10.0 # MIT oslo.i18n>=3.15.3 # Apache-2.0 oslo.log>=3.36.0 # Apache-2.0 oslo.serialization>=2.28.1 # Apache-2.0 oslo.service>=1.31.0 # Apache-2.0 oslo.utils>=3.33.0 # Apache-2.0 pyOpenSSL>=17.1.0 # Apache-2.0 vmware-nsxlib-15.0.6/README.rst0000664000175000017500000000022413623151571016114 0ustar zuulzuul00000000000000============= vmware-nsxlib ============= * Free software: Apache license * Source: https://opendev.org/x/vmware-nsxlib Features -------- * TODO vmware-nsxlib-15.0.6/tox.ini0000664000175000017500000000617413623151571015752 0ustar zuulzuul00000000000000[tox] envlist = py37,pep8 minversion = 2.0 skipsdist = True [testenv] install_command = pip install {opts} {packages} basepython = python3 setenv = VIRTUAL_ENV={envdir} PYTHONWARNINGS=default::DeprecationWarning passenv = TRACE_FAILONLY GENERATE_HASHES http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY usedevelop = True deps = -c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/branch/master/upper-constraints.txt} -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt whitelist_externals = sh commands = stestr run {posargs} stestr slowest # there is also secret magic in ostestr which lets you run in a fail only # mode. To do this define the TRACE_FAILONLY environment variable.
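# Typical local invocations of the environments defined in this file, for illustration:
#   tox -e py37   - run the unit tests
#   tox -e pep8   - run the flake8 style checks
#   tox -e cover  - run the unit tests with a coverage report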
[testenv:common] # Fake job to define environment variables shared between dsvm/non-dsvm jobs setenv = {[testenv]setenv} OS_TEST_TIMEOUT=180 commands = false [testenv:functional] setenv = {[testenv]setenv} {[testenv:common]setenv} OS_TEST_PATH=./vmware_nsxlib/tests/functional OS_LOG_PATH={env:OS_LOG_PATH:/opt/stack/logs} deps = {[testenv]deps} -r{toxinidir}/vmware_nsxlib/tests/functional/requirements.txt [testenv:dsvm-functional] setenv = {[testenv]setenv} OS_SUDO_TESTING=1 OS_FAIL_ON_MISSING_DEPS=1 OS_TEST_TIMEOUT=180 sitepackages=True deps = {[testenv:functional]deps} commands = [tox:jenkins] sitepackages = True [testenv:releasenotes] commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html [testenv:pep8] deps = {[testenv]deps} commands = # Checks for coding and style guidelines flake8 {[testenv:genconfig]commands} whitelist_externals = sh bash [testenv:bandit] deps = -c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/branch/master/upper-constraints.txt} -r{toxinidir}/test-requirements.txt commands = bandit -r vmware_nsxlib -n 5 -ll [testenv:cover] commands = python setup.py test --coverage --coverage-package-name=vmware_nsxlib --testr-args='{posargs}' coverage report [testenv:venv] commands = {posargs} [testenv:docs] deps = -c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/branch/master/upper-constraints.txt} -r{toxinidir}/doc/requirements.txt commands = sphinx-build -b html doc/source doc/build/html [flake8] # E125 continuation line does not distinguish itself from next logical line # E129 visually indented line with same indent as next logical line # E741 ambiguous variable name # N530 direct neutron imports not allowed # W504 line break after binary operator # N531 translations hints ignore = N530,E125,E129,E741,N531,W504 show-source = true builtins = _ exclude = .venv,.git,.tox,dist,doc,*lib/python*,*egg,build,.ropeproject import-order-style = pep8 [hacking] import_exceptions = vmware_nsxlib._i18n [testenv:genconfig] commands = [testenv:uuidgen] commands = check-uuid --fix [testenv:lower-constraints] deps = -c{toxinidir}/lower-constraints.txt -r{toxinidir}/test-requirements.txt -r{toxinidir}/doc/requirements.txt -r{toxinidir}/requirements.txt vmware-nsxlib-15.0.6/.stestr.conf0000664000175000017500000000007213623151571016677 0ustar zuulzuul00000000000000[DEFAULT] test_path=./vmware_nsxlib/tests/unit top_dir=./ vmware-nsxlib-15.0.6/test-requirements.txt0000664000175000017500000000124013623151571020665 0ustar zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
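# For a local development checkout these can also be installed directly, for example:
#   pip install -r requirements.txt -r test-requirements.txt
# (the environments in tox.ini pull in both files via their deps settings)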
hacking>=1.1.0 # Apache-2.0 bandit!=1.6.0,>=1.1.0 # Apache-2.0 coverage!=4.4,>=4.0 # Apache-2.0 fixtures>=3.0.0 # Apache-2.0/BSD flake8-import-order==0.12 # LGPLv3 mock>=2.0.0 # BSD python-subunit>=1.0.0 # Apache-2.0/BSD oslotest>=3.2.0 # Apache-2.0 stestr>=1.0.0 # Apache-2.0 testresources>=2.0.0 # Apache-2.0/BSD testtools>=2.2.0 # MIT testscenarios>=0.4 # Apache-2.0/BSD tempest>=17.1.0 # Apache-2.0 pylint==1.7.6 # GPLv2 requests-mock>=1.2.0 # Apache-2.0 vmware-nsxlib-15.0.6/HACKING.rst0000664000175000017500000000022413623151571016223 0ustar zuulzuul00000000000000vmware-nsxlib Style Commandments ================================ Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ vmware-nsxlib-15.0.6/CONTRIBUTING.rst0000664000175000017500000000122313623151571017066 0ustar zuulzuul00000000000000If you would like to contribute to the development of OpenStack, you must follow the steps in this page: https://docs.openstack.org/infra/manual/developers.html If you already have a good understanding of how the system works and your OpenStack accounts are set up, you can skip to the development workflow section of this documentation to learn how changes to OpenStack should be submitted for review via the Gerrit tool: https://docs.openstack.org/infra/manual/developers.html#development-workflow Pull requests submitted through GitHub will be ignored. Bugs should be filed on Launchpad, not GitHub: https://bugs.launchpad.net/vmware-nsxlib vmware-nsxlib-15.0.6/PKG-INFO0000664000175000017500000000172013623151652015524 0ustar zuulzuul00000000000000Metadata-Version: 1.1 Name: vmware-nsxlib Version: 15.0.6 Summary: A common library that interfaces with VMware NSX Home-page: https://opendev.org/x/vmware-nsxlib Author: OpenStack Author-email: openstack-discuss@lists.openstack.org License: UNKNOWN Description: ============= vmware-nsxlib ============= * Free software: Apache license * Source: https://opendev.org/x/vmware-nsxlib Features -------- * TODO Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 vmware-nsxlib-15.0.6/setup.cfg0000664000175000017500000000234113623151652016250 0ustar zuulzuul00000000000000[metadata] name = vmware-nsxlib summary = A common library that interfaces with VMware NSX description-file = README.rst author = OpenStack author-email = openstack-discuss@lists.openstack.org home-page = https://opendev.org/x/vmware-nsxlib classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 3 Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 [files] packages = vmware_nsxlib [build_sphinx] source-dir = doc/source build-dir = doc/build all_files = 1 [upload_sphinx] upload-dir = doc/build/html [compile_catalog] directory = vmware_nsxlib/locale domain = vmware_nsxlib [update_catalog] domain = vmware_nsxlib output_dir = vmware_nsxlib/locale input_file = vmware_nsxlib/locale/vmware_nsxlib.pot [extract_messages] keywords = _
gettext ngettext l_ lazy_gettext mapping_file = babel.cfg output_file = vmware_nsxlib/locale/vmware_nsxlib.pot [build_releasenotes] all_files = 1 build-dir = releasenotes/build source-dir = releasenotes/source [egg_info] tag_build = tag_date = 0 vmware-nsxlib-15.0.6/lower-constraints.txt0000664000175000017500000000104213623151571020662 0ustar zuulzuul00000000000000bandit==1.1.0 coverage==4.0 decorator==4.3.0 eventlet==0.24.1 fixtures==3.0.0 flake8==2.6.2 flake8-import-order==0.12 hacking>=1.1.0 # Apache-2.0 mock==2.0.0 netaddr==0.7.18 openstackdocstheme==1.18.1 oslo.i18n==3.15.3 oslo.log==3.36.0 oslo.serialization==2.28.1 oslo.service==1.31.0 oslo.utils==3.33.0 oslotest==3.2.0 pbr==4.0.0 pyOpenSSL==17.1.0 python-subunit==1.0.0 pylint==1.7.1 reno==2.5.0 requests-mock==1.2.0 six==1.10.0 sphinx==1.6.5 stestr==1.0.0 tempest==17.1.0 tenacity==5.0.1 testresources==2.0.0 testtools==2.2.0 testscenarios==0.4 vmware-nsxlib-15.0.6/MANIFEST.in0000664000175000017500000000013513623151571016164 0ustar zuulzuul00000000000000include AUTHORS include ChangeLog exclude .gitignore exclude .gitreview global-exclude *.pyc
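# MANIFEST.in controls what goes into the source distribution: 'include' adds AUTHORS and ChangeLog, 'exclude' drops the git housekeeping files, and 'global-exclude *.pyc' strips compiled byte-code files from every directory.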