././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2462552 vmware-nsx-15.0.1.dev143/0000755000175000017500000000000000000000000015177 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/.coveragerc0000644000175000017500000000015700000000000017323 0ustar00coreycorey00000000000000[run] branch = True source = neutron omit = neutron/tests/*,neutron/openstack/* [report] ignore_errors = True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/.mailmap0000644000175000017500000000111600000000000016617 0ustar00coreycorey00000000000000# Format is: # # lawrancejing Jiajun Liu Zhongyue Luo Kun Huang Zhenguo Niu Isaku Yamahata Isaku Yamahata Morgan Fainberg ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/.pylintrc0000644000175000017500000000753500000000000017056 0ustar00coreycorey00000000000000# The format of this file isn't really documented; just use --generate-rcfile [MASTER] # Add to the black list. It should be a base name, not a # path. You may set this option multiple times. # # Note the 'openstack' below is intended to match only # neutron.openstack.common. If we ever have another 'openstack' # dirname, then we'll need to expand the ignore features in pylint :/ ignore=.git,tests,openstack [MESSAGES CONTROL] # NOTE(gus): This is a long list. 
A number of these are important and # should be re-enabled once the offending code is fixed (or marked # with a local disable) disable= # "F" Fatal errors that prevent further processing import-error, # "I" Informational noise locally-disabled, # "E" Error for important programming issues (likely bugs) access-member-before-definition, no-member, no-method-argument, no-self-argument, not-an-iterable, # "W" Warnings for stylistic problems or minor programming issues abstract-method, abstract-class-instantiated, arguments-differ, attribute-defined-outside-init, bad-builtin, bad-indentation, broad-except, dangerous-default-value, deprecated-lambda, expression-not-assigned, fixme, global-statement, literal-comparison, no-init, non-parent-init-called, not-callable, protected-access, redefined-builtin, redefined-outer-name, signature-differs, star-args, super-init-not-called, super-on-old-class, unpacking-non-sequence, unused-argument, unused-import, unused-variable, unsubscriptable-object, useless-super-delegation, # TODO(dougwig) - disable nonstandard-exception while we have neutron_lib shims nonstandard-exception, # "C" Coding convention violations bad-continuation, consider-iterating-dictionary, consider-using-enumerate, invalid-name, len-as-condition, misplaced-comparison-constant, missing-docstring, singleton-comparison, superfluous-parens, ungrouped-imports, wrong-import-order, wrong-import-position, # "R" Refactor recommendations abstract-class-little-used, abstract-class-not-used, consider-merging-isinstance, consider-using-ternary, duplicate-code, interface-not-implemented, no-else-return, no-self-use, redefined-argument-from-local, simplifiable-if-statement, too-few-public-methods, too-many-ancestors, too-many-arguments, too-many-boolean-expressions, too-many-branches, too-many-function-args, too-many-instance-attributes, too-many-lines, too-many-locals, too-many-nested-blocks, too-many-public-methods, too-many-return-statements, too-many-statements, 
cyclic-import, no-name-in-module, bad-super-call [BASIC] # Variable names can be 1 to 31 characters long, with lowercase and underscores variable-rgx=[a-z_][a-z0-9_]{0,30}$ # Argument names can be 2 to 31 characters long, with lowercase and underscores argument-rgx=[a-z_][a-z0-9_]{1,30}$ # Method names should be at least 3 characters long # and be lowecased with underscores method-rgx=([a-z_][a-z0-9_]{2,}|setUp|tearDown)$ # Module names matching neutron-* are ok (files in bin/) module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(neutron-[a-z0-9_-]+))$ # Don't require docstrings on tests. no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$ [FORMAT] # Maximum number of characters on a single line. max-line-length=79 [VARIABLES] # List of additional names supposed to be defined in builtins. Remember that # you should avoid to define new builtins when possible. # _ is used by our localization additional-builtins=_ [CLASSES] # List of interface methods to ignore, separated by a comma. ignore-iface-methods= [IMPORTS] # Deprecated modules which should not be used, separated by a comma deprecated-modules= # should use openstack.common.jsonutils json [TYPECHECK] # List of module names for which member attributes should not be checked ignored-modules=six.moves,_MovedItems [REPORTS] # Tells whether to display a full report or only the messages reports=no ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/.stestr.conf0000644000175000017500000000006700000000000017453 0ustar00coreycorey00000000000000[DEFAULT] test_path=./vmware_nsx/tests/unit top_dir=./ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/.testr.conf0000644000175000017500000000036700000000000017273 0ustar00coreycorey00000000000000[DEFAULT] test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_LOG_CAPTURE=1 ${PYTHON:-python} -m subunit.run discover 
-t ./ ${OS_TEST_PATH:-./vmware_nsx/tests/unit} $LISTOPT $IDOPTION test_id_option=--load-list $IDFILE test_list_option=--list ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/.zuul.yaml0000644000175000017500000001010200000000000017132 0ustar00coreycorey00000000000000- project: templates: - build-openstack-docs-pti - check-requirements - openstack-python3-ussuri-jobs-neutron - openstack-python3-ussuri-jobs check: jobs: - vmware-tox-lower-constraints - openstack-tox-pep8: required-projects: - openstack/neutron - openstack/networking-l2gw - openstack/networking-sfc - x/vmware-nsxlib - openstack/neutron-fwaas - openstack/neutron-dynamic-routing - openstack/neutron-vpnaas - x/tap-as-a-service - openstack/octavia - openstack-tox-py36: timeout: 5400 required-projects: - openstack/neutron - openstack/networking-l2gw - openstack/networking-sfc - x/vmware-nsxlib - openstack/neutron-fwaas - openstack/neutron-dynamic-routing - openstack/neutron-vpnaas - x/tap-as-a-service - openstack/octavia - openstack-tox-py37: timeout: 5400 required-projects: - openstack/neutron - openstack/networking-l2gw - openstack/networking-sfc - x/vmware-nsxlib - openstack/neutron-fwaas - openstack/neutron-dynamic-routing - openstack/neutron-vpnaas - x/tap-as-a-service - openstack/octavia - openstack-tox-py38: timeout: 5400 required-projects: - openstack/neutron - openstack/networking-l2gw - openstack/networking-sfc - x/vmware-nsxlib - openstack/neutron-fwaas - openstack/neutron-dynamic-routing - openstack/neutron-vpnaas - x/tap-as-a-service - openstack/octavia gate: queue: vmware-nsx jobs: - vmware-tox-lower-constraints - openstack-tox-pep8: required-projects: - openstack/neutron - openstack/networking-l2gw - openstack/networking-sfc - x/vmware-nsxlib - openstack/neutron-fwaas - openstack/neutron-dynamic-routing - openstack/neutron-vpnaas - x/tap-as-a-service - openstack/octavia - openstack-tox-py36: timeout: 5400 
required-projects: - openstack/neutron - openstack/networking-l2gw - openstack/networking-sfc - x/vmware-nsxlib - openstack/neutron-fwaas - openstack/neutron-dynamic-routing - openstack/neutron-vpnaas - x/tap-as-a-service - openstack/octavia - openstack-tox-py37: timeout: 5400 required-projects: - openstack/neutron - openstack/networking-l2gw - openstack/networking-sfc - x/vmware-nsxlib - openstack/neutron-fwaas - openstack/neutron-dynamic-routing - openstack/neutron-vpnaas - x/tap-as-a-service - openstack/octavia - openstack-tox-py38: timeout: 5400 required-projects: - openstack/neutron - openstack/networking-l2gw - openstack/networking-sfc - x/vmware-nsxlib - openstack/neutron-fwaas - openstack/neutron-dynamic-routing - openstack/neutron-vpnaas - x/tap-as-a-service - openstack/octavia - job: name: vmware-tox-lower-constraints parent: openstack-tox-lower-constraints timeout: 5400 required-projects: - openstack/neutron - openstack/networking-l2gw - openstack/networking-sfc - x/vmware-nsxlib - openstack/neutron-fwaas - openstack/neutron-dynamic-routing - openstack/neutron-vpnaas - x/tap-as-a-service - openstack/octavia ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542531.0 vmware-nsx-15.0.1.dev143/AUTHORS0000644000175000017500000005353600000000000016263 0ustar00coreycorey00000000000000AKamyshnikova Aaron Rosen Aaron Rosen Aaron-Zhang231 Abhishek Chanda Abhishek Raut Abhishek Raut Abishek Subramanian Adam Gandelman Adam Harwell Adin Scannell Adit Sarfaty Ailing Zhang Akash Gangil Akash Gangil Akihiro MOTOKI Akihiro Motoki Akihiro Motoki Aleks Chirko Alessandro Pilotti Alessandro Pilotti Alessio Ababilov Alessio Ababilov Alex Holden Alex Kang Alexander Ignatov Alexei Kornienko Alexey I. 
Froloff Amey Bhide Amir Sadoughi Andre Pech Andrea Frittoli Andreas Jaeger Andreas Jaeger Andrew Boik Andrey Epifanov Angus Lees Ann Kamyshnikova Anna Khmelnitsky Ante Karamatic Anthony Veiga Anton Frolov Arata Notsu Armando Migliaccio Armando Migliaccio Armando Migliaccio Arvind Somy Arvind Somya Assaf Muller Attila Fazekas Avishay Balderman AvnishPal Baodong (Robert) Li Baodong Li Baohua Yang Ben Lin Ben Nemec Ben Nemec Benedikt Trefzer Bernhard M. Wiedemann Bertrand Lallau Bhuvan Arumugam Bo Lin Bob Kukura Bob Melander Boden R Boris Pavlovic Brad Hall Brad Hall Bradley Jones Brandon Logan Brant Knudson Brian Haley Brian Waldon Britt Houser Cady_Chen Cao Xuan Hoang Carl Baldwin Carol Bouchard Cedric Brandily Chandan Kumar Chang Bo Guo Chengli XU Chirag Shahani Christian Berendt Christoph Arnold Christoph Thiel Christopher Chu Lin Chuck Chuck Carlino Chuck Short Clark Boylan Claudiu Belu Clint Byrum Cédric Ollivier Dan Florea Dan Prince Dan Wendlandt Dane LeBlanc Daniel Gollub Darragh O'Reilly Darragh O'Reilly Darren Birkett Davanum Srinivas Dave Cahill Dave Lapsley Dave Tucker David Ripton Dazhao Debo Deepak N Deepthi Kandavara Jayarama Derek Higgins Devang Doshi Dhanashree Gosavi Dirk Mueller Divya ChanneGowda Doug Hellmann Doug Hellmann Doug Wiegley DuYaHong Duarte Nunes Ed Bak Edgar Magana Edgar Magana Elena Ezhova Emilien Macchi EmilienM Eoghan Glynn Eric Brown Eric Windisch Erik Colnick Eugene Nikanorov Evgeny Fedoruk Fawad Khaliq Flavio Percoco Francisco Souza Franck Yelles Francois Deppierraz Francois Eleouet Gabriel Wainer Gary Kotton Gary Kotton Gauvain Pocentek Ghe Rivero Giridhar Jayavelu Gordon Chung Gordon Chung Guilherme Salgado Guoqiang Ding Haiwei Xu Han Zhou Hareesh Puthalath Harsh Prasad He Jie Xu He Yongli Hemanth Ravi Henry Gessau Henry Gessau Henry Gessau HenryGessau HenryVIII Hiroaki KAWAI Hirofumi Ichihara Hironori Shiina Hisaharu Ishii Hui HX Xiang Hui Xiang Ian Wienand Ignacio Scopetta Ihar Hrachyshka Ihar Hrachyshka Ilya Pekelny Ilya 
Shakhat Ionuț Arțăriși Irena Berezovsky Iryoung Jeong Isaku Yamahata Isaku Yamahata Itsuro Oda Itzik Brown Ivan Kolodyazhny Ivar Lazzaro Ivar Lazzaro JJ Asghar JUN JIE NAN Jacek Swiderski Jakub Libosvar James E. Blair James E. Blair James Page Janet Yu Jason Dillaman Jason Kölker Jason Zhang Jaume Devesa Jay Pipes Jay S. Bryant Jeremy Hanmer Jeremy Stanley Jesse Andrews Jiajun Liu Jian Wen Jianing Yang Joe Gordon Joe Harrison Joe Heck Joe Mills John Davidge John Dewey John Dunning John Jason Brzozowski John Kasperski John Perkins John Schwarz Jon Grimm Jonathan LaCour Jordan Tardif Jorge Miramontes Juergen Brendel Julia Varlamova Juliano Martinez Juliano Martinez Julien Danjou Jun Park Justin Hammond Justin Lund KAWAI Hiroaki KIYOHIRO ADACHI Kaiwei Fan Kanzhe Jiang Ken'ichi Ohmichi Keshava Bharadwaj Kevin Benton Kevin Benton Kevin Benton Kevin L. Mitchell Kiall Mac Innes Kobi Samoray Koert van der Veer Koteswara Rao Kelam Koteswara Rao Kelam Kris Lindgren Kui Shi Kun Huang Kyle Mestery Kyle Mestery Lars Kellogg-Stedman Leon Cui Li Ma Li Ma Lianghwa Jou Liping Mao LipingMao Livnat Peer Lorin Hochstein Luis A. Garcia Luiz H Ozaki Luke Gorrie Ly Loi Madhav Puri Major Hayden Mandeep Dhami Manish Godara Marga Millet Mark McClain Mark McClain Mark McLoughlin Mark T. 
Voelker Martins Jakubovics Maru Newby Maru Newby Maruti Mate Lakat Matt Dietz Matt Odden Matt Riedemann Matthew Edmonds Matthew Treinish Matthew Treinish Matthew Weeks Mehdi Abaakouk Michael J Fork Michael Smith Michael Still Michal Kelner Mishali Miguel Angel Ajo Miguel Lavalle Miguel Ángel Ajo Mike Bayer Mike Kolesnik Mithil Arun Mohammad Banikazemi Monty Taylor Morgan Fainberg Moshe Levi Motohiro OTSUKA Mukul Murali Birru Nachi Ueno Nachi Ueno Nader Lahouti Nguyen Hai Truong Nguyen Hung Phuong Nguyen Van Duc Nick Bartos Nikolay Sobolevskiy Nishant Kumar Numan Siddique Oleg Bondarev Ondřej Nový Paul Michali Paul Ward Peng Xiao Peng Yong Peter Feiner Petrut Lucian Pierre Hanselmann Pierre RAMBAUD Pierre Rognant Piotr Siwczak Piotr Siwczak Pradeep Kilambi Praneet Bachheti Prashant Shetty Prasoon Telang Praveen Yalagandula Preeti Mirji Pritesh Kothari PriyankaJ Przemyslaw Czesnowicz Puneet Arora QunyingRan Raghu Katti Rahul Priyadarshi Raildo Mascena Rajaram Mallya Rajeev Grover Rajesh Mohan Rajesh Mohan Rajiv Kumar Ralf Haferkamp Ray Chen Rich Curran Rick Clark Robert Collins Robert Collins Robert Kukura Robert Li Robert Mizielski Robert Pothier RobinWang Rodolfo Alonso Hernandez Roey Chen Roey Chen Rohit Agarwalla Rohit Agarwalla Roman Bogorodskiy Roman Podoliaka Roman Podolyaka Roman Prykhodchenko Roman Sokolkov Romil Gupta RongzeZhu Rosario Di Somma Rossella Sblendido Rossella Sblendido Rudrajit Tapadar Rui Zang Russell Bryant Ryan Moats Ryan Moe Ryan O'Hara Ryan Petrello Ryota MIBU Ryu Ishimoto Sachi King Sahid Orentino Ferdjaoui Saksham Varma Salvatore Salvatore Orlando Salvatore Orlando Salvatore Orlando Salvatore Orlando Sam Betts Sam Hague Samer Deeb Santhosh Santhosh Kumar Sascha Peilicke Sascha Peilicke Sascha Peilicke Saurabh Chordiya Sayaji Sean Dague Sean Dague Sean M. Collins Sean M. 
Collins Sean McCully Sean McGinnis Sean Mooney Senhua Huang Serge Maskalik Sergey Kolekonov Sergey Lukjanov Sergey Skripnick Sergey Vilgelm Sergey Vilgelm Sergio Cazzolato Shane Wang Shashank Hegde Shashank Hegde Shih-Hao Li Shiv Haris Shivakumar M Shuangtai Tian Shweta P Shweta P Shweta Patil Sidharth Surana Siming Yin Simon Pasquier Sitaram Dontu Soheil Hassas Yeganeh Somik Behera Somik Behera Sourabh Patwardhan Sphoorti Joglekar Sridar Kandaswamy Sridhar Ramaswamy Sridhar S Stanislav Kudriashev Stephen Gordon Stephen Gran Stephen Ma Steven Gonzales Steven Hillman Steven Ren Sudhakar Sudheendra Murthy Sudipta Biswas Sukhdev Sukhdev Sumit Naiksatam Sumit Naiksatam Sushil Kumar Swaminathan Vasudevan Swapnil Kulkarni (coolsvap) Sylvain Afchain Sławek Kapłoński Takaaki Suzuki Takuma Watanabe Tatyana Leontovich Terry Wilson Thierry Carrez Thomas Bechtold Tim Miller Tom Cammann Tom Fifield Tomasz Paszkowski Tomoe Sugihara Tomoko Inoue Tong Liu Trinath Somanchi Tyler Smith Vasiliy Khomenko Vijay Kankatala Vincent Untz Vishal Agarwal Vishal Agarwal Vishvananda Ishaya Vivekanandan Narasimhan Vu Cong Tuan Wei Wang Weidong Shao Wlodzimierz Borkowski Wu Wenxiang Xiaolin Zhang XieYingYun Xu Chen Xu Han Peng Xuhan Peng YAMAMOTO Takashi Yaguang Tang Yalei Wang Yang Yu Yang Yu YangLei Ying Liu Yong Sheng Gong Yong Sheng Gong Yoshihiro Kaneko Youcef Laribi YuYang Yuanchao Sun Yuriy Taraday Yusuke Muraoka Yves-Gwenael Bourhis ZHU ZHU Zang MingJie Zhenguo Niu Zhenmei Zhesen ZhiQiang Fan ZhiQiang Fan Zhongcheng Lao Zhongyue Luo aaronorosen aaronzhang231 abhishek.talwar alexpilotti armando-migliaccio armando-migliaccio asarfaty ashok2988 berlin cedric.brandily chen-li chnm-kulkarni dekehn e0ne eperdomo eperdomo@cisco.com <> fujioka yuuichi fumihiko kakuma garyduan garyk gecong1973 gengchc2 gessau ghanshyam ghanshyam ghanshyam gongysh gongysh gordon chung hyunsun inspurericzhang ivan-zhu jasonrad jingliuqing joe@midokura.com johndavidge jun xie junbo justin Lund kedar kulkarni 
kvrshenoy lawrancejing lijunjie linb liu-sheng liudong liuqing lizheming llg8212 luke.li lzklibj marios mark mcclain mat mathieu-rohon melissaml mouad benchchaoui ncode openstack rajeev rajeev ritesh.arya rohitagarwalla rohitagarwalla roagarwa@cisco.com <> ronak root root rossella rtmdk@163.com sadasu salvatore <> sanuptpm shaofeng_cheng shihanzhang shu,xinxin shutingm siyingchun skseeker snaiksat sridhargaddam stanzgy sukhdev sunqingliang6 sushma_korati sysnet trinaths venkatamahesh vikas vinkesh banka vishala vmware vmware_nsx_ci wangbo wangqi whitekid xchenum yangxurong yuyafei yuyangbj zhanghongtao zhangyanxian zhhuabj Édouard Thuleau ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/CONTRIBUTING.rst0000644000175000017500000000115400000000000017641 0ustar00coreycorey00000000000000================= Contributor Guide ================= If you would like to contribute to the development of OpenStack, you must follow the steps documented at: https://docs.openstack.org/infra/manual/developers.html#development-workflow Once those steps have been completed, changes to OpenStack should be submitted for review via the Gerrit tool, following the workflow documented at: https://docs.openstack.org/infra/manual/developers.html#development-workflow Pull requests submitted through GitHub will be ignored. 
Bugs should be filed on Launchpad, not GitHub: https://bugs.launchpad.net/vmware-nsx ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542530.0 vmware-nsx-15.0.1.dev143/ChangeLog0000644000175000017500000133501700000000000016763 0ustar00coreycorey00000000000000CHANGES ======= * NSX|P: Fix dhcp migration admin utility * NSX|P: Do not use special subnet rollback with policy DHCP * NSX-V3| remove unused method * NSX|V3+P: allow network deletion in case DHCP port not found * NSX|V: Fix SG icmp rules creation * Octavia: Improve callbacks for non TVD cases * Fix openstack client for vmware-nsx * NSX|V3: remove checks for features that are always supported * NSX|P: Fix trunk driver detach calls * Fix broken DB unittests * NSX|V: Skip unsupported tempest tests * NSX|V: Fix security groups rules creation * NSX|P: Support policy DHCP v6 * NSX|P: Return False at the end of \_is\_overlay\_net * NSXv: use correct DFW config for LBaaS rules * NSX|V3: remove checks for features long supported * NSX|P: Fix transactions related issues * NSX|V3: Remove support for non dynamic creteria * NSX|P: Add validation on number of segment subnets * NSX|P: Do not use transactional apis if not supported * NSX|P: Improve error handling for MP mdproxy port * NSX|V3: Fix broken unit tests * NSX|P Fix get\_subnets for router interfaces * NSX|P: Fix segments update upon dhcp/router interfaces changes * Fix openstack-client support as a new version was released * NSX|P: reduce logs upon Octavia statistics collection * NSX|V3+P: Fix loadbalancer delete to support error cases * NSX|P: Fix LB listener default pool validation * NSX|P: Use policy dhcp & mdproxy in plugin UT * NSX|P: Support policy DHCP * NSX|V3: Remove duplicate az config for tier0 * NSX|V3+P: Fix ladbalancer utility * Update for removal of agent tempest tests * Handle edges with different number of tunnels * Fix broken unit tests * NSX|P: Support segment & port admin state * Upgrade 
decorator version to match openstack projects * Support zuul & tox with py38 * NSX|P: devstack cleanup router interfaces * Complete dropping py27 support goal * Bump neutron-lib to 2.0.0 * Fix broken unit tests * NSX|P: Fix devstack doc regarding MD Proxy config * Fix admin utils doc typos * Fix some typos * V2T: Handle external network mapping * Add common code for network interfaces queries * NSX|V3+P: Allow external subnet updates * Fix availability zones unit tests * NSX|P: Mark WAF profile config as deprecated * NSX|P: Do not create octavia lb vip port on NSX backend * Migrate octavia resources * NSX|V: Fix get\_edges for python3 * NSX|P: Fix loadbalancer deletion port handling * NSX|V: Partial support for distributed routers with FWaaS-V2 * NSX|V3: Fix devstack cleanup for tier0 ports * NSX|P: be more careful when deleting a network * Fix log message type * V2P migration: Improve config file migration * Add logging in get\_ports for all plugins * Fix typo in Octavia driver * NSX|P: Remove unused LBaaS method * Add octavia validations to Pre-migration checks admin utility * NSXv: evaluate NAT rules correctly * NSX|P Change order of actions while deleting a port * NSX|P: Change order of NAT and Edge Firewall rules * V2P migration: Improve config file migration * NSX|P: Use policy search for getting neutron net id * NSX|V: Fix distributed router validation * NSX|P: Fix TZ error message * Fix neutron projects in requirements to match train branch * NSX|P: Delete mdproxy port upon network deletion * NSX|P: Use policy api for ipv6 routing global config * NSX|P: verify segment realization before creating MP md proxy * NSX|P: Replace passthrough api for edge nodes with policy api * NSX|P: Use the policy lib for lb tags instread of nsxlib * NSX|P: Use transactions for NAT rules create and delete * NSX|P: Use transactions for security groups and rules creation * Pre-migration config files generation * DVS: Improve port actions performance * NSX|V: Fix error handling 
for distributed router interface * Python3 support - use correct pip * NSX|V3+P: support removing LB listener default pool * NSX|V3: Fix vpnaas driver getting tier0 * NSXV: fix router static routes for non-admin users * NSX|P: Fix typo in comment * NSX|P: Use transactional API for router actions * NSX|P: Use transactional API for port create & update * Pre-migration checks admin utility * NSX|V3+P: Prevent IPv6 default router creation as static route * Fix octavia driver unittests * Stop testing python 2 * NSX|P: Fix metadata get\_subnets * NSX-TVD: Copy filters dict before calling plugin * Add train DB migration milestone * NSX|V3+P: Improve subnet create & update with DHCP * NSX|V3+P: Validate static routes ip version * NSX|V: Validate SG rule remote-ip-prefix is not 0.0.0.0/x * NSX|P: support policy MDproxy 15.0.0 ------ * NSX|V3+P: Fix MDProxy TZ validation * NSX|P: Fail network creation of mdproxy creation fails * NSX|v+v3+p: Allow resetting port binding host * NSX|V: Configurable backend security group name * NSX|V3+P: Validate network TZ match the MDproxy one * Fix master branch lower-constrains * Octavia driver: agent implementation * NSX|V3+P: Log app profile deletion errors * bump neutron-lib to 1.29.1 * Bump openstackclient & osc-lib version * NSX|V3+P: Add vlan-transparent flag to port vif details * NSX|V3+P: Fix mdproxy handling for vlan transparent * NSX|P: Fix devstack cleanup to use the default domain * NSX|P: Do not set None description on resource partial updates * NSX-T: Add advanced-service-providers to extension * NSXv: Cleanup redundant FW rules from VDR * NSXv: VDR interface operation performance * NSX|P: Fix edge FW rules for VPNaaS * NSX|V: Add configuration validation * NSX|V3+P: fix \_get\_subnets\_for\_fixed\_ips\_on\_port * NSX|V3+P: Improve router interface actions performance * Fix member update in octavia * Update octavia-lib requirements * NSX|V: Remove unused VCNS apis * NSX|P: RE-Enable GW firewall when creating a service router * 
NSXP: Trunk support * NSX|P: Fix broken unit tests * Improve devstack doc * NSXv: Use HTTPS protocol for OpenStack HTTPS HM * use payloads for ROUTER\_GATEWAY events * NSX|V: Support LB HM expected codes * NSX|P: Handle concurrent resource creation at init * NSX|P: Improve security group creation * Revert "Use config override to update dummy config for FWaaS" * NSX|V: Fix FWaaS V2 handling 0.0.0.0 ips * NSX|V3+P: subnet\_create performance improvements * NSX|P: Check service router realization * NSX|P: Remove warning logs from admin utilities * Update api-ref location * NSX|V3+P: Prevent adding cookie session persistence to HTTPS listeners * Update tenacity version and usage * Use config override to update dummy config for FWaaS * TVD: Add log messages to admin utility * NSX|P Add admin utility usage information * NSX|V3+P: Fix address pairs validation * NSX|P: Validate availability zones * NSX|V3+P migration: Support migration of Octavia objects * NSX|P: Fix router interface subnet validation * Use FWaaS DB api instead of querying the DB directly * NSX|V3+P migration: Fix different issues * NSX|V3: Update listener default pool in update action * Remove log\_helper from lbaas methods * NSX|V3: Delete dangling virtual server * NSX|V3+P: Fix max address pairs check & message * NSX|V3+P: Admin utility to replace tier0 * NSX|P: Add control over realization interval * Raise proper error when NSX resources are not found * NSX|V3+P migration: Support migration of FWaaS V2 objects * Octavia driver: use globals for transport and server * NSX|V: Add condition for port update when using dup ip\_address * NSX|P rename Lb internal method * update for python3 train jobs * api\_replay for migration into the policy plugin * Avoid relying on default NDRA profile * NSX|P: Do not set the tier1 TZ * NSX|V3+P: Remove illegal characters from user-id in NSX headers * bump neutron-lib to 1.28.0 * NSX|P: Handle tooManyRequests error from the NSX * NSX|P: Add caching to the networks NSX ID 
* NSX|P: Rollback update GW info in case of backend issues * NSX|V3: Retry getting the LB service in case of creation failure * NSX|V: Bug fixing for allowed address\_pairs * NSX|V3 migration: Ignore internal NSX-V objects * [NSX-v] Improve handling of port security transition * Housekeeper: getter bugs * NSX|V3 migration: Fix \_fixup\_res\_dict for api\_replay * NSX|P: Refactor GW FW creation & deletion * NSX|P: Remove LB WAF support * Revert "Skip broken unit test temporarily" * update test-requirements.txt to only what's needed * NSXv: retry on timeout while updating interface * NSX|P Fix network update rollback in case of NSX error * NSX|P: Check for vpn service while updating GW * NSX|V3+P: Ignore IP ::/x in security group rules * NSX|P: fix broken UT * NSX|P: Make sure tenant ID is added to the default pool in Octavia * NSX|P: Improve LB unit tests * NSX|V3: Update LB vip with device owner * NSXT: use correct binding to retrieve HM id * bump neutron-lib up to 1.27.0 * Add bindep.txt for vmware-nsx * [NSX-v3+p] Support switching session persistence type * NSX|P+V3: Improve get\_ports performance with designate * [NSX-p] Check for a LB before enabling/disabling service router * NSX|P: Fix LB VIP related issues * Handle missing tenant\_id in listener's pool * NSX|P: Fix plugin get\_port * NSX|P: Allow vlan-transparent networks with overlay TZ * Add more info in Octavia driver error handling * Skip broken unit test temporarily * NSX|P: Fix the check for LB service existence * [NSX|p] delete\_service\_router does not need context * [NSX|v3] LBaaS: process VS update only if VS exists * [NSX-v3] Do not log \_process\_vs\_update method call * [NSX|v3] L2 gw: store segmentation id in DB in any case * NSX|V3+P: Restict VLAN networks creation * [NSX-V] Allow VIP-like address if allow\_multiple\_address\_pairs=True * NSX|P: Support listener default pool with session persistence * NSX|P: Set ipsec endpoints advertisment * NSX|V3+P: Catch NSX error when trying to update 
edge firewall * NSX|V3: Fix router check for LB service * NSX|P: Add LB driver unit tests * NSX|V3+P: Support QoS on ENS networks * NSX|P Lb session persistence cleanup * NSXP: advertise static routes on no-snat * NSX|P: VPNaaS driver * NSX|V3+P: Levarage the tagging plugin for nsx logical ports * [devstack] Skip ovs\_cleanup for vmware\_nsx plugins * [devstack] Fix recursion issue in \_ovsdb\_connection * Devstack: install\_neutron\_projects bugs * NSX|P: Fix LB listener update * use octavia-lib for exceptions * Move import out of octavia driver's way * NSX|V3: Fix Lb listener deletion * NSXP: place project id in security rule tag * NSX|P: Fix LB deletion without a router * update bandit, hacking and flake8 requirements * NSXP: Forbid multiple listeners per LB pool * NSX|V3: Fix VPN service deletion * Devstack - use Neutron branch for related projects * NSX|P: Update tier1 GW and route adv together * NSX|V: prevent the deletion of Lb interface ports * DVS: Add plugin validations * TVD: Retry on pluging initialization * NSX|V3+P: restrict associate floatingip to router interface/DHCP ports * Fix broken unit tests * retire the NSX MH plugin * NSX|V3: Create service router upon LB member creation * Adding TVD + Policy plugins opts * NSX|V3 Fix dhcp-relay validation * Update six requirements to 1.11 * NSXT LB: initialize client, server SSL profiles * NSX|V: prevent updating router size * NSX: Restrict enable-dhcp on update external subnet * NSX|P: Handle update of dhcp port * Remove neutron-lbaas support & dependencies * NSX|V3+P: Change max allowed host routes * NSX|P: Create/delete tier1 locale-service upon router create/delete * NSX|V3+P: add context param to is\_overlay\_network abstract func * NSXP: parse statistics correctly * NSX|V: Fix metadata admin utility from missing config * NSX|V: check edge existence before updating router admin state * NSX|V3: Admin utility for reusing existing default section * NSXP LB: use correct attribute for LB service id * NSX|P: 
Whitelist IPv6 MLD in default FW section * NSX|P: Validate internal IPAM driver * NSXv: Subnet NAT rule configuration * NSX|P: Fix removing LB session persistence from a pool * NSXP: LB TERMINATE\_HTTPS bugs * NSX|V3+P: Fix HTTP response code for cluster down * NSX|P: Cleanup partial update workaround * NSX|V3: Specify disabled mac learning profile for NSX port * NSXP: loadbalancer, listener statistics * NSX|V3: Fix octavia default pool handling * NSX|V3+P: Delete backend HM when the LB pool is deleted * NSX|P: Fix LB L7 rule update * NSX|P: Handle missing data in NSX loadbalancer status * NSX|P: Add unit tests for ipv6 * cleanup openstack git refs and lower constraints * NSX|P: Fix loadbalancer delete cascade * NSX|P: Fix removing LB session persistence from a pool * NSX|P: Fix fwaas rules update * bump neutron-lib to 1.26.0 * NSXP: update member admin state * NSX|P: Use LB get\_network\_from\_subnet carefully * NSX|V3+P: Add support for Octavia member enable-backup * NSX|P: Improve router GW setting * NSX|V3+P: use LB tags constants * NSX|P: Fix LB operational status getter * NSXv: Resolve FWaaS-LBaaS conflict * Ignore 0.0.0.0 ips in FWaaS (and not just 0.0.0.0/x cidrs) * NSX|V adminUtils: detect and clean orphaned section rules * NSX|V3+P fix dhcp status when updating subnet * Fix bulk subnets unittests * NSX|P Fix different LBaaS issues * NSX|P: Fix ipv6 adverisement rules * Dropping the py35 testing * NSXP/v3: lookup router with different owner * Fix devstack doc FWaaS v2 sections * Fix deletion of SG rules when deleting their remote group * Fix devstack documentation for Octavia * OpenDev Migration Patch * NSX|P: Add ipv6-specific router adv rule * NSXP: Use native route advertisement rule API * NSX|P: Always use default domain * NSXP: Fix LB service tags on public subnet * NSX-V3: Update edge firewall after SR creation * NSX|P: Fix dhcp config with dual stack * NSXP LB: pass lb\_pool\_id when needed * NSXP: protect router when it hosts a loadbalancer * Fix 
session persistence profile management * NSX|P: Handle slaac profile for vlan subnets * NSX|V3 adminUtils: detect and clean orphaned section rules * NSX|P: Improve error handling when deleting backend resources * Delete SG rules when deleting their remote group * NSX|P: Use random ID in FWaaS service * Add DB migration milestones for Stein * NSX|P: Update slaac config on router * NSX|P: Disable API passthrough for enable\_standby\_relocation * NSX|P: Improve exclude list creation * NSX|P: Fix LB waf profile usage * Octavia: add o-da to required service list * NSX|P: Fix removal of provider security groups from port * NSXP LB: Complete Octavia init * NSX|P: Enable IPv6 Router Advertisement * update requirement versions for stein * Temporarily use octavia exceptions * NSXP: Delete LB pool when listener is broken * NSX|P: Configure exclude list for port-security-disabled ports * NSX|P: Sleep before first check of realization * Implement providernet.\_raise\_if\_updates\_provider\_attributes internally * NSX|P: Add ipv6 neighbor discovery to global FW * NSX|V3: Support listener default pool with session persistence * use trunk constants from neutron-lib * NSX|V3+P: Ensure router GW & interfaces do not overlap * NSX|P: Limit one ipv6 subnet per network * Handle multiple default SG creation in all plugins * Retire oslosphinx * NSXP: Use router entity on call to get\_realized\_id * NSX|P: LB WAF profile support * NSXP: LBaaS/Octavia support * NSX|P: Add configuration parameters for realization timeouts * NSX|P Fix comments typos * NSX|V3+P: Add verification of num defined address pairs * NSX|V3+P: remove redundent code in get\_port/s * NSX|V: Restrict creating conflicting address\_pair in the same network * NSX|P: Fix provider security groups * NSX|V3: Forbid multiple listeners per LB pool * NSX|P: Initialize slaac ndra profile on startup * NSX|P: Fix vlan router attachment for dual stack * NSX|P: Add ipv6 link local address to bindings * NSX|V3: Simplify LBaaS 
implementation * NSX|P: Do not create router port on backend * NSX|P: Fix dual stack subnets on backend * NSX|P: Enable ipv6 forwarding on startup * NSX|P: Remove attachment type from segment port * NSX|P: Explicitly set no mac learning profile on ports * NSX|P: Fix ethertype SG translation * NSX|P Change minimal supported backend version to 2.5 * [NSX-v3] Use bridge endpoint profiles for L2 gateways * NSX|V3+P: Enable ipv6 in allowed address pairs * NSX|P: Fix groups scope&tag condition for FWaaS * NSX|V3+P: Enable dual stack fixed IPs * Revert "NSX|V3: Simplify LBaaS implementation" * Start using octavia-lib * Add octavia-lib to vmware-nsx managed libraries * update constraints in prep for stein * NSXv: admin util metadata breakage recovery * NSX|P: Fix the default section membership * NSX|V3+P: Limit number of subnet static routes per backend * NSX|P remove sleep before deleting policy services * NSX|P FWaaS V2 support * NSX|P: Fix groups scope&tag condition * Add tarballs to install on RHOSP director for NSX integration * remove use of quota\_db * NSX|V3: Simplify LBaaS implementation * NSX|V+V3: Refresh octavia driver DB session * Remove usage of tempest test\_l3\_agent\_scheduler * Octavia: Add logging on driver exceptions * NSX|P: Add scope to security group rules * NSX|V: enable allow\_address\_pairs upon request * bump neutron-lib to 1.25.0 * remove external\_network\_bridge option * NSX|V3: Fix octavia pool update operation * Revert "NSX|V: Add lock before spoofguard IPs operations" * NSX|V3: Change status code of SG failure * NSX|V3+P: Respect default keyword for physical\_net * NSX|P: Fix nsxlib api call * NSX|P: Add TZ configs to devstack code * skip DVR specific dynamic routing tests * NSX|V admin utils: Find and fix spoofguard policies mismatches * NSX|V: Add lock before spoofguard IPs operations * NSX|V3+V: Fix octavia completor func * NSX|P: Use the correct domain ID in SG rule creation * NSX|P: Support vlan router interfaces * NSX|V3: Add unit 
tests mocks to avoid loggin errors * NSX|V3+V: Handle fwaas policy modification * NSXv: improve logging for metadata setup * NSX|V3: FWaaS v2 without GW * NSX|P: Add mocks to nsxlib in unit tests * Upgrade appdirs lower constraints * normalize and use ALIAS in supported\_extension\_aliases * NSX|V+V3: Fix FWaaS RPC bindings * use api def ALIAS in supported\_extension\_aliases * Fix provider security group exception call * NSX|V3: prevent user from changing the NSX internal SG * NSX|V3+P: allow removing qos policy from a port * NSX|V3+P: Fix subnet creation with dns domain * NSX|V3+V: Handle fwaas policy removal * NSX|V3: Fix LB advertisment to use admin context * NSX|V+V3: Fix Octavia statuses update * NSX|P: Handle ipv4 and ipv6 traffic in SG * use neutron-lib trunk resource names * NSX|V+V3: support octavia delete cascade * NSX|P: Temporary worakround for delete SG rule * NSX|V3+P: Create port bindings for dhcp ports * NSX|V+V3: Use the right port device id for octavia vip * [NSX|V3] Listener data might not have loadbalancer name * NSX|V+V3: relax FWaaS validation * NSXT LBaaS: Fail HM delete operation on exception * bump neutron-lib to 1.23.0 * NSX|P: Fix vlan transparent configuration * NSX|V: FWaaS-V2 driver * Octavia driver: various fixes * DVS: Prevent from creating a vlan network with same tag and dvs * Octavia error handling * NSX|P: Enable mac change in switching profiles * DVS: Disable port security and security group * [NSX-V] Ensure binding exists before assigning lswitch\_id * Remove FWaaS V1 code * NSX|P: disable tier1 firewall before removing the service router * NSX|V: Fix update section header * NSX|V3: Validate FWaaS cidrs * NSX|P: Add neutron object ID to NSX tags * use payloads for PORT BEFORE\_DELETE callbacks * use payloads for all SUBNETPOOL\_ADDRESS\_SCOPE events * Devstack: Delete old project before deciding how to get the new code * NSX|P: Forbid cert operations without passthrough * NSX|P: Allow only 1 router interface per network * stop 
using register\_dict\_extend\_funcs * use payloads for SECURITY\_GROUP BEFORE\_CREATE events * NSX|V3: Init FWaaS before spawn * NSX|V3+P: reuse common devstack code * Remove ryu from requirements * Multi-DVS Support When Hypervisor is vCenter * Devstack: Fix failed of ml2 directory creation * Convert policy.json into policy-in-code * bump neutron-lib to 1.22.0 * NSXv: implement loadbalancer status * Fix security group broken code & tests * Devstack: Fix failed of ml2 directory creation * Multi-DVS Support When Hypervisor is vCenter * Fix cffi lower constraints * NSX|P+V3: Do not allow external subnets overlapping with uplink cidr * Devstack: Fix ml2 config file creation for FWaaS-V2 * NSX|P Fix port admin state setting * NSX|V3: Fix operator on LB advertisment rule * NSX|V3+P: Set router standby relocation when creating service router * NSX|P: Fix default SG section criteria * Revert "NSX|P: No need to add resource name on update" * NSX|P network & port admin state support * NSX|P dns integration support * use payloads for all SUBNETPOOL\_ADDRESS\_SCOPE events * NSXT: Remove redundant code, add logging * NSX|V3 Support expected codes for LB HM * NSX|P: No need to add resource name on update * Update Octavia doc to use noop network driver * NSX|V3: Fix ipam to check subnets carefully * NSX|P support selection tier1 edge cluster * NSX|P: Support router standby relocation * NSX|V3: remove redundent ens check * NSX|P: Handle subnet update and port dhcp bindings * NSX|P: Support Tier0 / Transport zone related actions * NSX|P support init configuration by NSX tags * NSX|P: Support bulk subnet create * NSX|P metadata proxy support * NSX|P: DHCP & MD Proxy devstack cleanup * NSX|T: Add enable standby relocation * NSX|P Add priorities to NAT rules * NSX|P: QoS support * NSX-T+P: Delete subnet in case of dhcp error * NSX|P: Consume nsxlib folderization patch * Fix the misspelling of "except" * Add safty checks when getting port provider securtiy groups * NSX|P: Add error 
handling when getting the segment realized id * NSX|V3: Fix LB statistics getter * NSX|V3 Fix LB pool create * Fix Octavia devstack instructions * NSX|P: Plugin code cleanup * NSX|P: Initial dhcp support * NSX|V3 reuse code for re-initializing the AZs in the unit tests * NSX|P Availability zones support * Devstack: Create ml2 config file for FWaaS-V2 * NSXv3: Typo in LB dummy driver * NSX|V3: Add L2GW connection validation * NSX|P Add more router unit tests * NSX|P: Add project ID to NSX objects tags * NSX|P add port security support * NSX|P: Call correct method on SG update * NSX|T: Optional distinct edge cluster uuid for T1 router * Move native dhcp logic to common plugin * NSX|P: Remove tier1 router service in cleanup * DVS: Adding proper error message when missing net provider data * NSX|V3: Configure tier0 transit networks * NSX|P Fix router interface removal * use neutron-lib for resource\_extend * NSX|P: Fix segment port vif type * Revert "NSX|V: Disable some CI tests that fails often with timeout" * NSX|V: Disable some CI tests that fails often with timeout * NSX|P Add SG rules groups cleanup * NSX|P: Add router advertisement & static routes * NSX|P: Policy plugin use passthrough api * use payloads for SERVICE\_EDGE events * test/prep lower-constraints with py36 * LBaaS: Session persistence for NSX-v3 * use get\_updatable\_fields from neutron-lib * Fix qosqueue unit tests * NSX|T: DR only Neutron logical router * fix lower constraints * NSX|P: Initial availability zone support * TVD: Add missing VPN driver api * Policy plugin floating IPs support * NSX|P: Fix router interface segment id * Policy plugin: Add devstack/admin-utils for client auth * NSX|P: Add router interfaces NAT rules * Update devel info: mailing list * NSX|V3: FWaaS translate 0.0.0.0 to Any ip * NSX|P: Initialize segment profiles * bump neutron-lib to 1.21.0 * NSX|V use context reader for router driver * NSX|V Fix AdminUtils get apis to use the right context * Change openstack-dev to 
openstack-discuss * NSX|V New admin utility to list existing NSX policies * TVD Octavia: Fix stats\_getter parameters list * NSXv: avoid changing of LBaaS port states * NSX|P: Basic router interface & GW support * TVD LBaaS: fix operational status api * use payloads for ROUTER\_INTERFACE BEFORE\_DELETE events * Fix devstack docs for advanced services * Use tenant context to get router GW network * NSX|v: refactor unittests with disabled dhcp * stop patching RbacNeutronDbObjectMixin * Set correct switch security profile when port sec disabled * NSX-v3: Fix listener for pool not fetched anymore * NSX-v3: Prevent comparison with None * stop using common\_db * stop using \_get\_collection\_query from CommonDbMixin * stop using \_apply\_filters\_to\_query * use neutron-lib for \_model\_query * NSXv: use admin context for metadata port config * NSX|P: Initial admin utilities * Skip new unsupported unittests * NSX|v3: Delete unused vpn services * NSX|P: Port create/update/delete enhancments * NSX-v3: Fix LB HTTP/HTTPS monitor impl * NSX|V Fix orphaned networks and bindings * NSX|V3: Fix LB VIP advertisement * NSX|V: Fix broken unittests * NSX|P provider networks support * NSX|V3 Fix dhcp binding rollback * Update min tox version to 2.0 * Policy plugin - fix port tags * use model\_query from neutron-lib * Ensure NSX VS is always associated with NSX LBS * use lib's object registry for neutron objects * NSX|P: Handle port security * NSX|V3: validate LBaaS NSX stats fields * Fix OSC client extension initialization * TVD verify loadbalancer project match the LB object project * TVD: Do not crash in case the project is not found * NSX|V3: Fix FW(v2) status when deleting an illegal port * NSX|V3: Fix member fip error message * NSX|P: Add devstack cleanup for basic NSX resources * NSX|P: Add OS + project tags to all NSX objects * TVD: Add LBaaS get\_operating\_status support * use context manager from neutron-lib * Fix octavia listener unit tests * NSX|V Fill VIF data for 
upgraded ports * NSX|V3: Do not fail on router rollback * NSX|P: fix typo in nsxp cleanup script * NSX|P: Fix security group rule validation * NSX|P Fix devstack cleanup call * Devstack plugin: fetch Neutron only when needed * NSX|P: fix bug in cleanup script * NSX|P: Start adding tempest tests * Policy plugin: fix port creation parameters * NSX|V3 Add NO-NAT rules only for routers with enabled SNAT * Policy plugin: Add Network & ports basic unit tests * Policy plugin: Move missing method from v3 to common * Policy plugin: integrate with create\_or\_overwrite output change * NSXv: Metadata should complete init * NSX|V admin utils: Fix rpc method override * NSX|V3: Add LB status calls validations * NSX|V fix LBaaS operation status function params * NSX-Policy: Support NSX policy in devstack * NSX|P: remove sys usage from plugin * Fix admin utils doc formatting * Fix some unittests global issues * Update .zuul.yaml * NSX|V3 remove lbaas import to allow the plugin to work without lbaas * NSX|P: Initial connectivity support * NSX|V Allow updating port security and mac learning together * NSX|P: Policy pluging initial SG support * NSX: moving devstack funcs to common * NSX|V3: Change external provider network error message * NSX|V: Improve SG rule service creation * Skip Octavia init in UT * NSX|V3: remove redundent DB calls for floatingips * TVD: Octavia support * Remove release pipeline * NSX|V3: Support security features for ENS switches * use retry\_if\_session\_inactive from neutron-lib * NSX|V: Fix BGP plugin get operations * NSX|V+V3: Prevent adding different projects routers to fwaas-V1 * add py36 jobs and tox target * NSX|P: Use nsx\_p certificate configuration * NSX|V+V3: Octavia driver * use openstack-tox-docs job * Remove templates that will not work here * Refactor provider SG validation * move per branch zuul config into repo * opt in for neutron-lib consumption patches * use common rpc and exceptions from neutron-lib * Adjust router notification test * 
NSX|V: Validate DVS Id when creating flat/vlan network * NSX|V3: Restrict update of LB port with fixed IP * NSX|V3: Fix bug in checking lbaas port dev-owner * NSX|V: Fix devstack cleanup for python 3 * NSX|V3: restrict allowed-address-pairs on LB port * NSX-Policy: Skeleton for the new NSX Policy plugin * NSX|V3: Refactor network & port validations * NSX|V3: Fix returned network type is ddi check * NSX|V3: read original subnet only once during update * NSX|V3: Add support for 'direct' vnic types * NSX|V3: Check specific exception when deleting dhcp port * NSX|V3 Validate rate-limit value in admin utilitiy * NSX|V3 adminUtils: Use nsx plugin to get ports * NSX|V3: Fail on unsupported QoS rules * NSX|V: Avoid updating the default section at init * NSX|V3: Fix external LB member create * Update UPPER\_CONSTRAINTS\_FILE for stable/rocky * NSX-V3| Fix port MAC learning flag handling * NSX|V: Fix host groups for DRS HA for AZ * NSX|V3: VPN connection status update * NSX|V3: LBaaS operating status support * NSX|V3 update port revision on update\_port response * NSX|V Fix policy security group update * NSX|V Fix policy security group update * NSX|V+V3 QoS rbac support * NSX|V+V3 QoS rbac support * Devstack: Use the right python version in cleanup * NSX|V: Fix host groups for DRS HA for AZ * fix misspelling 'configuration' of functions * NSX|V3 update port binding for callbacks notifications * NSX|V3 update port binding for callbacks notifications * NSX|V3: Support new icmp codes and types * NSX|V3: Support new icmp codes and types * NSX|V3: Add VPNaaS driver tests * NSX|V3: Make sure LB member is connected to the LB router * NSX|V3: Prevent adding an external net as a router interface * NSX|V3: Make sure LB member is connected to the LB router * Remove extra period from invalidInput messages * NSX|V: Shorten the L2 bridge edge name * NSX|V3: Prevent adding an external net as a router interface * NSX|V: Shorten the L2 bridge edge name * NSX|V3: Fix port binding update on 
new ports * NSX|V3: Fix port binding update on new ports 13.0.0 ------ * NSX|V3: New admin utility to show MP cluster managers IPs * Tag the alembic migration revisions for Rocky * NSX|V3: Block subnets with host routes and DHCP disabled * NSX|V3 Add port binding to the DB * NSXv: Add SG rule description to NSX notes field * NSX|V+V3: Move FW section logging update to admin utility * NSX|V3 Add log message on netowrk creation/update with QoS * Fix LBaaS statistics * NSX|V3: Ensure early router GW validations * NSX|V3 exclude multicast addresses from NSX IPAM * NSX|V3 refactor add router interface code * Remove invalid config from local.conf sample * NSX|V3: Skip icmp rule tempest test * Fix NSX-V QoS tests to use legal tenant ID * NSX|V: Combine plugin & MD proxy unit tests * NSX|V3: Handle port-not-found during get\_ports * use get\_port\_binding\_by\_status\_and\_host from lib * NSX-V3: Fix security-group unittests * Upgrade OSC security groups code * use callback payloads for ROUTER/ROUTER\_GATEWAY BEFORE\_DELETE events * NSX|V3 remove unused parameter from router port call * NSX|V3: Update failed LB member status * NSX|V3: Add LB pool description to the NSX pool * update requirements for neutron-lib 1.18.0 * NSX|V3: Add housekeeping jobs * add venv target to tox for release job * NSX|v: fix port binding * NSX|V3 replace deprecated nsxlib apis 2018.0rc1 --------- * NSX|v: Fix port\_binding usage * Add py36 testenv * Get devstack ready for PY35 * NSX|V3: Prevent enabling MAC learning on trusted ports * NSX|V3: Do not allowed changing dhcp port owner * fix py27 gate failure due to astroid 2.0 * Fix bug in install\_neutron\_projects function * Skip new neutron unittests * Devstack: check if plugin/project is enabled * NSX|V3 prevent router SNAT with VPN service * Clarify NSX-V vs. 
NSX-T * Fix six.moves import for pep8 * change port bindings shim * NSX|V3 use network dns\_domain on port fqdn * Add housekeeper GET/PUT run options * NSXAdminV3: Add message on client cert generation * NSX|V3 add lbass\_pending housekeeping job to doc * Housekeeper: deliver output * NSX|V3: do not allow changing the external flag of a network * NSX-V: Admin state of internal LB port * NSX|V: Fix FWaaS error firewall delete * NSX|v3 Admin utils refactor + additions * NSX|V3 fix global SG creation duplication * TVD LBaaS fix for core plugin * make python 3 the default for tox.ini * fix typos * Add devstack cleanup for loadbalancers * modify grammatical errors * replace 'a edge cluster' with 'an edge cluster' * fix typo * NSX|V3: Fix update network rollback * NSX|V3: Support LBaaS monitor update * shim references for port\_binding * fix lower constraints targets in tox ini * updated lower constraints jobs for include dependencies * LBaaS legacy mode bugfix * NSX|V3: Fix ENS check on update network with QoS * Fix V3 UT mocks to return the correct type of output * NSX|V3 add LBaaS listener description to the backend object * Fix LB drivers to have a core plugin * NSX|v Do not allow changing the type of a router with firewall * NSX|V3: Add ovs\_hybrid\_plug flag in ports * Fix FWaaS callbacks imports * NSX-V: fix exception for distributed router * NSXv3: add pool-level lock for LB pool member operations * NSX|V3: Fix router delete callback for LBaaS * bump neutron-lib requirement to 1.16.0 * add enabled/disable on lbaas listener * NSX|V+V3: Preparations for Octavia support * Housekeeper: implement get\_housekeeper\_count method * Add mock to the requirements * Replace os.makedirs to avoid process race * stop using safe\_reference from common db mixin * NSX|v3: Do not retry on DB duplications on section init * Stop using nsxlib deprecated classes * NSX-V: py35 compatibility * AdminUtils V3: Support NSX objects with no tags * AdminUtils v3: Fix FWaaS v2 initialization * 
AdminUtils V3: Do not set nat\_pass for NO-NAT rules * use retry\_db\_errors from neutron-lib * NSXv: Handle listener failures on backend * use get reader/writer session from neutron-lib * load neutron objects using neutron-lib * add stestr to test requirements * NSX|V3: Wait for another neutron to create default section * NSX-V: decode authToken for supporting python 3.5 * Integrate with FWaaS plugable driver * NSX|V3: Do not add security profile for native DHCP ports * LBaaS pending objects housekeeping * Fix broken unit tests * NSX|v: Support icmp-v6 security group rules * NSX-V3: Prevent deletion of router gw/interface used by LB * NSX|V adminUtils: List & clean NSX portgroups * NSX|V3: remove unused LB callback * Devstack: refactor neutron projects install * NSX|V3: cleanup duplicate sections on startup * NSX-V&V3: support filters for list availability zones action * Fix broken unittests * NSX|TVD: address metadata subnet request * TVD Admin utils: Fix FWaaS calls * NSX|V3: pass list to log formaters * NSX|v3: : ensure that 0.0.0.0/# is treated correctly in SG rules * NSX|V3: Flat networks do not support DDI * use db utils from lib * NSX-V: Prevent LBaaS L7 policy creation for HTTPS listeners * NSX|V3: support 'local' type for external network * NSXv3: Update devstack metadata settings for uwsgi * dhcp\_meta/rpc: cleaned up greenthread.sleep(0) calls * Adopt to neutron automatically expiring obsolete relationships * Fix some typos * NSX|V Do not allow router with lbaas migration * TVD: Fix devstack calls to certificate create/delete * NSX-T: Disable Port Security by default for ENS TZ * NSXv: Fix member delete bug * NSX-V3: Enhance subnet overlap checks * NSX|V fix create/delete subnet race condition * NSX|v: Fix create\_dhcp\_binding error handling * NSXv: Use router Edge for LBaaSv2 unit tests * NSX-V: Add server-ip-address to the supported dhcp options * Trivial: Update pypi url to new url * Add script to clean up all backup edges owned by Neutron * Delete 
the database entry when backend is not found * Fix vmware\_nsx documentation * NSXv: Allow use of router edge for LBaaS * TVD L2GW: support missing methods * NSX-V3: Add agent config to FWaaSV1 callbacks * TVD: fix project plugin OSC to display errors * NSXv: Fix admin util member IP overwrite * NSX-V3: fail FWaaS rules with 0.0.0.0/x cidrs * use rpc Connection rather than create\_connection * NSX|V3: Use empty value and not ANY for remote prefix * uncap eventlet * Fix pep8 new warnings * NSX-V3: Do not add mac learning profiles for ENS port * Adding --force support for nsxadmin backup-edges resources * NSX-V3: Add agent config to FWaaSV2 callbacks * NSX|V3: ensure that 0.0.0.0/0 is treated correctly * NSX-V3: Do not remove edge id from logical router * NSXv: Fix LBaaS foreign key migration * Replace uuid.uuid4() with uuidutils.generate\_uuid() * NSX-V3 Use admin context for VPN port creation * NSX|TVD: address metadata subnet request * Handle network and port create/update, ENS + QOS * Allow NSX plugins to work without VPNaaS * Remove duplicate policy entries * tox: Remove unnecessary configuration * Allow NSX plugins to work without FWaaS * NSX-v3: fix overlay network check * Add lower-constraints job * NSX|V3: fix ENS VLAN attach to router * Filter port-list based on security-group * NSX|V3: prevent attaching transparent VLAN to router * NSX|V3: ensure external subnet does not conflicts with T0 address * Use current neutron project for devstack & CI * Updated from global requirements * Update lib to >=12.0.0.0 * NSX-V3: prevent the user form changing vpn internal ports * Updated from global requirements * add py27-dev and pep8-dev tox targets * NSX-V3: Enhance VPNaaS related validations * Fix tvd-v devstack * Remove tox\_install.sh * NSX|V3: improve trandparent VLAN support * NSX-V3 devstack cleanup for VPNaaS * TVD allow devstack init without V3 transport zone * NSX-V3 Add NO-DNAT rules only if supported * NSX-V3 fix FW rules for VPNaaS * NSX|V3: prevent 
disabling MAC learning on ENS TZ * Revert "use str names rather than classes for orm relationships" * NSX-V3 Fix some VPNaaS configuration bugs * TVD: Support TVD devstack with disabled plugins * NSX|V: fix for existing spoofguard policy * Updated from global requirements * Move neutron to requirements * TVD: configure the list of supported plugins * L2GW: support missing methods * NSX|V3: ensure that description is not None * Fix security groups ext\_properties loading * Updated from global requirements * TVD: Fix get-floatingips plugin selection * AdminUtil: V3 refactor get md\_proxy\_status * NSX-V3: Update NSX objects on network update * NSX-V: Avoid warning on deleting dhcp binding for non compute port * use str names rather than classes for orm relationships * Updated from global requirements * NSX|T DFW support * NSX-V3: verify router transport zones * AdminUtil: using nsxlib func for orphan dhcp rm * Remove old TODO comments * NSX-V3: Add router overlay TZ with GW * Fix broken unit tests * NSX-v3: Add default tier0 router to AZ config * NSX-V3 Add vlan transparent warnings on init * NSX|V3: prevent duplicate default FW sections * TVD: update port migration for V -> T instances * NSX-v3: Remove unused lbaas utils method * migrate to stestr * use plugin common utils from neutron-lib * remove SHARED constant that now lives in neutron-lib * AdminUtils: Improve NSXv security admin utils * NSX-V3: Fix AZ retrieving for DHCP profile * use common agent topics from neutron-lib * fix Parameter spelling error in unit test * Enable configuration to decide on vlan tag per TZ * NSX-V3 Add NO NAT rules for router interfaces * fix url in HACKING.rst * NSX|V3: router attached to VLAN must have gateway * TVD IPAM support * TVD: Admin utility for migrating a project * NSX-V Admin Utils: List BGP GW edges * NSX-V3 add ens\_support arg to devstack * TVD: Add service plugins to separate list results * NSX-V3: do not add the DHCP profile for ENS networks * NSX|V: treat edge case 
when spoofguard entry already exists * NSX-v3 VPNaaS: Use a local address from the external network * NSX|TVD: add ability to add extra filters * NSX|V: ensure that no sec groups and port sec will discard traffic * release note update * TVD: Make sure lbaas subnet belongs to the correct plugin * AdminUtils: Skip housekeeping on admin utils calls * TVD: fix get\_<>s but plugin with filters * use is\_extension\_supported from neutron-lib * resource2 is depracted, use resource * TVD Fwaas: prevent adding wrong plugin routers to FW * admin utility enabled nsx-update for security groups (V and T) * Fix admin utils doc * Tag the alembic migration revisions for Queens * NSX|V3: ensure that 0 is in the guest tag range * TVD: Make sure subnets project is the same as the network * Add logging to help detect port security conflicts * NSX|V3: validate external subnet has no DHCP enabled * NSX-V3: Update vlan-transparent case for network * NSX-V: make sure error fw can be deleted * NSXv DNS integration * NSX|V: default support for IGMP traffic * NSX|V3: only allow physical network to be confogured for external net * NSX|V3: allow a router to be attached to a VLAN network * NSX|V: spoofguard\_enabled disabled enhancement * NSXv3 DNS integration * TVD: do not support policy extension for nsx-t plugin * TVD: fix get\_<>s but plugin with filters * TVD: Fix filtering without project id * TVD: no longer experimental * Use the new PTI for document build * NSX|V3: enable VLAN transparent to be configured with VLAN * TVD: Fix FWaaS typo * AdminUtils NSX-v: Fix SG migration to policy * Remove pbr warnerrors in favor of sphinx check * TVD: ensure that extra attributs are set for router iif they exist * AdminUtils: Skip unsupported resources * NSX|V3: enahance admin utility for metadata proxy list * AdminUtil NSX-v3: recreate dhcp server for a network * NSX|TV: validate plugin is available * NSX-v3: VPNaaS supports only No-SNAT routers * Updated from global requirements * NSXv3: allow use 
api\_workers=-1 * NSX\_V3: add flag to indicate if ENS networks can be created * use vlantransparent api def from neutron-lib * TVD: FWaaS plugins * NSX|V: ensure that only LAG is configured and not standby * TVD: make security group logging more robust * TVD: ensure get\_ports works for DVS plugin * NSX-V3 devstack: cleanup VPNaaS objects * use multiprovidernet api definition from neutron-lib * TVD: ensure that can return specific tenant/project requests * Updated from global requirements * NSX-V3 FWaaSV2 prevent adding compute ports to FW group * TVD AdminUtils: Use only objects from specific plugin * NSX-v3: fix update\_router\_firewall error handling * TVD|AdminUtils: Add all nsxv/v3 utils to tvd * Updated from global requirements * AdminUtils: NSX-V3: Show and update the NSX rate limit * NSX-v| Do not allow setting qos policy on port * Fix VPN api as the NSX api changed * NSX|V: enable binding floating ip's per AZ * TVD availability zones * TVD: support lbaasv2 'provider' filtering * NSX\_V3: enable non native DHCP to work with AZ support * NSX-v3: Inform FWaaS when a router interface is removed * TVD: move plugins init\_complete code to the end of init * NSXv, DVS: Use neutron DB name instead of neutron\_nsx * use api attributes from neutron-lib * TV: doc creation of admin for a specific plugin * TVD: improve default plugin failure at boot time * NSX\_V3: make sure that member creation is serialized * NSX-v3: Use logical switch id in FWaaS V2 rules * Remove leftover debug logs * TVD: Add VPNaaS wrapper driver * TVD project plugin mappings validations * NSX|V: ensure port security is enabled for address pair support * NSX\_V3: ensure that the correct router attributes are read * BUG\_FIX: policy.d copy over for devstack * TVD: use warning if plugin is not supported * TVD: improve get subnets * TVD: get\_address\_scopes and get\_subnet\_pools support * use service aliases from plugin constants * NSX|V3: VPNaaS support * TVD: create subnet bulk support * use 
callback payloads for \_SPAWN events * NSX|v+v3: Prevent adding 0.0.0.0 route to router * AdminUtils NSX-V3 fix for FWaaS callbacks * TVD: fix logging configuration at boot * Housekeeper: Per-job readonly option * NSX|TVD: ensure that port update is under transaction * NSX|V3: fix load balancer admin util list * TVD: Add NSX-v CI exclusions * NSX-V+V3: Fix network availability zones extend func * Housekeeper: trigger execution * NSXv HK: recover broken backup edge appliances * TVD: fix plugin apis to allow CI job to succeed * DVS: fix get\_por and get\_portst * TVD: Fix devstack cleanup * AdminUtils NSX-v3: Add config import * NSX-v: Fix VPNaaS driver * NSX|v+v3: Prevent adding default route to router * TVD: Update devstack doc with different services * TVD servives: Handle the case where plugin is disabled * TVD+BGP: adapt the nsx bgp plugin to be used in TVD * TVD: l2gw support * NSX-V3 Fix router availability zones * TVD: Add default plugin configuration * Updated from global requirements * NSXv: update static routes in LBaaS loadbalancer * NSXv: Allow exclusive router deletion with LBaaS * Plugin housekeeper * NSX-TVD basic unittests * NSXv3: Continue HM delete in case of inconsistence * TVD: Support DVS plugin calls * TVD: add in DVS extenion\_driver support * TVD: ensure no empty project is added to the DB * NSX-TVD QoS drivers support * NSX-TVD: fix extensions list * NSX-TVD: Fix md proxy internal tenant * NSX-TVD migration admin util * NSX-TVD: Add some logs at init and mappings * NSX-TV fwaas drivers * Updated from global requirements * TVD: ensure that get ports does not throw exception * NSX TVD: V, T and simple DVS Coexist in the same plugin * Updated from global requirements * NSX|V3: Move logic from fwaas driver to the v3 plugin * NSX|V3 complete init of fwaas core plugin * NSX|V: Fix Fwaas for distributed router * NSXv: Handle LBaaSv2 listener inconsistency * NSX|V3: improve ENS exception limitations * NSX|V3: transparent support for logical switches 
* NSX|V prevent deleting md proxy neutron objects * TVD: add support for get\_\*s * NSX|V3: prevent DHCP port deletion with native support * TVD: support housekeeper for TVD * NSX|V3: ensure provider securiry updates are done * NSX|V3: upgrade NSX version in the unit tests to 2.2 * Create common devstack files for V and T * NSX|V3: ensure that metadata works with windows instances * Fix DVS devstack configuration * Integrate with floating ips OVO * NSX|V: Add configuration for shared rotuer size * NSXv3: Handle association fip to vip in different cases * NSX|V: Improve no-dhcp subnet unit tests * TVD: enable DVS to be configured * NSXv3: Fix a typo to return router\_id * NSX|v: Add qos policy id to port * Reset static dhcp binding on mac address change * NSXv3: Validate LB router gateway * NSXv3: Update FIP on VIP gracefully * Updated from global requirements * NSXv3: Update VIP on multiple listeners * NSXv3: Fix pool member update * NSX|V: validate DVS in network-vlan-ranges * NSX|V: support name update for VLAN and FLAT networks * NSXv3: Fix listener/pool update * Update the doc link * TVD: LBaaS support * NSX|V3 rename the default availability zone * NSX|v3: Fix create network exception handling * NSX|v3: Add routers availability zones * NSX|v+v3: Support default availability zones * NSXv3: Check cert existance before creation * NSX|V: add HA status to admin list of edges * NSX|V: Ignore empty gw info on router creation * NSX|V3: fix create port with provider security group * Add metadata setting for new created dhcp edge * NSXv3: Refactor LBaaS L7 code * Remove setting of version/release from releasenotes * Updated from global requirements * Retry with stale DB values * NSX|V3: use vmware-nsxlib that does not have neutron-lib * NSX|v validate PG provider networks * NSX|V3: ensure that a readable name is set for LB resources on NSX * NSX|v: Fix provider network creation * NSX-Migration of default security groups * Updated from global requirements * NSXv3: Add 
validation when attaching lbs to router * use flavors api def from neutron-lib * NSX|V3: enable DHCP and metadata to work with non overlay networks * NSX|V3: allow updating a floatingip without changing the port * DVS: ensure that network is configured if one of more hosts are down * use dvr api def from neutron-lib * Updated from global requirements * use l3 flavors api def from neutron-lib * NSX|v+v3: fix validate network callback * NSX|V3 fix lbaas get plugin code * NSX|V: use elevated context to get external net for router gw * NSX|V: no spoofguard policy for portgroup provider network * NSX|V: fix timeout out issues * use l3 ext gw mode api def from neutron-lib * NSX|V3: use contexts 'user\_identity' for the 'X-NSX-EUSER' header * NSX|V prevent adding illegal routes * NSX|V fix exception & typo in vpn driver * Fix devstask doc for service plugins * use router az api def from neutron-lib * NSX-V3| Do not allow adding QoS to dhcp ports * NSX|V3: allow VLAN router interfaces if nsx supports it * Infrastructure support for FWaaS logging * NSX|V: ensure that a session object is created per thread/context * NSXv3: Update tag if lb name is updated * NSX|V: Use flavors for load balancer size * NSXv3: Fix TERMINATED\_HTTPS listener * Cleanup test-requirements * NSX|V3: address edge case of subnet deletion when attached to router * Updated from global requirements * Updated from global requirements * NSX|V3: add request-id support * Fix broken unit tests * use l3 api def from neutron-lib * cleanup unit test usage of api extension maps * NSXv3: Limit tag value to maximum 40 * use extra route api def from lib * DVS: Add support for dns-integration extension * NSX|V3: fix lbaas exception text * NSXv3: Truncate lb name in tag * use addr pairs api def from lib * NSX|V Fix get\_dhcp\_binding output parsing * Logging neutron request-id in NSX * Fix security groups tests * use FAULT\_MAP from neutron-lib * NSX|V: add in security rule tags for 'project\_id' * Fix shared routers 
locks * NSX|V3: inject X-NSX-EUSER * Fix to use . to source script files * NSXv3: Fix load balancer delete issue * use availability zone api def from lib * NSX|V3: use route from nsxlib * NSX|V: honor nsxv.spoofguard\_enabled at port create * NSX|V remove warning on no dhcp edge * NSX|V save backend calls on delete subnet * use external net api def from lib * NSX|V Adding unittests for SG rule bulk creation * NSX|V Ignore port-security at the network level * NSX|V add shared router logs * NSX|V: Support add/remove dvs for VLAN provider networks * NSX|V add more details to vcns debug logs * NSX|V: use session objects for requests * NSX|V: Fix vcns timeout exception * NSX|V Do not share edges between tenants * NSX|V fix exclude list counting * NSX|V3: ensure that DHCP profile is not created for ENS port * NSXv: Recover from LBaaSv2 HM inconsistency * zuul v3 gate changes * NSX|v: fix broken unittests * NSXv3: Update router advertise\_lb\_vip * NSX|V3: Disallow port-security on port/net on ENS TZ * Updated from global requirements * Remove SCREEN\_LOGDIR from devstack * NSX|V3: allow VLAN router interfaces if nsx supports it * NSX|V3: Add DHCP relay firewall rules * NSX|V3 refactor fwaas to support plugin rules * NSX-V: add support for mac learning extension * NSXv3: Fix LB pool algorithm * NSXv3: Change reponse code for L7 redirect and reject * Fix typo * NSX|V Fix warning when disabling network port security * NSXv3: Handle floating ip for loadbalancer VIP * NSX|V3 indentation & typo fixing in client certificate * NSX|V3 make certificate unittests inherit from sql tests * Updated from global requirements * NSX|V3: nsx-provider network bugs fixing * use new payload objects for \*\_INIT callbacks * NSX|V raise error on mdproxy init * NSX|V: check md proxy handler exists before usage * NSX|V: do not build NAT rules for v6 networks * NSX|V3 Add validations to DHCP relay * NSXv3: Change LB rule match type to REGEX * Updated from global requirements * use common constants 
from lib * NSXv3: Fix loadbalancer stats exception * NSX|V: add exception log if router create fails * NSXv3: Fix a typo in LBaaS member\_mgr * NSX|V complete init of fwaas core plugin * Revert "Temporarily disable flowclassifier tests" * NSX|v3: FWaaS v2 support * Updated from global requirements * Remove wrong alias from setup * NSX|V: ensure the allowed logging includes default DHCP rules * Updated from global requirements * Temporarily disable flowclassifier tests * NSX|v3: DHCP Relay support * NSX|v3: provider networks updates * Updated from global requirements * NSX|V: prevent V6 subnet from being attached to a DVR * NSX|V AdminUtil handle orphaned router vnics * NSXv: add timeout parameter for backend calls * NSX|V: Add log messages to retry attempts * Update reno for stable/pike * NSX|V3: AdminUtil updating server ip of md-proxy * NSXv3: Fix L7 rule create/delete base on latest API * Fix to use . to source script files * NSXv3: Update binding if listener has been deleted * NSXv3: Rewrite client certificate provider * NSXv port-binding support * Updated from global requirements * NSX|V3: Do not enable port security on router interface * Nsx admin: Initialize nsx-lib on demand * Updated from global requirements * NSX|v3 use nsxlib in devstack cleanup * NSX|v AdminUtil ignore irrelevant orphaned networks * NSX|V3 Admin utils expect different notFound error * Remove unuse router-bindings constants * NSX|v: Admin Util remove router binding orphaned entries * NSX|V3: ensure that update port does provider validations * NSXv3: Refactor LBaaS L7 based on API change * Add NSXv3 LBaaS driver config for devstack * NSXv: Implicitly disable port-security for direct vnic-type ports * NSX|V3: enforce provider rules not being set when not port sec * NSX|V: keep availability zones on router migration * Updated from global requirements * Fix port create with mac learning set to false * Tag the alembic migration revisions for Pike * AdminUtils:NSX|V3: Add orphaned routers list 
& clean * Updated from global requirements * NSX|V: remove unused method \_get\_sub\_interface\_id * NSX|V fix error message when changing port security * NSX|V3: validate transport zone at provider net creation * NSX|V: ensure locking when removing a network from DHCP edge * NSX|v3: add network/port description to backend objects * NSXv3: Prevent router deletion if it has lb attachment * NSXv3: Refactor LBaaS driver to fix binding issue * Updated from global requirements * Don't add provider security-group when psec is disabled * NSX|v3: do not allow setting router admin state to False * NSX|v3 plugin: Fix typo * NSX|v3: process the port security of the dhcp ports * NSX|v: get internal net by az fix * Remove vmware\_nsx\_tempest * NSXv3: Add new tags for LBaaS resources * NSX|v: Handle address scope change on subnetpool * NSX|V3: ensure that MAC learning does not invoke switch profiles * NSX|v: fix deletion of edges in PENDING\_UPDATE * NSX|V: skip interface actions on old lbaas member create * NSX|V: serialize rule removal * NSX|V: ensure that segmentation ID's are unique * NSXAdmin: Fix nsx-v gw-edge deploy with no default-gw * NSX|v3: configure additional switching profiles per AZ * NSX|V and NSX|V3: remove deprecated config variables * NSXv3: Remove os-lbaas-lb-id tag for lb service * NSXv3: Add release note for LBaaS * Add in support for direct-physical vnic types * NSXv3: Fix deletion issue when listener has pool * Add Pike release notes * NSX|v+v3: forbid multiple fixed ips in a port * Admin-Utils NSX|v3: Fix constants and typo in cert util * NSX|V: skip metadata proxy subnets for BGP updates * NSX|V: autodraft does not require a NSX reboot * NSX|V3: fix trunk issues * NSX|V: make use of granular API for getting DHCP binding * NSXv3: Delete lb binding after pool deletion * NSX|V: save PUT when restarting neutron * NSX|v: lock shared routers interface actions * NSXv3: Update func to add virtual server to service * AdminUtil|NSX-v: complete plugin init * 
Admin-Utils|NSX-v: dhcp recreate fix * NSX|v: add device id to dhcp ports * NSX|v3: do not allow provider sec groups if not port-sec * NSXv3: Add admin utils for LBaaS resource * NSX|V: ensures updates on subnet are atomic * NSX|v3: disable port security on dhcp ports * AdminUtil|NSX-v: complete plugin init * use neutron-lib address scope apidef * NSXv3: Handle address scope change on subnetpool * use dns api def from neutron-lib * NSX|V3: ensure that subnet update takes host routes into account * NSXv: LB objects delete failure while pool binding missing * FWaaS: remove deprecated exceptions * Unblock gate unit tests * NSX|V3: don't fail on already deleted network/port * use qos constants from neutron-lib * NSX|V: remove invalid dvs-id validation * use qos DriverBase from neutron-lib * NSXv3: Add Neutron LBaaS Layer7 Support * NSXV: use correct exception for NoResultFound * NSXv: locking DHCP * NSXv DHCP locking refactor * NSX|V: ensure that router updates are atomic * NSXAdmin: Fix default gateway setting when creating BGP gw-edge * Admin util: add not for DHCP and metadata native support * NSX|V3: ensure that DB binding is updated if there is an IP change * Tempest: Fixed a bug # 1914831 * Updated from global requirements * Updated from global requirements * NSXv BGP: Fix bgp-peer esg-id validation * NSX|nsxadmin install update * Updated from global requirements * Removing qos test scenarios due to pyshark removal from test-requirements.txt. 
Will add these tests back with new tempest design and workaround for pyshark * Remove pyshark from test-requirements * NSXv: Fix method name typo * Discard east-west traffic between different address scopes * NSXv3: Neutron LBaaS nsxv3 support * NSX|v: refactor shared router FW rules creation * NSXV: Fix default ICMPv6 firewall rules * NSX|V: enable plugin to decide on VLAN tag * NSXT instance migration: Improve logging * VMware-NSX:add install doc command * NSX|V3: ensure neutron raises better exceptions * Enable admin or owner to configure snat * Local copy of scenario test base class * NSX|V3: admin utility get ports to skip Qos read * NSX|V: ensure no sec groups if network port security is disabled * NSX|V3: honor host routes * Tempest: Added new design for tempest test cases * NSXv3: Move away from locking in cert provider * NSX-migration: Add logging and handle errors * Tempest: tempest.test.attr() is deprecated. Moving to new function * NSXV: ensure that binding update does not fail if deleted * NSX|V3: Configure TZ, router and profiles using tags * NSX|v: Fix error on service insertion config * NSXT instance migration: support file logging * NSXv: Support ipsec VPNaaS on nsxv driver * NSX migration fix nosnat + keep ips * NSXv: Add a configured delay after enabling ECMP on edge * NSX|V3: support ranges in fw rules ports * NSX|V: support large port ranges in service insertion * NSX|V: support big ranges in fw rules ports * NSX|v+v3: Use elevated context for address scopes checks * NSX|V: treat edge cases with edge deletions * NSX|v: handle old loadbalancers interfaces * NSXv: Backup pool locking * Use flake8-import-order plugin * NSX|V3: devstack cleanup exclude list ports on devstack * [Tempest]: Adding of 'plr' attribute for distributed routers * NSXv3: Add lock around filename in cert provider * NSX|V3: fix devstack when native DHCP is False * Added compatibility to pyroute2>=0.4.15 * NSX|v3 refactor trunk driver to use nsxlib api * AdminUtils NSX|v: 
recreate router by Id * NSX|V: Support QoS ingress rules * NSX|V3: clean up parent/tag handling * NSX|v: Add FW rules for same scope subnets * AdminUtils NSX|V: router recreate fix type check * [Tempest] spoofguard test fix from OpenStack plugin change * NSX|V: do not fail distributed router deletion * NSX|V3: Do not add SNAT rules if same address scope * NSX|v3: Enforce address scopes for no-NAT routers * AdminUtil NSX|v: Fix dhcp recreate * NSX|v+v3: Fail if adding another project router to FWaaS * NSXv BGP: Fix bgp-peer esg-id validation * NSX|V3: ensure that devstack cleanup deletes switches * NSXv: use regular DHCP edges for VDR metadata * NSX|v3: Add firewall tag to the router * NSX|V: Do not add SNAT rules if same address scope * Revert "Remove neutron-fwaas exception usage" * [Tempest] NSXV L2GW cleanup fix * [Tempest]: Changes done while updating port with PSG * Fix a few pep8 errors in db.py * Remove neutron-fwaas exception usage * Create base plugin for common nsx-v/nsx-v3 code * NSX|V: only update host groups if AZ correctly defined * NSXv: LB pool delete failure while binding missing * NSX|V: delete old pending-update/delete edges * NSX|V3: fix trunk initialization * Remove white space * NSXv: Don't allow security-group in no port-security * [AdminUtil] NSX|v3: Update router NAT rules to not bypass the FW * NSX|V3: Support QoS ingress rules * NSX|V3: enable creation of provider VLAN network * NSX|V: make sure host groups updated only once * NSX|v AdminUtil: Fix edges utilities with az * use service type constants from neutron\_lib plugins * NSXv BGP: Fix shared router on gateway clear * NSXv BGP: Fix password value when not specified * NSX|V: prevent a floating IP being configured on a no snat router * NSX|V3 Fix FwaaS rule with no service * Remove new unsupported unittests * NSX|v: disable service insertion if no driver configured * NSX|V3: Warn if backend does not support FWaaS * [NSXV]Fix l2gateway creation failure * NSX|v+v3: Support default qos 
policy * NSX|v: Fix LBaaS session persistence * NSX|V3: Use QoS precommit callback to validate rules * NSXv3: Add util to check version 2.1.0 * NSX|V: Keep existing members when updating LBaaS pool * NSX|v: refactor FWaaS driver * NSX|V3: FWaaS-v1 support * NSX-V3| fix devstack cleanup * NSXv3: Race condition fix for cert provider * Address OVO breakage * [Tempest]: Adding of scenario cases for FWaaS * [Tempest]: Adding sleep between PSG creation and adding to backend * Fix NSX|V3 unit tests * NSXv AdminUtil: Final touches * Fix devstack doc titles * NSX|V: Fix FWaaS exceptions * NSX|V: Call NSX backend once for dvs validations * NSX|V raise error when FWaaS uses unsupported routers * NSX|V AZ validation message * NSX|V: Validate AZ HA configuration * NSX|V: Fix LBaaS session persistence without cookie name * NSX|V: Validate availability zones dvs-ids * NSX|v: call backend scoping objects only once during init * Tempest: Fixed failed few tempest scenario test cases * NSXv3: More locking for certificate provider * Fix NSX|v3 qos unit tests * NSX|V: use the same DHCP if multiple subnets on same network * NSXV3: harden subnet creation for external networks * NSX|V: honor provider security rules * QOS: fix unit test breakage * NSX-T Migrate: migration script for libvirt * NSX-migration: remove physical-network for flat networks * NSXv BGP: Fix get\_bgp\_peer 'esg\_id' attr visibility * NSX Migration: support keystone v3 & other fixes * LBaaS: Share lb\_const module for nsxv and nsxv3 * use attribute functions/operations from neutron-lib * NSXv BGP: Adding IP address check for ESG BGP peer * NSXv3: Solve race condition in DB cert provider * NSXv: Fix before delete notification for shared rotuer * NSXv Admin util: BGP GW edges deployment and configuration * use core resource attribute constants from neutron-lib * NSX Admin: Fix plugin identification * LBaaS: Share base\_mgr between nsxv and nsxv3 * NSXV3: ensure that devstack does not get invalid config * BGP unittests * 
NSX|v: Fix LBaaS session persistence * NSXv BGP: Return with error for invalid bgp-peer-remove request * NSXv BGP: Raise an error if user add duplicate networks or peers * NSXv BGP: Add policy rules * Skip DHCP options tests in v3 tempest * NSXv BGP: Use elevated context inside callbacks * NSX|V: Support tftp-server dhcp option * NSX|V: Skip md-proxy routers in fwaas * Tempest: Tempest test NSX security groups failing * Fix nsx-migration script * NSX|V fix crash when enabling subnets dhcp * NSXv3: Fix devstack issue on compute node * NSX|v: keep snat status when changing router type * [Tempest]: Adding of more cases for FWaaS * Switch to SUBNET from SUBNET\_GATEWAY * NSX|V: Fix broken unit tests * Fixes vmware\_nsx\_tempest tempest plugin issues with tempest * Tempest: Port Types network cleanup fix * NSX|V3: ensure that network rollback works correctly * NSX|V: ensure that the subinterface validations are atomic * Update fwaas driver in devstack.rst * Use vmware\_nsx aliases for neutron core plugins * use extra\_dhcp\_opt api-def from neutron-lib * Add firewall\_drivers entry\_point in setup * NSX|V: Fix use case with no FWaaS for a router * use is\_port\_trusted from neutron-lib * NSXv BGP: Use BGP peering password correctly * NSXv BGP: Allow BGP only on networks with address-scope * NSX|V: ensure that FLAT provider network is deleted * NSXv: Enforce address scopes for no-NAT routers * AdminUtils: Fix crash in nsx-v router-recreate * AdminUtils: Fix security-group migrate * AdminUtils: Fix firewall-section list of operations * [Tempest]: Adding of FWaaS api testcases * Policy: enable distributed router to be created by all * NSXV/L2GW: validate that portgroup is used only once * use plugin constants from neutron-lib * NSXv Admin: Print usage if no properties are given for bgp-gw-edge * NSX|V: LOG.exception only when there is an exception * AdminUtils:NSX|V: Add orphaned networks list & clean * Use oslo\_utils to mask password in logs * Remove pbr warnerrors in 
favor of sphinx check * NSXv BGP: Fix KeyError on not an ESG bgp-peer * NSXv: Adding missing devstack configuration for BGP * NSXv Admin: Fix gw-edge firewall config * NSX|V: Do not add NAT rules in router firewall with FWAAS * NSX|v AdminUtil: Fix bgp-gw-edge create with az * NSX|V3: ensure 'exclude' tag is correctly set * Unbreak gate again * Address gate issues * NSX|V: fix missing spoofguard ID validation * NSXv: Mask passwords when logging debug messages * NSX|v: Add fip to exclusive router after migration * AdminUtils:Fix NSX-v metadata secret * Add vmware\_nsxv entry\_point in setup * NSX|V3: treat missing exclude list entry on delete * Rename api-replay to nsx-migration * Fix policy file breakage * Tempest: 2 scenario tests are fixed * NSXv BGP: Add more unittests * Split and move policy rules to policy.d dir * NSX|V: prevent deadlock with subnet creation and deletion * NSXv: add lbaas statistics support * NSX|V: fix vnic allocation for AZ and metadata * NSX|v: Distributed router type update failure * Tempest|DVS: Add \_list\_ports for dvs scenario test case * NSXAdmin: Block cert commands when feature is off * [Tempest]: Adding of removed method from upstream in vmware\_nsx\_tempest repo * Address network filtering issue * NSXv BGP: Fixing get-advertise-routes * AdminUtil:NSX|V3: change metadata server * AdminUtil:NSX|V3: Fix plugin calls * Basic QoS scenarios: Testing bandwidth-limit, DSCP rule with traffic root@prome-mdt-dhcp412:/opt/stack/tempest# python -m testtools.run vmware\_nsx\_tempest.tests.scenario.test\_qos\_ops Tests running... 
tempest/clients.py:45: DeprecationWarning: Using the 'client\_parameters' argument is deprecated client\_parameters=self.\_prepare\_configuration()) Warning: Permanently added '172.24.4.9' (RSA) to the list of known hosts * Update changes for \_get\_marker\_obj * NSX-v3| fix delete-router when there is no backend id * NSX-v| Fix FWAAS rules in DB * use neutron-lib port security api-def * Update code to work with oslo.config enforcements * Fix OSC client to work with versions greater than 3.10 * NSX|V3: admin util for migrating exclude list ports * Fix client breakages * use neutron-lib constants rather than plugin constants * Correct config help information error * NSX|V3: Support specific IP allocations in IPAM * OSC 3.10 integration * [Tempest]: Changes done in allowed address pair scenario testcases * [tempest]: Changes done for port security scenario testcases * NSX-v+v3| remove unused QoS definition * Tempest: Adding network config param * Tempest: OpenStack Port Types Support API tests * Adding pyshark requirements for QoS scenario testing * Fix gate jobs * [Tempest]: Adding "Prevent NSX admin from deleting openstack entities" testcases Incorporated nsxv3\_client.py for backend operations Made changes to nsxv3\_client.py for specific requests Test results: root@prome-mdt-dhcp412:/opt/stack/tempest# python -m testtools.run vmware\_nsx\_tempest.tests.nsxv3.scenario.test\_client\_cert\_mgmt\_ops.TestCertificateMgmtOps Tests running... 
tempest/clients.py:45: DeprecationWarning: Using the 'client\_parameters' argument is deprecated client\_parameters=self.\_prepare\_configuration()) tempest/scenario/manager.py:50: DeprecationWarning: Read-only property 'manager' has moved to 'os\_primary' in version 'Pike' and will be removed in version 'Ocata' cls.flavors\_client = cls.manager.flavors\_client tempest/test.py:376: DeprecationWarning: Read-only property 'os' has moved to 'os\_primary' in version 'Pike' and will be removed in version 'Ocata' if hasattr(cls, "os"): * NSX|v3: Use nsxlib features list * Adjust qos supported rules to Neutron * NSX|v: Support more than 2 hostgroups * NSX|V: configure correct physical\_network * NSX|V: enhance error message for invalid scope\_id * NSX|V3: treat DHCP server max entries * NSX|v fix some host group placement issues * NSX|V3: Fix exclude port issue during delete port * NSX|V fix FWaaS rules order when router is added to FW * NSX|V Fail dist router set gw if edge not found * NSX-V| add IPv6 link-local address to spoofguard * Tempest:Deploy and Validate Neutron resources using HEAT Template on NSXT|V * NSXv: Use BGP protocol to learn default gateways * NSXv3: Default native\_dhcp\_metadata to True * NSXv: Fix validation for bgp peer 'esg\_id' attr and peer removal * AdminUtils: Fix NSX-V dhcp-edge recreate * NSXV3: ensure all OS ports are added to default section * Stop using CommonDbMixin apis * NSX|V3: fix issues with exclude list * NSXV3: ovs bridge was not getting created in restack * NSX|V: ensure that monitor ID is persisted with LB alg update * NSX|V: enable an external network to create backing network * NSX-V3| Integrate with nsxlib refactored code * Validate L2gateway exists in backend * Update following tempest changes * NSX|V: Ensure that 6.2.x can start with transparent vlan config * NSX|V3: fix exclude list initialization * NSXv BGP: Update edge bgp identifier on router GW updates * NSX|V: provide admin utility to update default cluster section * 
NSX-V3: Fix qos code in plugin * Use neutron-lib callbacks module * NSX-V| Fix FWaaS rules order * Fix broken unit tests * Revert "NSXv: Don't remove default static routes on edge" * NSXv: Don't remove default static routes on edge * NSXv: Adding notification for router GW port update * NSXv BGP driver: Add missing log expansion variable * NSX|V: be able to deal with more than 256 edges * NSXv: Notify on router migration before removing it from edge * NSX-V| Fix FWaaS deployment on distributed router * NSX|V: fix distributed router interface deletion * Integration with new neutron code * [Tempest]: Adding of timer in between backend operations * [tempest]: Adding of missing function in vmware\_nsx\_tempest * Tempest: Removed old and unused folder * NSXv, NSXv3: Enable address-scope extension * Fix transaction issues with network/subnet facade updates * NSXv BGP support * NSXv: Adding more rotuer driver notifications * VMware:vmware-nsx release note update * NSX-V| Fix Fwaas handling ports * NSX-V| Fix exclusive router deletion * Fix api-replay unittest teardown * NSXv: Adding notifications for router service edge events * NSX|V: check edge ID before locking edge * [Tempest]: Reusing Lbaasv2 cases for nsxv3 plugin also * NSX-V FWaaS(V1) support * NSXv3: Force delete router * NSXv3: Hide client auth password * Fix unit tests * Fix NSX-V qos tests * NSX|V: add in support for DHCP options * Use new enginefacade for networks, subnets * [Tempest]: Reusing Lbaasv2 cases for nsxv3 plugin also * NSX|V3: fix bulk subnet breakage * Tempest: Fixed scenarios for SSH authentication failures * Tempest: Fixed SSH authentication failures * [Tempest] Deploy and Validate Neutron resources using HEAT Orchestration Template * AdminUtils NSX-V| fix sections reorder * NSX-V3| fix devstack cleanup * Drop log translations * NSX-V| Adding datacenter to availability zones config * Fix some reST field lists in docstrings * Use neutron-lib provider net api-def * Prevent non-admin user 
specifying port's provider-security-groups * Remove logging leftovers * Tempest: Fix for test\_mdproxy\_with\_server\_on\_two\_ls test case * Removing irrelevant note in README file * Skip spawn plugin init methods in unittests * NSX-V3| fix unittests mock * Tempest: Fixed TestRouterNoNATOps bugs and enhanced the test cases * NSXv3: Always clean client certificate in devstack * NSX|V: sync firewall with addition of new ports * NSX-V3| Fix AZ when native dhcp is disabled * NSX-V| improve AZ validation * Updated from global requirements * NSX-V3: add transport zones to availability zones * Remove Tap-as-a-service Support * Tempest: Removed skip test from test\_nsx\_port\_security.py * Tempest: Device driver does not allow Change of MAC address when interface is UP * [Tempest]: Adding of removed method from upstream * NSX-V3: Fix QoS delete * NSXv3: Add support for secure metadata-proxy access * Update api-replay for nsx-v->nsx-v3 migration * NSX-V3| network availability zones support * NSX|V: fix \_vcm parameter * Tempest: test.idempotent\_id is deprecated * Tempest: NSXv3 Logical resource get query cursor fix * Skip configuring integration bride on ESXi compute * [Tempest]: Modified QoS API tests * NSX|V: add in exclusive DHCP support * NSX|V: ensure correct parameter is passed * Fix subnet-deletion issue * NSXv3 Admin: Multiple client certificate support * Fix unit test that uses get\_random\_mac * NSXv3: Add certificate expiration alert * NSXv: Subnet create and attachment concurrency * Pass dhcp\_client in renew\_lease() * Fix OSC plugin global declaration * NSXv: Fix tempest test failures due to KeyError 'primaryAddress' * Fix IPAM unittests * [Tempest]: Adding subnet-pool api testcases * [Tempest]: Added scenario cases for port security feature * Tempest: change test. 
to decorators.idempotent\_id and addCleanup() * Generalize the availability-zones code * NSX|V: fix host group exception * Remove unconstrained for vmware-nsxlib * AdminUtils: Add utility for config validation * [Tempest]: Changes done to add missing methods from upstream * [Tempest]: Added api cases for port security feature * [Tempest]: Add MAC learn API neg test - port security * Use vmware-nsxlib from master * Use neutron-lib's context module * NSX|V: ensure that DVS name is unique * Fix admin-utils unit test * Tempest: admin-policy scenario basic operation test * NSX|V3: skip random failing test * NSX-T: nsxadmin UTs * NSX-T: Rewrite client certificate unit tests * Switch using exec\_command() directly * Updated from global requirements * NSX|V Fix lbaas l7 reject action * Tempest: Moving from test.idempotent\_id to decorators.idempotent\_id * Updated from global requirements * NSX|V3: Use client cert provider in nsxlib config * Updated from global requirements * NSX|V: remove leftover code from md\_proxy * Updated from global requirements * NSX|V: move migration to correct folder * Fix admin utils unit-tests * Fix DB breakages * NSX-V3: Add support for dhcp-opts extensions for ports * NSX|V: delete old pending-create edges * NSX-V: Improve DHCP edge firewall rules * NSXv3: Rename parent\_tag to traffic\_tag * Fix LBAAS L7 policy upgrade * Enhanced unittests for admin utils * AdminUtils NSX|V: Fix rotuer recreate utility * NSX|V: Support changing the position of LBAAS L7 policy * DVS: Add support for 'direct' vnic types * Don't use Tempest internal methods * NSX|V: add support for VLAN trunk with VLAN network * AdminUtil NSX|v Fix constants import * Switch to oslo\_log * AdminUtils NSXv3: Fix SG admin utils and their documantation * [dvs] Enable vlan-transparent extension * NSX|V: Add support for 'direct' vnic types * NSX|V: remove skipped transparent vlan test * Fix typo in unit test * NSXv| Fix path comparison in lbaas L7 rules * NSX|V: serialize rule 
creation * NSX-MH: Remove failing unit tests * AdminUtil|NSXv: Add az & db status to edges utils * NSX|V remove plugins vdn\_scope member * Refactor DvsManager code * NSX|V: ensure that the DRS is 'should' and not 'must' * NSX-V3| Qos without RPC notifications * Fix ipam table primary key constraint * NSX|V: only create host groups if ha is enabled * NSX|V: add in a cleanup method for host-groups * NSX|V: validate that entries exist * NSX|V: enhance admin utility * Remove redundant pass in tests * Replace db get\_session with get\_reader/writer\_session * NSX|V: improve host group management * Fix to use correct config options for image\_ssh\_user * NSX|V: only update host groups for edges that are not distributed * Tag the alembic migration revisions for Ocata * NSX|V: add support for host groups for DRS HA * NSX|V: transparent support for virtualwires * NSX: Add devstack.rst to contain all devstack config * NSXv: Add metadata configuration to the availability zones * Edit config variables in README for TaaS nsxv3 driver * [Tempest]: Adding of Allowed address pair scenario cases * Updated from global requirements * NSXv: LBaaS default FW rule should be accept-any * NSXv: Connect LB interfaces to member subnets * [Tempest]: Added Provider Security Group cases for nsxv * Prepare for using standard python tests * Tempest: Scenario tests for Disable spoofguard with NSXv * [Tempest]: Adding of Provider security Group cases * Remove support for py34 * NSX|V: add more locks and cleanup edge bindings * NSXAdmin: Add parameters to certificate generation * NSX-v| LBAAS L7 support * Use https for \*.openstack.org references * NSXv: Remove router dependency for LBaaS * NSXV3: Client certificate private key encryption * NSXAdmin: add import and nsx-list commands for client cert * NSXv: Add backup pools ranges to each AZ config * NSX|V: ensure that static bindings are consistent * Fix typo in README.rst * Remove redundant import * NSXv: Fix update port update with provider 
security-groups * NSXv: Add unit tests for md\_proxy * Stop sending notifications for router update/delete * NSXV3: Initial client certificate auth support * Updated from global requirements * [Admin-Util] NSXv: fix plugin issues * NSXv: Fix backend error handling * NSXv: Edge random placement * NSXv: New way to configure availability zones * NSX|V: remove deprectaed vcns section * NSXv: Support update dvs list for VLAN provider networks * Fix cleanup prints * NSXv: Fix dist router call to add fw rules * NSX cleanup script to clean only related resources * NSXv| Use the current DVS when creating a teaming policy * NSX|V: fix broken unit tests * NSXV3: ensure that mac learning enabled has port sec disabled * [Admin-Utils] delete all backup edges * Tempest: Scenario tests for Provider security group with NSXv3 * NSXv: Fix pool logging calls * NSX|V: set bind\_floatingip\_to\_all\_interfaces to False by default * [Admin-Util] NSX-V|Reorder L3 firewall sections * Remove psutil dependency * NSXv: Subnet DHCP enable/disable with VDR * Updated from global requirements * NSX-V| Fix policy SG errors * NSXv: Do not lock RPC filter update * NSX-V| add firewall rules to dhcp edge * NSXv: lock DHCP edge while making changes * Tempest: Changes done in dhcp\_121 for bug#1797152 * NSX|V: only do SNAT traffic per interface for specific IP's * Use neutron-lib portbindings api-def * Fix vmware\_nsx tempest plugin * Updated from global requirements * Use neutron-lib provider net api-def * Ignore specific backend error for invalid identifier * NSX|V: ensure backwards compatibility for vdr\_transit\_network * NSX-V3| Do not allow adding QoS to router ports * NSX|V: ensure that FW rule updates on edge are locked * NSX|V: add in missing lock(s) * NSX-V| update port port-security flag in all cases * NSX-V3| Fix qos switching profile project name * H402 hacking have been deprecated * NSXv: Reduce DB calls while gathering network edges * NSXv: return subnet edges only when requested * NSXv: 
recover from database conflicts for VDR DHCP * NSX|V: ensure that metata port cleanup is done * Updated from global requirements * NSX-V3| Validate Qos burst size before rule creation * NSX|V3: ensure that port security is set correctly on backend * Remove pagination skipped tests * Updated from global requirements * Fix QoS tests to use project ID * NSXV+NSXV3: Add support for dns-integration extension * NSX-V3| IPAM support for subnet update * NSX-V| prevent rules creation for SG with policies * Add in skip for breaking test * NSXV+NSXV3: add support for pluggable extensions * NSX|V3 IPAM support * Fix router extra attr processing * NSX|V: set teaming standby ports * NSX-V| Prevent port creation with an existing MAC * Updated from global requirements * NSX|V: ensure that a provider portgroup can be attached to a edge * Fix IPAM drivers entry point in setup.cfg * NSX-V| Fix SG creation with nsx policy * NSX-v3| Update router description on backend * Updated from global requirements * Remove references to Python 3.4 * Using sys.exit(main()) instead of main() * NSXV3: invoke get\_connected\_nsxlib only once per invocation * NSXv3: Allow running devstack without installing OVS on it * NSX-V| Validate default policy configuration if use\_nsx\_policies * Updated from global requirements * NSX|V3 refactor plugin profiles init code * NSX|V: learn the default L3 section ID * Updated from global requirements * NSX|V3: prevent a floating IP being configure on a no snat router * NSXV devstackgaterc file * Use CORE from neutron-lib * Fix TODO in vnic index tests * Fix pep-8 warning of long line * QoS Config: add minimum value of 1.0 to qos\_peak\_bw\_multiplier * Replace subscribe with register for rpc callbacks * Add oslo.privsep and pyroute2 to test requirements * Updated from global requirements * Replace "Openstack" with "OpenStack" * NSX|V Fix router resize for older NSX versions * NSX|V remove security group from NSX policy before deletion * Fix baremetal config options 
in Tempest plugin * Updated from global requirements * NSX|V3 add default gateway to static binding * Fix firewall rule to allow ping on DHCP edge * NSXv: LBaaS enable acceleration for TCP listener * Tempest: Added MDProxy scenario test cases * Tempest: API tests for Provider security group with NSXv3 * NSXv: LBaaS driver should not maintain member FW rule * NSX|V: do not connect DVR to DHCP edge if not DHCP enabled * Add missing space in config help * NSX|V add RPC listeners endpoints * NSX|V: add configuration variable for dns\_search\_domain * NSX-V: Add support for log level in router flavors * Admin-Util: Create a NSX-v DHCP edge for a network * NSX-V3: Handle pagination in devstack cleanup * NSX|v add IPAM driver to setup.cfg * Populate plugin directory to fix port operation in nsxadmin v3 * Admin-Util: Delete NSX-v backup edges by name * NSX|V update router edge when gateway subnet changes * NSX|v+v3 handle provider sgs in create port sg list * NSX|V: improve support of bulk subnets * Modify MH\_tests Old style class definition * Add edge syslog configuration support to router flavors * Tempest: API tests for MAC Learning with NSXv3 * Tempest: admin-policy API test cases * NSX|V3: fix issues with disabling port security * Create NSGroup for port exclusion * Updated from global requirements * Remove NSGroup manager unit tests * Ignore NotFound response when deleting firewall rule * NSXv: Plugin name constants file invalid * NSX|v fix get\_network\_availability\_zones method signature * NSX-Admin: Add ability to configure loglevel on edges * NSX|v fix security-group policy validation * NSXv3: Removing the use of ns-group manager * NSX|V3: delete DHCP port prior to deleting subnet * NSXv3: Use neutron\_port\_dhcp\_profile for DHCP ports * Remove vim header from source files * NSX|v+v3: Allow multiple provider security groups on port * Using assertIs(Not)None() instead of assert(Not)Equal(None) * Add Apache 2.0 license to source file * NSXv: retry call to 
create\_port base method * NSX|V3: ensure that latest devstack works * NSX-Devstack: Install vmware-nsxlib from git * Use ExtensionDescriptor from neutron-lib * Admin utility: add in ability to update edge reservations * NSXv3-Admin: fix migrate-nsgroups-to-dynamic-criteria * NSX|V: ensure that sub interface is cleaned up when disabling DHCP * NSX-V add nsx-policies extension * Modify use of assertTrue(A in B) * Using assertIsNone() instead of assertEqual(None) * Remove white space between print and () * Use DB field sizes instead of \_MAX\_LEN constants * NSX-Admin: Support syslog configuration for edges * NSX|V: fix broken unit tests * NSXv: Resume router port delete when edge missing * NSX|V3: ensure that the NSX port name is updated correctly * Don't include openstack/common in flake8 exclude list * NSX|V3: only configure mac-learning if necessary * NSXv: Log an error on DB inconsistencies * NSX|V: configure DVS for devstack if needed * Cosmetic change in unit test * Tempest: Providing support for tenant-delete feature * MH plugin tests - remove directory.\_instance * NSXv3: Change default metadata route to 169.254.169.254/31 * Integration with neutron-lib plugin directory * Add None protection for router and vnic binding objects * NSX|V3: ensure bridge is created if octavia is running * Tempest: Add east west scenario test * Make vmware-nsx capable of handling depends-on * By default, add floating IP NAT rules to each vnic on router * [NSXv3]: Turn off psec for L2GW ports * NSX|V: ensure correct teaming for port groups * NSX|V: cache NSX version number * Tempest: Remove skip decorator for security group * NSX|MH: Fix \_update\_fip\_assoc due to upstream change * Tempest: Fix tenant\_id for nsxv scenario test * Removed redundant 'the' * NSXv3: Fix a package import * NSX|V: admin utility - add in missing teaming values * NSX-V3: Fix security-group logging * Updated from global requirements * Nsxv3: Add admin utility to clean orphaned DHCP servers * NSXv: Make 
VDR transit net configurable * NSX|V3: update DHCP static bindings when DHCP is enabled * NSXv3: Fix an exception typo * Tempest: TaaS Client for Tap Service and Tap Flow * [Admin-util] NSX|V admin util to use a policy in a security group * NSX|V do not update SG logging if SG has a policy * NSX|V: add in locks for DHCP binding updates * Tempest: Remove deprecated tenant\_id * NSXv3: Catch backend failure during delete\_subnet * Add release notes for NSX-V policy support * NSX|V policy: get SG description from the policy * Add security group policy extension to OSC * Add security group extensions to OSC * OSC integration - port extensions * NSX|V: all calls to NSX are sync - no need to check running jobs * NSXv: concurrent subnet creation bugfix * NSXv3: Add error handling for SQL timeout case * NSXv3: Fix DHCP upgrade script * NSX|V: fix typo * NSXAdmin: Ignore response codes when issueing a delete fw section req * Updated from global requirements * Use neutron\_lib converters instead of neutron * NSX|V support security groups rules with policy configuration * NSX|v+v3: Allow multiple provider security groups per tenant * NSX|V - initial support for NSX policy * Use L3 constant from neutron lib * NSX|V3: fix path for exceptions * Updated from global requirements * NSXv3: Native DHCP is not supported for non-overlay networks * Integration with nsxlib * Updated from global requirements * NSX|v QoS fix DSCP rule creation * NSX|V QoS fix shaping direction * Updated from global requirements * Prepare for neutron-lib DB support * Fix osprofiler breakage for admin utils * NSXv3: Fix checking DHCP switching profile in admin utility * NSX|V: fix validations for non-ascii characters * OCS plugin + initial extensions support * NSX|v+v3: QoS BW translations should be rounded * Updated from global requirements * Use Port list type in the NSX configuration * Remove deprecation warnings * Raising proper error in case of router-interface addition * Updated from global 
requirements * NSXv3: Clean up pending entries when create\_router failed * Tempest: Add router NoNAT scenario tests * NSXv3: Enhance exception handling in create\_subnet\_bulk function * Updated from global requirements * tempest: lbaas l7-switching API tests * Tempest: NSXv3 Native DHCP Negative Test * Use compare\_elements from neutron\_lib * NSX: remove depracted DB warnings * NSX|V3: ensure race is prevented when attaching network to router * Tempest: Add tests to cover Native DHCP * tempest: lbaas l7 switching scenario tests * NSX|V3: ensure that the mac learning profile exists * NSXv3: Fix allowed address pairs switching profile * NSXv3: Create logical router after neutron router is created * Fix update dhcp bindings * NSXv3: Fix mac learning init bug if nsxv3 is 1.0.x * NSX|V3 fix nsxlib raised error with managers * NSX|v3 replace dhcp profile and metadata proxy uuids with names * Stop adding ServiceAvailable group option * NSXv3: Add plugin-specific create\_subnet\_bulk function * Include alembic migrations in module * Updated from global requirements * Enable release notes translation * NSXv3: Fix MAC Learning Profile POST API * NSXv3: Fix typo in cluster reinitialization * NSX|V3: enhance exception handling * devstack: fixed backend cleanup during unstack * NSX|V3 update client with max attempts * NSX|V: validate that a flat network is configured correctly * NSX|V: remove unused parameter * NSX|V: add context to missing configure\_router\_edge * NSX|V: add missing contexts * NSXv3: Fix NSGroupManager initialization test * Replace retrying with tenacity * Updated from global requirements * nsxlib refactor: config + neutron deps * nsxlib refactor - add hierarchy to the nsxlib apis * nsxlib refactor - remove cfg usage * Updated from global requirements * nsxlib refactor continue * Tempest: Change parameters to accommodate to tempest master * Fix broken flow classifier tests * Tempest: router\_size create and update tests * Tempest: Update nsxv3\_client to 
query more than 1k * Tempest: Add Native DHCP UniScale Tests * Add native DHCP config in nsxv3 sample local.conf * Updated from global requirements * NSX|v AdminUtil list dhcp-bindings on missing edge * NSXv3: Fix attachment setting during create\_port and update\_port * Tempest: Add back addCleanup\_with\_wait * NSX|v: Fix shared router clear gateway * Remove deprecation warnings * Fix broken unit tests * NSXv3: Fix problem when reset lport attachment * TrivialFix: typos in cli.py * TrivialFix: typos in client.py * NSX|V3 support different credentials for the NSX manages * Tag the alembic migration revisions for Newton * Add api-ref in MD format * NSXv: Change metadata port when router is deleted * NSX|v fix router migrate with metadata * Updated from global requirements * NSX|V3: ensure that octavia ports receive DHCP addresses * NSX|V3: Fix update\_subnet issue * NSX|V3: Delete DHCP binding if nova instance is deleted * NSXv: DHCP reconfigure on VDR interface removal * NSX|V3: ensure Mac learning has port security disabled * NSXv: create worker pool on new context * NSXv3: Don't advertise NAT routes in NoNAT case * Fix broken unit tests for python 3 * NSX|V3: Add retry logic for deleting logical router port * Updated from global requirements * Add release note for native DHCP/Metadata support * NSXv: Configure metadata when detaching from VDR * NSX|MH: add in deprecation warning * Fix broken unit tests * [Admin-util]: Add support to update resource pool ID for NSXv edges * NSX|V improve validate\_network performance * Fix test\_migration import * Adding release notes for new feature - provider security-groups * Fix broken unit tests - add project\_id * Tempest: Fixed error with nonexist module * NSXv3: Add support for trunk service driver * Remove deprectaion warnings for db models * Update reno for stable/newton * NSX|V3: check if subnet overlaps with shared address space * Updated from global requirements * Add releasenotes for NSXv3 TaaS driver * NSX|V fix 
IPAM driver log message format * Add release notes for the Newton features * NSX|V: make DHCP DB binding creation more robust * Fix Admin utils tests - resources registration * NSX|v: do not resize a router if not necessary * NSX|V remove duplicate log message at edge\_utils * NSX|V: ensure that log message does not cause exception * NSX|V fix router\_binding az default value after migration * NSXv: use contexts correctly while using threads * [NSXv3]: get\_floatingips filter must pass a list of ports * NSX\_V3: always set the metadata proxy in nova.conf * NSX|V3: Add relatedErrors in the details of ManagerError exception * NSX|v IPAM support for external & provider networks * NSX|V3: ensure that the NSX credenatials are set for devstack * NSX|V: enable port group to belong to a spoofguard policy * Fix more backup edges at the backend * NSXV3-devstack: added parameter -1 to curl command * Use model\_base from neutron\_lib * NSX|V router flavor support * NSX|V3: Fix connected routes not advertised * Fix provider sg delete by non admin and non admin rule change * Fix failing unit tests from neutron changes * NSX|V: return default AZ if name AZ not found * NSX|V3: be more robust under port name update * NSX|V3: ensure that variables are assigned * Admin util: remove deprecation warning * [Admin-Utils] NSX-V3 upgrade vm ports after migration * [NSX|v3]: L2gateway fails to start when Bridge cluster configured * NSXT: Adding a script to set global firewall config autodraft option * NSXv3: Fix tap-flow-create to use floating IP of destination port * Updated from global requirements * NSX|V: remove invalid parameter from context * NSX|V3: do not disable native DHCP when subnet is not empty * Fix fetching dvportgroup name * NSXv: remove LBaaSv1 code * NSX|V3: Update upgrade scripts for native DHCP/Metadata * NSX|V3: Remove unnecessary debug message * Remove lbaas migrate utility * NSX|V3: Enhance add router interface failure handle * NSXv: recover from bad VDR DHCP bind 
case * NSX|V delete metadata ports upon deleting the dhcp edge * Tempest suite file for various versions of VIO testing * Pull out dhcp and metadata tests to their own file * NSX|V3: Add codes to handle corner cases in native DHCP support * Fix tempest.conf generation * NSX|v Fix router type update to not update all the attributes * Update tox.ini for upper constraints * Tempest: Add native DHCP methods in NSXv3 client * nsxv3: refactor test\_plugin TestNsxV3Utils * remove some db method access from nsxlib code * NSX|V: do a retry for interface update * NSX|V: do not rename edge if not necessary * NSX|V3: remove backend intercation from DB transaction * Change the lock trace message * NSXv: LBaaSv2 shared pools * api\_reply: migrate routers static routes * api\_reply support for QoS migration * api\_reply: NSX-v support + activate tests * [dvs] Validate network name for portgroup networks * Expose advertise\_static\_routes prop from router api * Updated from global requirements * Remove work-around for failed l3 test * NSX|V: retry on failed virtual wire create * NSX-V service insertion fix callback registry * NSX|V: ensure that metadata works with 'internal' ipam * Fix broken unit tests * nxv3: mass refactor of nsxlib * NSX|V3: validate if 'destination' exists * NSX|V3: only run cleanup for devstack if q-svc is enabled * Make it possible for DvsManager to manage its own DVS * Change native DHCP/MDProxy log messages from info to debug level * Remove unused members arg frm add\_router\_link\_port * Fix failing L3 test * Tempest: Removed bug decorators, renamed obsolete def * Remove deprecation warning - security group db * NSXv3Admin: Fix mismatches for security-groups * Tempest: Add dvs specific test cases * Tempest: Providing support for dhcp-121 feature * Enable DeprecationWarning in test environments * call correct stop method * NSXv - Support provider security-groups * QoS integration - callbacks should support a list of policies * [NSX|V]: Fix 
add\_router\_interface for shared router driver * NSXv: eliminate task use from edge deletion * Updated from global requirements * skip failing l3 tests for now * Tempest: Network tags clients, CRUD and Filter testing * [dvs] set physical network as dvportgroup moid * NSXv3: Raise the proper exception if nsgroup doesn't exists * Admin util: use correct config options * Use neutron-lib add\_validator for registration * Rename tenant to project changes * NSX|V3: update the nova API with the metadataproxy secret * Updated from global requirements * Tempest: Update nsxv3 scenario suite * Updated from global requirements * NSXv: duplicate code * NSX|V3: ensure that the manager, user and password are set * NSXv: eliminate task use from edge update * NSXv: eliminate task use from NAT update * NSX|V refactor create\_dhcp\_edge\_service to avoid code duplication * NSXv: eliminate task use from update routes * nsxv3: provider security groups * Provider Security groups * NSXv: eliminate task from edge rename operation * NSXv: eliminate task use from edge creation * Update models to use the base model for tenant details * DVS: provide a dhcp\_driver class for plugin * NSX|V3: Delete backend DHCP servers during devstack cleanup * NSX|V3: Make metadata route configurable for native metadata service * NSX|v Metadata proxy handling edge problems * Fix xenial pep8 problems - add translations * NSX|V remove async calls to backend * NSX|V add dhcp-mtu extension to subnet * NSX|V3: Enable service\_metadata\_proxy for native metadata service * [NSXv3]: Add support for L3SPAN * NSXv: use synchronous call for firewall update * Updated from global requirements * NSX|V3: Fix delete\_network issue with native DHCP * NSX|V3: fix issue with OVS manager configuration * NSXv - dispose unused code * NSX|V: don't throw exception when same vnic is configured * Update CIF creation request body * NSXv - eliminate task use from delete\_interface * NSXv - log the call stack while locking * DVS Plugin: 
Add Support for updating a network * Tempest: Providing support for disable spoofguard feature * NSX|V Distributed router PLR creation with availability zones * NSX|V3: configure ovs manager according to NSX version * NSX|V Use configured appliance size for PLR routers * NSX|V3: configure devstack for native DHCP and metadata support * Override default value of Q\_USE\_PROVIDERNET\_FOR\_PUBLIC * Remove discover from test-requirements * Fix broken unit tests * NSX|V: remove validations for AZ's * Service Insertion remove networking-sfc import * NSX|V3: make use of agent socket * NSX|V+V3: Fix QoS peak bandwidth calculation * Make Unittests pass * NSX|V add edge\_ha per availability zone * NSXv: check bindings validity in route methods * NSX|v Add default availability zone to l2 gateway router creation * [Admin-Util NSX|V] availability zones support * NSXv - metadata status in admin utility * [Admin-Util NSX|V] add more information to backend edges list * [Admin-Util] recreate NSX|v router edge * NSXv- Exit while updating inteface on invalid edge * Updated from global requirements * Updated from global requirements * NSXv - LBaaSv1 to LBaaSv2 migration * NSX|V3: Add user-friendly names for backend DHCP/MDPROXY entities * Fix perodic failing unit tests due to sorting * QoS unit tests - fix deprecation warning * Move the get\_router ops out from subtransaction * Updated from global requirements * NSXv - validate that router binding was found * Raise exception when put CIDR into address pairs * NSX|V Extend Availability Zones to support data stores * NSX|v service insertion handle upgrade * NSX-V Service insertion support * Integrate QoS constants change * Update gate logic * [Admin-Util] list/delete orphaned backend edges in NSX-v * NSX|V unit tests - return edges names in fake vcns get\_edges * [Admin-Util] list missing backend networks for NSXv * [Admin-utils] NSXv recreate DHCP edge * Fix periodic falling test * LBaaSv2 foreign keys * NSX|V: add in edge resource 
configuration support * NSXv: Make router exception more informative * Remove white space between print and () * NSXv - add timestamps to NSXv driver tables * Update Admin-Util RST file with missing/incomplete apis * Updated from global requirements * Add Python 3.5 venv and classifier * Fix README file for better readability * NSX|V3: minor fixes for native DHCP support * [NSXv3]: Tap-as-a-Service NSXv3 driver * Updated from global requirements * NSXv: do not fail on spoofgaurd policy error * The policy file did not take effect in devstack env * Fixed typo in policy rules * Replace raw\_input with input to make PY3 compatible * python3: make unit tests pass * NSX|V - fix exclude list error handling * NSXv: Fix failure in lbaas edge selection * [Admin-Util] fix plugin object in nsxv dhcp-binding util * Admin utility RST file * Show statistics after running coverage * [Admin-Util] add missing edges to nsxv backup-edges list-mismatches * Admin utility: provide possible teaming values * Remove tenant\_id from parent model * [Admin-Util] Fix bad input handling * Admin utility: define choices and resoucres per plugin type * NSX|V: only update firewall if router binding exists * NSX|V: fix conflicting IP's when more than one subnet defined * NSX|V remove vnic from spoofguard only if port-security is enabled * NSX|V: don't fail port deletion if static binding deletion fails * Fix model migration sync tests * Tempest: Support lbaas api & scenario tests to run against upstream * Add testresources to test-requirements * Add in missing test-requirement * Updated from global requirements * Fail silently when deleting security-group rule * NSX|V: ensure route update is aromic for shared routers * NSX|V: don't fail when router binding does not exist * NSX-V: Re-raise exception if failed to assign security-group to port * NSX|V: fix edge case with admin utility and spoofguard * DVS plugin - fix type * NSX|V: retry for network deletion * NSX|V: don't log eception when edge is not 
active * NSXv3: Support CH nsgroup membership using dynamic criteria tags * [Admin-Util NSX|V] update the data stores of an existing edge * NSX|V: add in missing lock for updating nat rules on edge * NSX|V: address DB lock wait timeouts in the plugin * Unit test for nsx|v + nsx|t admin utils * [Admin-Util] cleanup to avoid crashing in extreme cases * Tempest: Fix upstream patch 32049 which replacing oo-wrap * Use AFTER\_INIT in lieu of AFTER\_CREATE * NSX|V3 utility to identify CrossHairs version * [dvs] support 'portgroup' provider type * NSX|V tests - add missing fake vcns api & fix existing * NSX|v3 fix MAC learning exception format * Skip QoS update while creating network during init * Ensure that \_ does not override translation function * Rename URL components for native DHCP support * Prep for pbr warnerrors * Updated from global requirements * NSX|V3: import conditional mock * NSX|V3: Add support for native metadata proxy service * NSX-V support updating port-security of a port * NSX|V: prevent exception with router deletion * NSX-v QoS - fix refactor integration to use the correct api * NSX|V: fix broken unit tests * Updated from global requirements * NSX|V3: mac learning support * NSXAdmin: Update metadata shared secret * NSX|v HA: deploy edges on 2 datastores * api\_replay: remove unneeded entry point to file * [Admin-Util] add neutron config init to the admin utils * NSX|V3 fix get\_ports when retrieving only specific fields * Revert "Temporarily disable tempest plugin" * Tempest: fixed upstream remove network\_resources from sceanrio * Tempest: Use client result for Micro-Segmentation * Tempest: Use network client result instead of OO * NSX|V3: Add support for native DHCP service * Make exclusive router size updatable * NSX|v unit tests: fix fake\_vcns get\_section\_id * NSX|V add vm to exclude list when the port has no port security * NSX|V: validate GW information is correct when attaching router * Temporarily disable tempest plugin * Can't set 
gateway on no subnet network * Fix add same network on different type routers failed * Ensure that ListOpt configuration variables are set * [Admin-Util][NSX-v3]: fix ports mismatch admin util * Remove POT file * Make NSX plugins independent of devstack:lib/neutron-legacy * NSX|V3: start\_periodic\_dhcp\_agent\_status\_check( is deprecated * NSX|V: fix unit test failures * Fix broken unit tests * Tempest: NSX-v external network supports multiple subnets * Tempest: Providing support for dhcp-121 feature * Rename edge appliance before its deletion * Tempest: QoS clients and API CRUD operation tests * NSX|V: use correct lock for dhcp vdr binding * LBaaSv1: Delete LB objects when backend is broken * LBaaSv2: Delete LB even when backend is broken * NSX-V: support qos policy in network get * [Admin-Util][NSX-v3]: list routers which are missing from backend * Fix broken unit tests * Updated from global requirements * Updated from global requirements * [Admin-Util][NSX-v3]: validate ports switch profiles on backend * NSX|V: use correct logging type * Updated from global requirements * NSX-v3: Initial framework for api-replay-mode * Add neutron-api-reply cli tool * NSX|V handle duplicate hostname binding error * Upstream broke Qos unit tests * Update name of backend ports when router name is changed * NSX-V3: support qos policy in port/network get * [Admin-Util][NSX-v3]: list ports which are missing from backend * [Admin-Util][NSX-v3]: list networks which are missing from backend * Updated from global requirements * QoS refactor required changes * Ensure migrate script pass on newer MySQL * Tempest: Change external network to public network * NSX|V3: ensure that a VLAN network cannot be added to a router * NSXAdmin: update member IPs in metadata LB * NSX|V network creation with availability zones hints * NSX|V router create with availability zones hints * NSX|V: only update NSX if neceesary for router update * remove expire\_all in getting backup edges * NSXvAdmin: Fix 
mismatches for security-groups * Updated from global requirements * Fix selecting same backup edge from pool * Tempest: Change tenant to project * Add sample local.conf for nsxv3 * Tempest: Add micro-segmentation scenario test * Tempest: Format vmware\_nsx\_tempest README * NSX|V rename distributed router plr edge when router is renamed * Add README for the NSX QoS service * Enhance getting valid router bindings * NSX|V fix Tasks logging to not crash on non-ascii characters * NSX|V3: add in a method for getting the NSX version * Updated from global requirements * Fix distributed router rename error * Remove ref to oslo log verbose * NSX|V: be proactive with DHCP binding conflicts * NSX|v remove unused dhcp functions from edge\_utils * Tempest: lbaasv2 scenario http round-robin operation test * NSXv3: Clean up logical port for any nsgroup association error * NSX|V3 QoS DSCP marking support * NSX|V: ensure locking when detacing router interface * [NSX|V|V3]: Refactor NSX-V L2 Gateway driver * Tempest: Add nsxv3 api and scenario test suites * Remove deprecated warnings for neutron\_lib * tempest-api-network updates * NSX|V3: ensure no dangling networks * NSX|V rename backend edge name when router is being renamed * Cleanup script: fix typo * NSX|V3 QoS: handle illegal max bandwidth * fix deleting network error with multiple subnets * Multiple external subnets support * Reorder exclusive router delete process * [NSXv3]: Refactor v3 L2 Gateway driver * Updated from global requirements * NSX|V don't crash unattached router port update IPs * NSX|V fix deadlock while updating router gateway * NSX|V: fix broken unit tests * NSX|V Set router status to error if edge\_update fails * NSX|V3: ensure that non Overlay network cannot be added to a router * NSX|V: validate result before return IP * NSX|v fail adding external subnet/port as a router interface * Backup edge put enhance * Fix tempest breakage * NSX|v routers: remove redundant calls to backed fro static routes * Updated 
from global requirements * [Trivial] Remove unnecessary executable privilege * Fix creating portgroup provider net with no physical\_network set * NSXv: Use locking when updating a port * Tempest: Fix py3 indexing issue on dict\_keys * NSX|V: save backend calls when creating DHCP binding * Router intf/gw error enhance * NSX|V3 Delete unused QoS plugin * NSX|v QoS DSCP marking support * FIP firewall rule is missing * Switch to using hacking checks from neutron-lib * [Tempest]: dns-search-domain scenario/negative tests * Tempest: Skip security group tests because of bug * NSX|V Use requests module for HTTP/HTTPS * NSX|V3 add QoS support for networks * [Admin-util][NSXv3] Fix help message for secgroups * [Admin-Util]: Add error handling to nsxv update\_dhcp\_edge\_binding * NSX|V3 add qos support for ports * [Admin-Util] Add error handling to nsxv update\_switch admin utility * Set new default password that vdnet is using * Tempest: Added L2GW API tests * Updated from global requirements * ADMIN: fix confusing error message for spoofguard * NSXv: mock spawn\_n when runnign unittests * Fix a typo in nsx\_v3 update\_resource mocked call * Admin utility: ensure that the router is defined * NSX|v3 replace configuration uuids with names * Updated from global requirements * NSXAdmin-v3: Don't delete internal fw sections and groups * NSXv: Remove redundant code to check for duplicate rules * Revert "NSX: remove usage of ovs\_use\_veth" * Change default backup edge size to compact * Clean edge vnic bindings for a clean backup edge * Tempest: Change tenant prefix to project * [Tempest]: initial lbaasv2 clients and API tests * Enhance dhcp service error handle * Enhance update edge error handle * Automatically generate vmware-nsx configuration files * nsx\_v3: Allow security group rule id to be specified * [Admin Utils] Added missing return to get security group * NSX: remove usage of ovs\_use\_veth * NSX|V log warning when getting a router-binding entry with bad status * 
NSX|V3: fix test imports * Add nsxv3 tempest test suite * NSX\_V3: treat logical port mapping integrity issues * Fix dhcp lock error when update same edge * NSX|v limit access to metadata service to specific protocols * NSX|V prevent adding static routes to shared routers * [Tempest: use project instead of tenant * [L2-gateway]: Fix l2gw plugin due to networking-l2gw changes * [NSX-v3]: Fix L2GW connection-create * Updated from global requirements * Remove vmware-nsx's static example configuration file * NSXv: Enhance edge deploy failure handle * NSX|V add qos support for networks * [Admin-Util]: List networks associated with missing edges * LBaaSv2: Delete fails while LB in ERROR state * [Admin-Util]: Fix tabulate results method * NSX|V3: separate the neutron network id from nsx network id * LBaaSv2: Fail when no router found for subnet * Change async static route call to sync * Keeping the load balancer firewall on edge * [Admin-Util]: Add support to list missing edges for NSXv plugin * NSX|v update edge device when the user changes the port ip address * NSX: do not block init with security group logging configuration * NSX|V3: enable plugin to use existing ID's * fix failing pep8 job * subnet host route support * Add dhcp metadata host-route support * [NSX-v]: Add support for multiple DVS for VLAN network type * Remove attribute not specified before checking duplicate sg rules * [NSX-v]: Validate edges while subnet create * NSXv3: Adding support for 'secgroup-rule-local-ip-prefix' extension * NsxV3: Fine grained logging for security-groups * NSXv: Fine grained control for logging security-group rules * Checking load balancer before removing router interface * NSX|MH: remove tests that break the gate * Add the metadata lb back * Enhance update routes at the backend * Fix network attached to two distributed routers * NSX|V3: remove redundant warning * NSX|V3: Optimize finding metadata port * Optimize get\_networks function in all plugins * Tempest: Adding l2gw 
test * Remove useless edge cluster uuid * Add option to expose dvs features on nsxv plugin * NSX|V3: pass 'nsx-logical-switch-id' as part of the port details * Tag the alembic migration revisions for Mitaka * clean tier0 ports created from nsxv3 * Add debug message for REST call reply * Tempest: Use routers\_client in router test * Fix security-group bulk rule creation * Tempest: Add external\_network\_cidr in config * NSX|V: upstream broke us * Tempest: Use data\_utils from tempest.lib * Admin util: add in option to set the DVS teaming policy * Fix deploying edge concurrently failure * Tempest: fix broken tempest tests * Translations: ensure that the locale directory is created * NSX|V3: Change default value of metadata\_on\_demand to False * Tempest: Add multi-hypervisor scenario test * Used warning instead of warn * NSXv3: Avoid AttributeError in create\_security\_group exception handling * remove unneeded param from \_create\_port\_at\_the\_backend * NSXv3: Update existing default firewall section on init * NSX|V: increase default retries * Fix cfg\_group not found in dvs plugin * Add missing translation to LBaaSv2 listener * NSXv: Better exception handling when failing to create a secgroup rule * Register extending function for security-group rule extension * urlparse is incompatible for python 3 * NSX\_V3: ensure that DHCP works with multiple subnets * Add extension fields on network and port create * NSX|MH: rename qos extension to qos\_queue * Extending security-group ingress rule * Updated from global requirements * Don't rely on unique names to initialize default backend resources * Tempest: change tempest\_lib to tempest.lib * Insert new security-group FW sections at the bottom and not at the top * Allow use of port 22 for LBaaS VIP * Translate LBaaS TERMINATED\_HTTPS to NSX equivalent * NSX|v3 fail create\_port for external network with device owner compute * NSX|v3 update\_port on backend only if it was created on backend * NSX|V: improve get\_version 
method * NSX|V3: Add support for vxlan in provider:network\_type * Change length of cert\_id field to 128 * NSX|V3: fix broken unit tests * NSX|V3: use oslo\_serialization instaed of json import * NSX Admin: Add support for NSXv Security groups * NSX: make use of neutron\_lib constants * Fix router intf port deleted when are in use * Updated from global requirements * [Tempest]: fix upstream remove commands module * NSXv: fix broken unit tests * Skip test\_create\_security\_group\_rule\_icmpv6\_legacy\_protocol\_name * NSX|V: fix broken unit tests * NSX: Enable resource tracking in NSX plugins * nsx-v3: Configure interface and route to external network * Tempest: Add placeholder for common tempest tests * Add internal metadata network on demand * nsx\_v3: Move db access in update\_port under same update transaction * Apply routes on VDR's PLR when no-snat is enabled * Updated from global requirements * Revert "Move router db call ahead of backend call" * Move remove\_router\_interface\_info db call ahead of backend call * [Tempest]: NSX-v dhcp service is not reachable * Fix vdr interface deletion sometime failed error * Revert "NSX-T separate the neutron network id from nsx network id" * NSX|V and NSX|V3: add in support for 'subnet\_allocation' extension * NSX|V fix broken unit tests * NSX: make use of neutron\_lib exceptions * Updated from global requirements * [NSX-v]: Use oslo\_serialization for jsonutils * Don't assume backend resource was created if no ManagerError raised * nix.ini: fix typos * Reorganize locking for NSXv * NSX-T separate the neutron network id from nsx network id * Multiple Transport Zones Scenario Tests * Initial release of DNS search domain API test * NSXv: Edge firewall default timeout should be 7200 * Admin util: add support to get network morefs * NSXv3: Add missing config option details to nsx.ini * NSX-v3 reinitialize cluster on fork * NSX-v3 disable psec per port * NSXv: Add DNAT rules for the interfaces of the shared routers * Add 
force=true option to NSgroups DELETE call * Differentiate between StaleRevision error to other manager errors * NSXv: Place LB only on tenant's router * Fix log exception * Remove deprecated warnings * NSX|V3: Remove Neutron port if failed to add port to security-groups * NSXv - allow changing the router type exclusive <-> shared. APIImpact * NSX|V3: Remove neutron port if failed to create backend port * Admin util: fix spoofguard issues * NSX|V: ensure that gateway network has a subnet * nsx-v3: remove old FIXME comment * NSX v3 devstack cleanup invalid call * Separate NSX backend transactions out of neutron DB transaction * NSX|V3: Fix floating IP status * Resolve NetworkInUse race condition * Additional debug for NSX v3 cluster * NSXv3: Retry to remove NSGroup member for any ManagerError * Updated from global requirements * Do not exclude flake8 checker for devstack directory * Multiple Transport Zone API tests * Updated from global requirements * NSX|V3: Update router name on NSX * Avoid UnboundLocalError: local variable 'lport' on external networks * NSX|V: ensure that DHCP config is updated synchornously * NSXv admin util - cleanup edge vnic bindings * Add nsxv3 delete router test * Consolidate branch setup for dependencies pulled from git * NSX-v3 multi-manager round robin scheduling * NSX-v3 update endpoint state only on timeout * Updated from global requirements * NSX|V: add ability for admin to configure default nameservers * Revert "[NSXv]: Push description to NSXv edge during creation" * NSXv: raise exception for reserved subnets * Unblock the gate * Change imports for IP\_PROTOCOL\_MAP * NSX-v3 sensible HTTP connection defaults * [NSXv]: Fix multiple tz subnet create * Add nsxv3 security group test * Add bandit security linter * NSX-v3 HTTP retries conf property * [NSXv]: Add support for dns search domains in NSXv plugin * NSX-v3 proxy exception handling * Add nsxv3 floating ip temepst test * Add method to get firewall rule in nsxv3 client * [NSXv]: 
Push description to NSXv edge during creation * Add nsxv3 router tempest test * Better error message when reaching security-group maximum capacity * NSX-v3 http read timeout * Follow the convention * Files contains test lists for regression test execution * Run selected api network tests * Negative tests for Multiple Transport Zones * Add nsxv3 client for vmware\_nsx\_tempest * Update translation setup * Add nsx networks test * Add NSXv3 config for tempest * [AU]: Add command to modify edge appliance size * NSX|V: fix broken unit tests * Instruct tox to ignore import exeption on vmware\_nsx\_tempest.\_i18n * Add retry logic when deleting logical port and logical switch * Updated from global requirements * Remove deprectaed warnings * NSX|V: ensure that DHCP bindings are deleted * Updated from global requirements * Always set advertise\_nat\_route\_flag True for FIP * move devstackgaterc file to devstack/nsx\_v3 * Fix LBaaSv2 logging * Add external DNS driver mixin to VMware plugins * Enable availability zone for network * Fix broken unit tests * Add placeholder for NSXv3 tempest tests * init release of vmware-nsx tempest tests in tempest external plugin * Updated from global requirements * Updated from global requirements * Do not specify vnic index while setting DGW * Fix unit tests failures * Address pair validation for NSX v3 plugin * Define has\_neutron\_plugin\_security\_group using override-defaults * NSX|V3: ensure that port update allows traffic * NSX|V3: fix the router tags for uuid * Fix OS Default FW section apply\_tos * LBaaS Layer4 TCP VIP should use LVS * Rename badly named edge appliance * Truncate edge appliance name when too long * Updated from global requirements * Fix plugin(s) following upstream changes * Use a retry when adding or removing NSGroup members * NSX|V3: ensure that a resource is used when creating a short name * NSX|V3: fix network name update to include UUID * Updated from global requirements * NSX|V3: add tags for the T0 and T1 
ports * nsx\_v3: delete security group rule from backend first * NSX|V3: add tag for instance id if possible * NSX|V3: provide better names for ports on the backend * Updated from global requirements * Explicitly call \_ensure\_default\_security\_group before creating a new SG * Adopt incremental add/remove member API for NSGroup * Insert FW rules at the bottom instead at the top * NSX\V3: use 'OS' as the prefix for the nested groups * Delete i18n\_cfg file * LOG.warn -> LOG.warning * NSX|V3: ensure that tag length does not exceed 40 characters * [NSX-v]: Update existing vague exceptions * [NSX-v]: Introduce a more user friendly exception * [NSX-v]: Validate DNS search domain values at API level * Making the number of nested NSGroup configurable * Delete all NS-Groups when call unstack.sh * Locking when initializing the NSGroupManager and the default FW section * NSX|V: dvs\_id is optional and not mandatory * NSX|V3: rename cleanup script for devstack * Ignore NS-Groups that have no "tags" * Admin util: verify that a backup edge is a backup * NSX|V3: remove double import * NSX|V3: Rename logical port with router attachment * NSX|V3: Rename tag in logical port mapped to neutron DHCP port * NSX|V3: Rename tag in logical port with router attachment * NSX|V3: Add tags for DownLink logical router port * NSX|V3: fix short-name notation * NSX|v3: Scaling security-groups by using multiple nested groups * Fix attach specific router port failure * Removing manually edge members placment codes * Updated from global requirements * Update pool erases member config * Fix parameter list for create\_dhcp\_bindings() * [NSXv3] Add tags to qos switching profile * Fix potential infinite loop during add\_router\_interface * [NSXv3] Add os-project-name tag * NSX|V3: add in tag resource * NSX|V3: Rename logical router * NSX|V3: Rename logical router port * Move metadata proxy processing out of DB transaction * Move python\_nsxadmin out of a top-level namespace * nsxv3 multi manager:fix 
backend cleaup * NSX|V3: ensure that router id is updated prior to tag creation * NSX|V3: provide a unique name for the network on the backend * Set logical switch os\_neutron\_id tag * NSX|V3: ensure that the DHCP switch profile is not too permissive * NSX|V3: fix tags for internal resoucres * [NSXv3] Clarify error regarding missing tier0 UUID * NSXv3: fix edge HA doesn't work * Add metadata proxy support in NSX/T plugin * Fix delete\_port case in handle\_port\_metadata\_access * Fix multiple subnets attached to router * Updated from global requirements * Clarify usage of NSXv3 default UUID settings * NSX|V3: add in missing tests for portbindings * Fix attach second subnet on router failed * NSX|V3: add in missing support for host\_id update * Updated from global requirements * Deprecated tox -downloadcache option removed * Updated from global requirements * Rename os-tid tag to os-project-id * Admin Utility: Add orphaned-edges resource * Admin Utility: Minor fixes for output formatting * Admin Utility: Update DHCP binding for NSXv edge * NSX|V3: improve configuration names * Make sure correct branch of neutron is pulled in * Admin Utility: Add command for delete-backup-edge * Rename neutron-id tag to os-neutron-id * NSX v3 multi-manager * Skip updating logical port when removing router interface * Remove code that is no longer used * Updated from global requirements * Bugfix: Add translation hints * Stop creation of port at the backend in case of failures * Use the correct \_ from vmware\_nsx.\_i18n file * Updated from global requirements * nsx\_v3: remove unneeded call to nsx\_db.get\_nsx\_switch\_and\_port\_id * Fix custom conf files referencing * [Admin Utility]: Add command for list-backup-edges * Install the tools folder on vmware\_nsx install * [Admin utility nsxv3] Fix import error * Updated from global requirements * Add reno for release notes management * Admin Utility: Fix output for missing edges and spoofguard policy * NSX|V#: add in missing log hint 
* Add tag for os default dfw section * Fix spacing for help string * Admin util should work from any dir * Add enum34 & PrettyTable dependencies[admin util] * Cleanup python-nsxadmin dir * Use prettytable and rename python-nsxadmin - to \_ * [AU]Fix help message for supported ops on resource * Switch to internal \_i18n pattern, as per oslo\_i18n guidelines * NSX|MH: unblock gate * Added cleanup for switching profile in unstack * NSX|V: remove exceptions when running unit tests * NSX|MH: fix broken unit test * NSX|MH: unblock the gate * Add nsx-update to supported ops * [NSXv] Add SSL support for metadata service in NSX-V plugin * Fix stale dhcp bindings left after VM stress test * Fix translation file names * Remove deprecated parameters * Setup for translation * Add execution time to backend transaction logging * Fixes typos * NSX|V: add locking for interface management * NSXv: Change edge\_ha flag on edge updates * Check if l2gw has been used before creating l2gw-connection * [NSXv]: Add support for multiple transport zones * Use config choices for replication\_mode option * Fix network not detached from vdr dhcp edge * Updated from global requirements * [Admin Utility]: Add command to enable NSXv Edge HA * Fix typos with topy * Random cleanups * Updated from global requirements * Use choices for exclusive\_router\_appliance\_size * Adding unittests for security-group implementation * Updated from global requirements * NSXv3: Exclude tempest tests expected to fail * Fix unittests for port-security and security-groups * NSX|V: fix broken using tests * Remove an invalid parameter in logical-router-port REST request * Fix attach logical router port failure * Add NSX\_L2GW\_DRIVER for NSX-v plugin * NSX|V3: treat stale exception on port update * Updated from global requirements * Explicitly remove nsgroup form the nsgroup container on secgroup delete * Revert "Security Group integration scale up" * NSX|V3: ensure that rate limits are disable for the DHCP profile * 
Support L2 gateway edge HA according to the edge\_ha setting * Enable global advertisement status flag * Log returned data from NSX manager * NSX|V: ensure that spoofguard policy is port update * Fix Edge appliance rename failure * Fix up broken unit tests * NSXv: Check router-type before router-size * Use PortOpt instead of min/max on IntOpt * Security Group integration scale up * NSXv3: Fix typo in URI while setting GW for router * NSXv: Add method to get dhcp bindings count per edge * Fix indentation * Admin utility: List missing DHCP bindings on NSXv * Admin Utility: List spoofguard policy mappings * Admin Utility: Delete orphaned NSXv edges * Fix show 'security-groups' field for port with no security-groups * NSX|T: update nova configuration with ovs\_bridge * Ensure that method's default argument shouldn't be mutable * NSXv: add address pair support * Admin utility: list orphaned NSXv edges * Adding a required field to when requesting to add an ns-group member * Remove session flush in nsx v3 plugin * LBaaSv2 driver * Add support for adding update callbacks * Framework for debugging nsx-openstack setup * Updated from global requirements * Rename "routes" to "results" for consistency * Rename 'rule\_id' to 'id' for consistency * NSX v3 API client error propagation * Adding firewall default rule to allow outgoing dhcp traffic * [NSXv]: Adds method to get all spoofguard policy mappings * [NSXv]: Add get and list methods for spoofguard policy * Cleanup utility for nsxt plugin * Updated from global requirements * Move 'locking\_coordinator\_url' to common configuration section * psec profile distributed locking * Updated from global requirements * Add networking-l2gw to tox\_install.sh * NSX-v3: Fix security-group update missing members attribute * nsx v3 router refactor * Attach psec profile only with IP addr * Updated from global requirements * nsx v3 ut cleanup * NSXv3: Fix rpc code * Fix DHCP agent updates * Fix missing nsx v3 layer3 api changes * [NSXv3]: Fix 
update\_port call * Fix missing function in nsxlib v3 * NSXv: Adding Subnetpools unittests * Fix DHCP firewall rule * Set external ID for nsxvswitch bridge to "nsx-managed" * nsx v3 port security support * Updated from global requirements * nsx v3 lport updates * Explicitly add 'members' field when creating ns-group * Updated from global requirements * Fix dhcp\_router\_id DB integration error * refactor NSX v3 UTs * [NSXv3]: Enable tempest tests for security-groups * NSXv3: static route support * Fix loadbalancer driver call NeutronManager init on every call * [NSXv]: Add conf param for exclusive router edge size * Revert "Fix subnet use vdr dhcp edge for dhcp service" * Enable HA on the edge which works as L2 gateway * Reorganize vmware\_nsx/tests structure * NSX|V: enable update for subnet from horizon * NSX|V: set the edge\_ha default state to be False * Add router-size when creating an exclusive router * Update coverage file to exclude cisco files * nsx v3 lport refactor * nsx v3 port security plumbing * Change ignore-errors to ignore\_errors * NSXv3: Completing security-group implementation * dfw\_api.py: fix nsxclient typo * Updated from global requirements * NSXv: enforce backend limitations with IPv6 * NSXv: set the 'aggregatePublishing' on the manager * Divide vmware\_nsx/nsxlib/ into mh and v3 subdirectories * [NSXv]: Fix router attribute validation check * NSXv driver for Layer 2 gateway * Updated from global requirements * NSX\_V3: do not configure ovs manager * Divide vmware\_nsx/plugins into plugin-specific subdirectories * Ensure that DHCP agent is configured with correct bridge name * L2 gateway migration skip * Metadata LB configuration should be synchronous * Updated NSXv plugin parameter descriptions * NSXv3: FIP support * Divide vmware\_nsx/services into plugin-specific subdirectories * Add sample localrc for nsx\_v3 * NSXv: ensure that locking is done with flag 'external=True' * Move vmware\_nsx/neutron/plugins/vmware to vmware\_nsx * Move 
vmware\_nsx/neutron/tests to vmware\_nsx/tests * Move vmware\_nsx/neutron/services to vmware\_nsx/services * Move vmware\_nsx/neutron/db to vmware\_nsx/db * Replace missing tag with "" instead of None * NSXv3: router intf support * NSXv3: Router GW support * NSXv3: Add test coverage for build\_v3\_tags\_payload * NSXv3: Add neutron-id tag to backend * Update references for vmware plugin config (etc part) * NsxV3: Router preparation for GW/intf/FIP support * Remove version attribute from setup.cfg * Add NSGroup container and default firewall section for security-groups * Security Groups implementation * Move vmware-etc to top directory * NsxV3: external network support * NSXv3: Update backend during network update * Fix README.rst * Bump version to 7.0.0 * Fix duplicate dhcp edge name error * Fix dhcp service edge select/delete conflict * Use synchronous call when updating VDR interfaces * Fix dirty DB entries left on deleting vdr with GW * Fix subnet use vdr dhcp edge for dhcp service * Fix logging message on VIP update * NSXv: ensure that member update are atomic across API workers * Handle VDR connected to subnet while not the DGW * NsxV3: Add test-list for tempest * Updated from global requirements * Fix a typo in comments in nsx.ini * Fix NSX-v test and update logic for v6 subnets * Updated from global requirements * Add in pep8 checks for the tools directory * Fix a typo in comments true -> True * Stop doing any magic cloning of neutron during CI * Add support for dhcp extra opt to nsx v3 plugin * Add Model-migrations-sync test * Fix dhcp bindings missing problems * Fix some routes disappeared on TLR * Nsxv: Fix db out of sync with backend * Fix comment concerning metadata agentless mode * Fix exception handling in update port call * NSX|V3: create dhcp profile at boot time * NSXv: validate that router is not None * Fix fip problems for provider router * LBaaS: allow configuration of SOURCE\_IP LB method * Correct the version checking for rp\_filters * Remove 
router\_type for distributed router * Updated from global requirements * Add support to NSXv3 driver to verify certs * Add unit tests to test dhcp switching profile * NSXv3: Introduce config param to add dhcp switching profile * NSXv3: Fix router interface deletion * Tag the alembic migration revisions for Liberty * Skip test\_create\_router\_gateway\_fails test * Add conf parameter to local.conf to set default l2gw * Fix the launchpad url correct * Update L3 codes due to API changes * Missed l2gw port check in driver * Install vmware-nsx during 'stack install' phase * Move nsx\_l2gw\_driver to DEFAULT section in nsx.ini * NSXv3: Add more unit tests for layer 2 gateway support * Delete security group bindings on port delete * Fix the L2gw migration script * LBaaS: up state update for members in ERROR state * NSXv3: Add backend driver for Layer 2 gateway * Updated from global requirements * Change the first letter of Log sentence to uppercase * Handle concurrency with LBaaS FW section update * NSXv: fix LBaas logging issue * rp\_filter status get not supported in NSXv 6.1.x * NSX-MH: Fix test\_update\_subnet\_gateway\_for\_external\_net UT * NSX: Move DB models as part of core vendor decomposition * Adding Neutron API extensions * NSX: Register migrations at install time * Refactor neutron\_plugin\_configure\_service() * Updated from global requirements * [NSXv3]: Add sample conf variables to nsx.ini * Deploy NSX Edges in HA mode * Cache lb\_plugin in the loadbalancer driver * Use min and max on IntOpt option types * Ensure NSXv driver can verify certificates * DVS: Verify certificate on vCenter connections * Fix test module import due to a rename in neutron * Define VIF\_TYPE\_DVS in vmware-nsx repo * Fix some typos in docstring and error messages * Replace references to VC with vCenter * Updated from global requirements * Fix logging message while updating LBaaS pool * Make edge cluster specification optional * NSXv: do not fail on spoofgaurd policy error * 
Updated from global requirements * Adds CRD operations for NSX Qos Service Plugin * Fix failing unit test due to exception change * NSXv: ensure per process task manager * Remove duplicate NSXv3 option * Add parent/tag integration for docker stuff * NSX-mh: Failover controller connections on socket failures * NSXv3: Support network creation options * Fix nsxlib.v3.client.delete\_resource * Updated from global requirements * NSXv: prevent host route creation * Fix interlock between dhcp conf and dhcp binding * L2 gateway service plugin support NSX-V backends * Initialize alembic branches for vmware-nsx repo * NSX-mh: Remove \_get\_fip\_assoc\_data * NSX-mh: use router\_distributed flag * Mishandled concurrency with metadata on DHCP * Support Nova metadata address change * Fix concurrently update/CD dhcp on same edge * Verify dhcp/net binding exists before using * Mitigate DB inconsistency on dhcp edge * Randomly select available dhcp edges * Logging jobs number per edge * Open firewall for static routes relative flows * Fix Default edge firewall rule * NSX-v3: Add support for port update * Use ICMP to health-check metadata proxies * Add HTTP method, URL path parameters to monitor * Updated from global requirements * LBaaS: Add member status to member statistics * Mark LBaaS pool as ERRORed when no Edge is found * Fix using the None object * Prevent deletion of router port with LBaaS config * Move vmware plugin from neutron to vmware\_nsx repo (etc part) * NSXv: fix broken unit tests * Drop ipv6 addresses before sending to nsx-t backend * Add support for L2 gateway service plugin * Add metadata\_initializer configuration parameter * Adds support to create qos-switching-profile * Log response body on bad requests from nsx * NSX: Rename default\_interface\_name option * Updated from global requirements * Fix plugin url in readme * Fix fetching LBaas pool stats failure * Fix DVR for NSX-mh * Fix LBaaSv1 pool member deadlock * NsxV3: Adding Neutron security-group 
processing and testing * Verify that VDR DHCP binding exists before using * Remove warning on router admin state not supported * Remove check for overlap between the fixed ips and addresspairs * Nsx manager client: return meaningful errors * Initialize a per-process tooz coordinator * Prevent failures on router delete * NSX-mh: perfrom appropriate pre-delete checks * Set default rp\_filter for NSXv 6.2 * NSXv: prevent toggling of ditributed router * Updated from global requirements * Add nsx-manager config option to nsx.ini * Add support for dhcp extra opt * Refactoring out common client api methods * Revert "Fix routes configuration failed at backend" * Updated from global requirements * Don't fail when adding an existing nsx-security-group member * DHCP Edge ping bugfix * stop using namespace packages * NSXv: ensure that update\_interface is synchronous * NSXv: only configure DHCP on backend if subnet is configured * Fix cleanup upon metadata failure * Bugfix: start coordinator on locking init * NSXv: fix bad log message * Fix routes configuration failed at backend * NSXv: fix debug log message * NSXv: fix broken log * NSXv: support for multi api\_workers * Add locking to LBaaSv1 driver, exclusive router * Anothher step towards Python 3 compatibility * Fix broken unit tests * Updated from global requirements * Adding debug logging when vnic fixed-ips are assigned or updated * Updated from global requirements * Fix LBaaSv1 exceptions * MH: Limit enabled versions for agentless mode * NSX-mh: allow names with underscore for l2gw transport * Updated from global requirements * NSXv: enable support for a portgroup provider network * Metadata VDR bugfix * DHCP Edge ping bug * Fix failing unit tests * Put user/password in nsx\_v3 during devstack setup * Adding retry when requesting NSXv to add security-group member * Remove skipped tests due to missing mocks * Updated from global requirements * Disable autoconfiguration of rules on Edge * create\_port in plugin is 
sometimes called within a transaction * NSX3: reuse common code * Fix update subnet from disable-dhcp to enable-dhcp * NSXv: ensure that DHCP bindings are done synchronously * NSX: \_delete\_port was changed to ipam.delete\_port * Fix unit tests for master * Fix DBDuplicateEntry in unit tests * nsxv3: implement address\_bindings * Split out nsx\_user/pass to nsxv3 section * Make use of oslo\_service for loopingcall * Update FW cluster section when adding additional clusters * Support update subnet dhcp status * Bugfix for metadata support with overlapping IPs * Python 3: dict\_keys object does not support indexing * Updated from global requirements * Fix class test name to be V2 not V3 * Updated from global requirements * Fix FIP datapath is broken by disabling enable\_snat attribute * Fix delete API response code * Add unit tests for nsxlib-backend methods * Add neutron\_version tag to NSX entities * NSXv: fix broken unit tests * NSXv: add in extra configuration validations * Recreate metadata route after VDR gateway is set * Add delay to sync thread startup * Add Openstack tenant-id as os-tid to NSX * Fix failures caused by upstream changes * Return 400 status code if l3\_ext network type is misused * Fix typo * Fix test\_router\_add\_interface\_port\_without\_ips testcase * Updated from global requirements * Do not convert RXTX factor to int * Fix routes disappeared by shared routers with different tenants * Updated from global requirements * Updated from global requirements * Sort our neutron requirements * Fixing default security-group ingress rule * Fix delete provider network with dvs backend * Fix update subnet gateway when creating the edge binding * Support reconfiguration of metadata service * Enable neutron unit tests for network and port * Update to the latest requirements * Distributed locking support * Change stackforge to openstack * Update .gitreview file for project rename * Pass admin\_status from CLI to NSX for port-create * Add devstack support 
for nsx-v3-plugin * Set display\_name for port on NSX if specified in Neutron * Skip failing unit tests * NSXv: fix bad log message * Handling IndexError in case there are no vdnscope * NSX-mh: fix default l2 gw connections * Removing port dhcp binding when detaching interface * Fixing port index update with setting index to None * move logical switch deletion after edge delete * Prevent RXTX factor from being updated on network queue * Fix a typo * Python3: replace dict.iteritems with six.iteritems * Change spoofguard publish on vnic level * Support for router admin-state-up * NSXv: fix broken unit tests * NSXv: add in support for dhcp\_lease\_time * NSXv: Update DHCP binding configurations * NSXv: fix log message * NSXv: fix hacking pep8 issues * Allow ping reply from DHCP Edge appliances * Metadata for VDR networks * Fix redundant retry-on-exception when removing vnic from spoofguard * Make Edge appliance credentials configurable * Create backend security-group and cluster dfw section if needed * Removing use of contextlib.nested * Expect a IPv6 unit test failure * Fix a typo in nsxv\_cleanup * Fix uplink direction and don't specify index * NSXv: add in support for update\_subnet * Enable static routes on shared router * Tell NSX to delete all ports when delete lswitch * Fix hard coded credentials * Delete port from NSX backend first * Fix nosnat environment blocked by edge firewall problem * Remove sorted func in nsxv plugin * NSX: fix broken tests for ICMPv6 * DVS: ensure that horizon works with DVS plugin * Add DVS devstack support * DVS: fix issues with spinning up VMs * NSX-MH: do update port on backend outside of db transaction * NSXv: Add fake tenant id to metadata network * Address exception when creating metadata service * Fix shared router is bound on a deleting edge * Fix identity key conflict error in sqlalchemy * VMware: fix broken tests * Backend handling on security group update * More fixes and cleanups * Fix port delete * Implement router delete 
and fix router create * Add port-binding extension and some small bug fixes * Add basic integration with nsx\_v3 * nsx\_v3\_plugin: Initial plugin framework * Fix unit tests import paths * Enable subnet-allocation for the MH plugin * Refactoring of security-groups operation * Fix broken unit tests * Recover unit tests from upstream changes * Devstack plugin: flush addresses on public bridge * LBaaS plugin bugfix * Fixing unittests * Fix after 'breakage' * Statistics support for Neutron LBaaS plugin * VMWare Edge appliance loadbalancer driver * Fix "Lock wait timeout" DB error on router binding * Adopt oslo incubator * Fix tests for routers * Add sync fake deploy edge driver call * DVS: ensure that provider networks work correctly * Use uuidutils from oslo\_utils * Add method to retrieve vdn\_scope\_id * Fix breakages due to upstream changes * Fix DVR for NSX-mh * Fix unit tests * Address group for metadata port should use admin context * Change the admin tenant for metadata handler * DVS: add security group extension * Finish switch to oslo\_log * Fixing spoofguard policy deletion * NSXv: do not create subnet under DB transaction * Add async param to deploy\_edge in fake\_vcns * Enable hacking to be run * Fix overlapping between gw and intf address * Adding unittesting for edge\_utils * Add simple\_dvs\_plugin * Replace "select for update" on nsxv\_router\_bindings * Use oslo\_log for logging * Fix the async between network and edge * Fix multiple workers occupy the last available vnic of shared edge * Fix binding shared router on unavailable edge * Add check-nsx-config to console scripts * Using python retrying module * Edge pool concurrency * Fix deploy redundant backup edges in multiple servers * Default gateway setting for metadata proxy * All the new subnet creation failed after the subnet count crossed 160 * NSXv: ensure that selection of edges is 'atomic' * Address race conditions with edge allocations: * Edge locks * Remove imports from 
neutron.plugins.vmware * Delete disassociated floating ips on external network deletion * Fix import path * Change default routing edge's size from Large to compact * Do not need to update dhcp edge when dhcp is disabled * Fix tenant can't associate fip to its vm by a public router * Don't wait on deleting interface when migrating routing service * Fix many interfaces adding on router problem * static routes support for VDR * Fix router create failed at the backend * GW&FIP&interface support for shared router * NSX-mh: Synchronize dhcp port creation * Do not attempt ext gw port reset for deleted routers * Use DEVICE\_OWNER\_DVR\_INTERFACE in NSX-mh plugin * Exclusive Router Support * Adding devstack hooks for 'unstack' run phase * Implement devstack external plugin * Introducing security group container * fix shared-router/metadata patch merge codes bug * Complete DHCP locking * Lock for security group vnic update * Locking support * Fix slow unit tests * Do not pass muatble object as parameter * Avoid desyncronization between neutron db and dhcp edge * Ensure that NSXv manager support concurrency * Avoid subInterface not found exception * Fix adding router interface job failed * Adding a retry mechanism when approving and publishing ip address * Metadata service concurrent init bugfix * Fix metadata and dhcp using same vnic problems * Add a configuration option to enable the use of spoofguard * Update dhcp edge metadata and vnic first and delete network * metadata\_shared\_secret should support encrypt * VDR Create failed due to metadata issue * Metadata address range change to 169.254.128.0/17 * Handle concurrent execution of metadata init * Adding Port Security support * Metadata init time improvements * Rename subnet metadata\_providers extension * Remove faulty debug logs * Fix wrong method call in nsx\_v.py * Metadata config validation * Metadata shared secret support * Metadata bugfix * Metadata providers bugfix: * Fix update gw\_info with 'enat\_snat' 
failed * NSXv: ensure that 'agent' is registered for extensions * Fix pep8 and py27 jobs * Remove network gateway mixin from nsx-v plugin * Use pretty\_tox for running unit tests * Add db mixins for NSX extensions * Use neutron extensions * vmware unit tests: tenant\_id must be in neutron ctx * Update .gitignore file * adapt UT to have a patch merge in Neutron * Migrate to oslo.concurrency * Rename qexception->nexception * Fix retry logic for UnAuthorizedRequest in race-condition * NSX: synchronize floating IP operations * NSX: Remove logic for creating chained logical switches * Removing neutron configuration file * Rename NsxvSectionMapping class * NSX plugin security group rules summarization * Fix router firewall interface tests * VMware-NSX: update documentation to reference VMware-NSX * VMware-NSX: clean up requirements file * VMware-NSX: skip tests that have ordering problems * VMware: initial NSXv developments * VMware: fix security group check on port create * VMware: fix gitreview 0.0.1 ----- * Create vmware-nsx with history * Updated from global requirements * Add OVS status and fix OVS crash * Cleanup req\_format in test\_api\_v2\_resource * Imported Translations from Transifex * Cisco: unsupported format character in log format * Correct arguments to logging function * Remove locking from network and subnet delete op * Removed unused iso8601 dependency * Add functional test for l3-agent metadata proxy * Remove mlnx plugin * Set timeout for functional job * Enable test\_migration * tests: initialize admin context after super().setUp call * Fixed test test\_update\_port\_security\_off\_address\_pairs * openvswitch/ofagent: Remove OVS.enable\_tunneling option * Imported Translations from Transifex * Remove unused dependencies * Generate testr\_results.html for neutron functional job * L3 Agent restructure - observer hierarchy * Replace non-ovs\_lib calls of run\_vsctl with libary functions * Don't restore stopped mock that is initialized in setUp() * 
Separate wait\_until to standalone function * Imported Translations from Transifex * Mock up time.sleep to avoid unnecessary wait in test\_ovs\_tunnel * Catch duplicate errors scheduling SNAT service * Fix for KeyError: 'gw\_port\_host' on l3\_agent * Migrate to oslo.context * Have L3 agent catch the correct exception * Not nova but neutron * Remove broad exception catch from periodic\_sync\_routers\_task * Fix race condition in ProcessMonitor * Updated from global requirements * Refactor process\_router method in L3 agent * Switch to using subunit-trace from tempest-lib * Move classes out of l3\_agent.py * Prettify tox output for functional tests * Services split, pass 2 * Fix IPv6 RA security group rule for DVR * Imported Translations from Transifex * ofa\_test\_base: Fix NoSuchOptError in UT * Add lbaasv2 extension to Neutron for REST refactor * Remove TODO for H404 * Update rpc\_api docs with example version update * Auto allocate gateway\_ip even for SLAAC subnets * Updated from global requirements * Split services code out of Neutron, pass 1 * Use comments rather than no-op string statements * Enforce log hints * Disallow log hints in LOG.debug * Reduce code duplication in test\_linux\_dhcp * Print version info at start * Enforce log hints in ofagent and oneconvergence * Make sudo check in ip\_lib.IpNetnsCommand.execute optional * Move set\_override('root\_helper', ...) 
to base functional class * Imported Translations from Transifex * Update i18n translation for NEC plugin log msg's * return the dict of port when no sec-group involved * Imported Translations from Transifex * Update i18n translation for IBM plugin log msg's * Workflow documentation is now in infra-manual * tox.ini: Prevent casual addition of bash dependency * Updated from global requirements * Remove RpcCallback class * Convert several uses of RpcCallback * Fix up an old RpcProxy assumption * Remove RpcProxy class * Cleanup recent generalization in post mortem debugger * radvd: pass -m syslog to avoid thread lock for radvd 2.0+ * Get rid of py26 references: OrderedDict, httplib, xml testing * Imported Translations from Transifex * Fix enable\_metadata\_network flag * Fix program name in --version output * Enforce log hints in opencontrail * Update i18n translation for Metaplugin plugin * Update i18n translation for Brocade plugin log msg's * Update i18n translation for Nuage plugin * Update i18n translation for Embrane plugin * Enforce log hints in neutron.plugins.plumgrid * Remove ovs-vsctl call from OVSInterfaceDriver * Update i18n translation for Midonet plugin * Enforce log hints in neutron.plugins.sriovnicagent * Enforce log hints in neutron.plugins.hyperv * Imported Translations from Transifex * Drop RpcProxy usage from DhcpAgentNotifyAPI * Updated the README.rst * Fix base test class for functional api testing * Use oslo function for parsing bool from env var * Don't block on rpc calls in unit tests * Refactor test\_migration * Strip square brackets from IPv6 addresses * Update i18n translation for BigSwitch plugin log msg's * Imported Translations from Transifex * pretty\_tox.sh: Portablity improvement * iptables\_manager: Fix get\_binary\_name for eventlet * test\_dhcp\_agent: Fix no-op tests * Drop old code from SecurityGroupAgentRpcApiMixin * Drop RpcProxy usage from ml2 AgentNotifierApi * Update i18n translation for Mellanox plugin and agent log msg's * 
Drop RpcProxy usage from L3AgentNotifyAPI * Simplify L3 HA unit test structure * Update i18n translation for VMware NSX plugin log msg's * Alter execute\_alembic\_command() to not assume all commands * hacking: Check if correct log markers are used * Fix hostname validation for nameservers * Removed python2.6 rootwrap filters * Imported Translations from Transifex * MeteringPluginRpc: Fix crash in periodic\_task * Enable undefined-loop-variable pylint check * Remove unused variables from get\_devices\_details\_list * Change description of default security group * Fix incorrect exception order in \_execute\_request * Migrate to oslo.i18n * Migrate to oslo.middleware * Remove unused xml constants * Drop RpcProxy usage from MeteringAgentNotifyAPI * Drop RpcProxy usage from l2population code * Drop RpcProxy usage from cisco apic ml2 plugin * Drop RpcProxy usage from oneconvergence plugin * Synced processutils and periodic\_task modules * Migrate to oslo.utils * Fix floating-ips in error state in dvr mode * Reject trailing whitespaces in IP address * Imported Translations from Transifex * CSCO:Tenants not to access unshared n/w profiles * Drop sudo requirement from a unit test * Remove Python 2.6 classifier * Update i18n translation for Cisco plugins and cfg agent log msg's * Remove ryu plugin * Imported Translations from Transifex * Drop RpcProxy usage from nec plugin * Drop RpcProxy usage from mlnx plugin * Drop RpcProxy usage from ibm plugin * Drop RpcProxy usage from hyperv plugin * Drop RpcProxy usage from cisco.l3 * Drop RpcProxy usage from cisco.cfg\_agent * Drop RpcProxy usage from brocade plugin * Update rally-jobs files * Test HA router failover * Imported Translations from Transifex * Update i18n translation for linuxbridge log msg's * Update i18n translation for openvswitch log msg's * Update i18n translation for ML2 plugin log msg's * Updated from global requirements * Imported Translations from Transifex * Enforce log hints in neutron.services * Enforce 
log hints in neutron.services.metering * Fix metadata proxy start problem for v6-v4 network * Fix AttributeError in RPC code for DVR * Drop RpcProxy usage from bigswitch plugin * Drop RpcProxy usage from VPNaaS code * Drop RpcProxy usage from metering\_agent * Fix context.elevated * Tighten up try/except block around rpc call * Implement migration of legacy routers to distributed * run\_tests.sh OS X script fixes * Eliminate unnecessary indirection in L3 agent * Show progress output while running unit tests * Drop RpcProxy usage from LBaaS code * Enforce log hints in neutron.services.loadbalancer * Enforce log hints in neutron.services.firewall * Enforce log hints in neutron.services.l3\_router * enable H401 hacking check * enable H237 check * Updated from global requirements * Check for default sec-group made case insensitive * Update i18n translation for neutron.server/scheduler log msg's * Update i18n translation for neutron.notifiers log msg's * Update i18n translation for neutron.common/debug log msg's * Imported Translations from Transifex * ofagent: Remove obsolete bridge\_mappings (plugin side) * Delete FIP namespace when last VM is deleted * Fix a race condition adding a security group rule * Drop RpcProxy usage from FWaaS code * Drop RpcProxy usage from neutron.agent.rpc.PluginApi * Fix a copy/pasted test mistake * Drop test code copied from nova * Drop several uses of RpcCallback * Add some basic rpc api docs * Drop RpcCallback usage from DhcpRpcCallback * Drop RpcProxy usage from PluginReportStateAPI * Fix hostname regex pattern * Catch NoResultFound in \_get\_policy\_profile\_by\_name * Validate loadbalancing method when updating a pool * Update i18n translation for neutron.api log msg's * Catch DBReferenceError exception during binding a router * Enable default SNAT from networks connected to a router indirectly * Imported Translations from Transifex * BSN: Optimistic locking strategy for consistency * BSN: include missing data in floating IP call * 
ofagent: Remove obsolete bridge\_mappings (agent side) * NSX: Validate gateway device list against DB * Drop RpcProxy usage from MetadataPluginApi * Drop usage of RpcProxy from L3PluginApi * Prevent an iteration through ports on IPv6 slaac * Use a string multiplier instead of 59 repetitions * Convert all incoming protocol numbers to string * Updated from global requirements * Correct raw table regex in test\_security\_groups\_rpc * BSN: Add network to ext\_gw\_info sent to backend * BSN: Set inconsistency record on delete failure * Fix PYTHONHASHSEED bugs in test\_security\_groups\_rpc * Subnet delete for IPv6 SLAAC should not require prior port disassoc * Fix client side versions in dhcp rpc API * Drop usage of RpcProxy from DhcpPluginApi * linuxbridge-agent: make vxlan unicast check more efficent * Moved out common testcases from test\_type\_vxlan.py * Update i18n translation for neutron.extension log msg's * Update i18n translation for neutron.db log msg's * Update i18n translation for neutron.cmd log msg's * Update i18n translation for neutron.agents log msg's * enable F812 check for flake8 * enable F811 check for flake8 * Decrease policy logging verbosity * Support pudb as a different post mortem debugger * Cleanup and refactor methods in unit/test\_security\_groups\_rpc * switch to oslo.serialization * Add rootwrap filters for ofagent * Updated policy module from oslo-incubator * Resolving some spelling mistakes * Fix for FIPs duplicated across hosts for DVR * Drop neutron.common.rpc.MessagingTimeout * Remove neutron.common.rpc.RemoteError * Remove neutron.common.rpc.RPCException * Remove useless return * Cisco VPNaaS and L3 router plugin integration * Fix missing allowed command in openvswitch xenapi agent * fix event\_send for re-assign floating ip * Remove openvswitch core plugin entry point * rootwrap config files reference deleted quantum binaries * Fix L3 HA network creation to allow user to create router * Update default value for agent\_required 
attribute * SRIOV: Fix Wrong Product ID for Intel NIC example * Imported Translations from Transifex * Updated from global requirements * Purge use of "PRED and A or B" poor-mans-ternary * Include call to delete\_subnet from delete\_network at DB level * Use correct base class for unit tests for ML2 drivers * Replace "nova" entries in iptables\_manager with "neutron" * Drop and recreate FK if adding new PK to routerl3bindings * Imported Translations from Transifex * Remove duplicate ensure\_remove\_chain method in iptables\_manager * ML2: fix file permissions * Fix sneaky copypaste typo in ovs agent scheduler test * Make L2 DVR Agent start successfully without an active neutron server * Detect if iproute2 support SR-IOV commands * Use stop() method on MessageHandlingServer * Rename constant to a more appropriate name * Big Switch: Fix SSL version on get\_server\_cert * Check for concurrent port binding deletion before binding the port * Imported Translations from Transifex * Batch ports from security groups RPC handler * Fix incorrect int/tuple comparison during binary search * Big Switch: Send notification after port update * Allow to add router interface to IPv6 SLAAC network * ML2 Cisco Nexus MD - not overwriting existing config * Reorder operations in (l3\_dvr) update floating ip * Use RPC instead of neutron client in metadata agent * Add assertion to test\_page\_reverse method * Adds an option to enable broadcast replies to Dnsmasq * Add advsvc role to neutron policy file * NSX: allow multiple networks with same vlan on different phy\_net * NSX: Fix foreign key constraint delete provider network * Imported Translations from Transifex * Fix 'Length too long' error in neutron-dsvm-functional tests * Remove use\_namespaces from RouterInfo Property * Fix handling of CIDR in allowed address pairs * Updated from global requirements * Remove XML support * enable F402 check for flake8 * enable E713 in pep8 tests * NEC plugin: Allow to apply Packet filter on OFC router 
interface * \_update\_router\_db: don't hold open transactions * Big Switch: Switch to TLSv1 in server manager * Only resync DHCP for a particular network when their is a failure * Validate network config (vlan) * Validate local\_ip for OVS agent is actual ip address * Imported Translations from Transifex * Hyper-V: Remove useless use of "else" clause on for loop * Enable no-name-in-module pylint check * Move disabling of metadata and ipv6\_ra to \_destroy\_router\_namespace * Updated from global requirements * Adds macvtap support * Remove duplicate import of constants module * Switch run-time import to using importutils.import\_module * Enable assignment-from-no-return pylint check * tox.ini: Avoid using bash where unnecessary * l2population\_rpc: docstring improvements * Fix race condition on processing DVR floating IPs * neutron-db-manage finds automatically config file * Ensure test\_agent\_manager handles random hashseeds * Ensure ofagent unit tests handles random hashseeds * Moves the HA resource creations outside of transaction * Modify docstring on send\_delete\_port\_request in N1kv plugin * Empty files should not contain copyright or license * Remove superfluous except/re-raise * Remove single occurrence of lost-exception warning * Schema enhancement to support MultiSegment Network * Remove redundant initialization and check from DVR RPC mixin * Improve performance of security group DB query * Optimize query in \_select\_dhcp\_ips\_for\_network\_ids * Updated cache module and its dependencies * Updated service.py and its dependencies * Updated fileutils and its dependencies * Cisco N1kv: Fix update network profile for add tenants * DB: Only ask for MAC instead of entire port * Only fetch port\_id from SG binding table * NSX: Make conn\_idle\_timeout configurable * nsx plugin: keep old priority when reconnecting bad connection * l3\_agent: avoid name conflict with context * Guard against concurrent port removal in DVR * Refactor l2\_pop code to pass 
mac/ip info more readably * Fix KeyError in dhcp\_rpc when plugin.port\_update raise exception * Refactor \_make\_subnet\_dict to avoid issuing unnecessary queries * openvswitch: Remove no longer used options * VPNaaS Cisco unit test clean-up * Call DVR VMARP notify outside of transaction * remove E251 exemption from pep8 check * Race for l2pop when ports go up/down on same host * Catch exceptions in router rescheduler * Minor: remove unnecessary intermediate variable * Handle unused set\_context in L3NatTestCaseMixin.floatingip\_with\_assoc * Use EUI64 for IPv6 SLAAC when subnet is specified * Arista L3 Ops is success if it is successful on one peer * Add unique constraints in IPAvailabilityRange * Remove two sets that are not referenced * Update VPN logging to use new i18n functions * mock.assert\_called\_once() is not a valid method * Check for VPN Objects when deleting interfaces * Compare subnet length as well when deleting DHCP entry * Add pylint tox environment and disable all existing warnings * Updated from global requirements * update the relative path of api\_extensions\_path * Reduce security group db calls to neutron server * Ignore top-level hidden dirs/files by default * Remove some duplicate unit tests * NSX: drop support to deprecated dist-router extension * Execute udevadm on other linux installs * Avoid constructing a RouterInfo object to get namespace name * Drop sslutils and versionutils modules * Imported Translations from Transifex * Remove an argument that is never used * Refactor \_process\_routers to handle a single router * Add Juno release milestone * Add database relationship between router and ports * Fix L2 agent does not remove unused ipset set * Add Juno release milestone * Add database relationship between router and ports * Disable PUT for IPv6 subnet attributes * Skip IPv6 Tests in the OpenContrail plugin * Remove all\_routers argument from \_process\_routers * update ml2\_migration to reflect optional methods * Disable PUT for 
IPv6 subnet attributes * Do not assume order of lvm.tun\_ofports set elements * Skip IPv6 Tests in the OpenContrail plugin * Removed kombu from requirements * Updated from global requirements * Imported Translations from Transifex * Imported Translations from Transifex * Remove two sets that are not referenced * Forbid update of HA property of routers * Forbid update of HA property of routers * Teach DHCP Agent about DVR router interfaces * Updated from global requirements * Allow reading a tenant router's external IP * Raise exception if ipv6 prefix is inappropriate for address mode * Retry getting the list of service plugins * Add missing methods to NoopFirewallDriver * Don't fail when trying to unbind a router * Modify the ProcessMonitor class to have one less config parameter * Big Switch: Don't clear hash before sync * Remove sslutils from openstack.common * Divide \_cleanup\_namespaces for easy extensibility * L3 Agent should generate ns\_name in a single place * Add comments to iptables rules to help debugging * nit : missing a "%s" in a log message * L3 agent should always use a unique CONF object * Iterate over same port\_id if more than one exists * Fix setup of Neutron core plugin in VPNaaS UT * remove openvswitch plugin * Fix pid file location to avoid I->J changes that break metadata * Don't fail when trying to unbind a router * remove linuxbridge plugin * Allow reading a tenant router's external IP * Fix sleep function call * Add admin tenant name to nova notifier * ML2: move L3 cleanup out of network transaction * Open Kilo development * ML2 Cisco Nexus MD: Fix UT to send one create vlan message * Implement ModelsMigrationsSync test from oslo.db * Imported Translations from Transifex * Update migration scripts to support DB2 * Do not assume order of report list elements * Disallow unsharing used firewall policy * Imported Translations from Transifex * Add missing methods to NoopFirewallDriver * Raise exception if ipv6 prefix is inappropriate for 
address mode * Fix broken port query in Extraroute test case * Revert "Cleanup floatingips also on router delete" * fix dvr snat bindings for external-gw-clear * Fix quota limit range validator * Remove default dictionary from function def * Fix KeyError when getting secgroup info for ports * Create DHCP port for IPv6 subnet * Deletes floating ip related connection states * Do not lookup l3-agent for floating IP if host=None, dvr issue * Remove RPC notification from transaction in create/update port * Do not assume order of body and tags elements * Remove the translation tag for debug level logs in vmware plugin * Retry getting the list of service plugins * Fix entrypoint of OneConvergencePlugin plugin * Forbid regular users to reset admin-only attrs to default values * Finish small unit test refactor of API v2 tests * Security groups: prevent race for default security group creation * Stop admin using other tenants unshared rules * Eliminate OrderedDict from test\_api\_v2.py * Mock out all RPC calls with a fixture * Add logging for enforced policy rules * Imported Translations from Transifex * Remove unnecessary \_make\_port function in BSN UTs * ofagent: Drop log level of tenant-triggerable events * Set vif\_details to reflect enable\_security\_group * Use dict\_extend\_functions to populate provider network attributes * Fix foreign key constraint error on ml2\_dvr\_port\_bindings * Some clean up of code I'm preparing to modify * Indicate the begin and end of the sync process to EOS * DVR to delete router namespaces for service ports * Do not assume order of device\_ids set elements * Fix 500 error on retrieving metadata by invalid URI * Only setup dhcp interface if dhcp is not active on network * HA routers master state now distributed amongst agents * Rework and enable VPNaaS UT for Cisco CSR REST * Update URL of Ryu official site in ofagent README files * Set dsvm-functional job to use system packages * Delete a broken subnet delete unit test * Fix to delete 
user and group association in Nuage Plugin * Deletes FIP agent gw port when last VM is deleted * Delete DB records instead of tables to speedup UT * Stop exception log in Big Switch unit tests * Separate Configuration from Freescale SDN ML2 mechanism Driver * NSX plugin: set VNIC\_TYPE port binding attribute * Access correct key for template name * ofagent: Ignore unknown l2pop entry removals * Neutron metering does not check overlap ip range * Rename workers to api\_workers and simplify code * Fix DVR to service DHCP Ports * Tunnel ID range validation for VXLAN/GRE networks * Remove @author(s) from copyright statements * BSN: Add context to backend request for debugging * Don't create unused ipset chain * Imported Translations from Transifex * Avoid an extra database query in schedule\_snat\_router * Add HA support to the l3 agent * Stop ignoring 400 errors returned by ODL * Fix a test\_db\_plugin unit test side\_effect usage * Imported Translations from Transifex * Fix KeyError on missing gw\_port\_host for L3 agent in DVR mode * Stop using intersphinx * Updated from global requirements * Cisco N1kv: Remove vmnetwork delete REST call on last port delete * Remove the Cisco Nexus monolithic plugin * L3 Metering label as shared * Check for ports in subnet before deleting it from Nuage VSD * ofagent: Fix a possible crash in arp responder * Add a new scheduler for the l3 HA * Add functional testing to ipset\_manager * Properly handle empty before/after notifications in l2pop code * Remove logic for conditional migrations * Make Juno migrations config independent * Introduce havana initial state * Adds ipset support for Security Groups * Refactor l3\_agent.process\_router\_floating\_ip\_addresses * Cleanup floatingips also on router delete * use TRUE in SQL for boolean var * Remove faulty .assert\_has\_calls([]) * Fail on None before iteration attempt * Imported Translations from Transifex * ofagent: Remove broken XenAPI support * Passing admin tenant name to EOS * Fix 
for floating ip association and deletion * BSN: Allow concurrent reads to consistency DB * Remove useless check in \_rpc\_update\_firewall * Use renamed \_fail\_second\_call() in cisco nexus tests * Add L3 VRRP HA base classes * Allow DHCPv6 reply from server to client * Don't allow user to set firewall rule with port and no protocol * Added TAP\_DEVICE\_PREFIX info to common/constants * Fix comments in api.rpc.handlers * ofagent: Clean up logging * UTs: Disable auto deletion of ports/subnets/nets * Remove second call to get\_subnets in delete\_subnet * Changes to support FWaaS in a DVR based environment * Imported Translations from Transifex * Remove hints from schedule\_router * Call unbind\_snat\_servicenode from schedule router * NSX: Correct allowed\_address\_pair return value on create\_port * Add the unit tests for ml2.rpc module * Neutron should not use the neutronclient utils module for import\_class * Add unit-test assert to check dict is superset of dict * Pythonified sanity\_check.all\_tests\_passed * Removed direct access to MessagingServer * Remove subnet\_id from check\_ports\_exist\_on\_l3agent * Add requests\_mock to test-requirements.txt * Removed kombu from requirements * Fix metadata agent's auth info caching * Throw exception instances instead of classes * Add scheduler unit tests to enable bug fixes and refactoring * Fix AttributeError when setting external gateway on DVR router * Stop tracking connections in DVR FIP Namespace * Fixes formatting for debug output in neutron/agent/l3\_agent.py * Avoid testing code duplication which introduced testing bugs * Supply missing cisco\_cfg\_agent.ini file * Reset IPv6 detection flag after IPv6 tests * Remove unused arg to config.setup\_logging() * Updated from global requirements * Revert "Skip functional l3 agent test" * Fix leftover Timeout effecting most eventlet calls * shared policy shouldn't have unshared rules * ofagent: Remove @author tags and update copyright notices * Work toward Python 3.4 
support and testing * Cleanup rename of get\_compute\_ports\_on\_host\_by\_subnet * Revert "Cisco DFA ML2 Mechanism Driver" * Refactor security group rpc call * Avoid auto-scheduling for distributed routers * Fix interface IP address for DVR with gateway * BSN: Bind external ports in ML2 driver * Remove SELECT FOR UPDATE use in delete\_firewall * Big Switch: Retry on 503 errors from backend * Remove absolute path in KillFilter for metadata-proxy * Implements sync mechanism between Neutron and Nuage VSD * ofagent: Implement physical\_interface\_mappings * ofagent: Enable local arp responder for TYPE\_LOCAL * ofagent: Enable local arp responder for TYPE\_FLAT * Implements ProcessMonitor to watch over external processes * Skip functional l3 agent test * ofagent: Local arp responder for VLAN * Prevent SystemExits when running tests * Big Switch: Separate L3 functions into L3 service * Apic drivers enhancements (second approach): Topology * Big Switch: Bind IVS ports in ML2 driver * Add functional test for IptablesManager * Clarify message when no probes are cleared * Remove reference to cisco\_cfg\_agent.ini from setup.cfg again * Fix a bug in Mellanox plugin RPC caused by secgroup RPC refactoring * Don't spawn metadata-proxy for non-isolated nets * l2pop: Allow network types overridable * ML2: Fix release of network segments to allocation pools * Fix a recent ipv6 UT regression * Imported Translations from Transifex * Add endpoint\_type parameter to MetaInterfaceDriver * Remove chain for correct router during update\_routers() * ofagent: Enable local arp responder for local VMs * ofagent: merge br-tun into br-int * Apic drivers enhancements (second approach): Sync * Apic drivers enhancements (second approach): L3 refactor * ML2 Type Driver refactor part 2 * Adds router service plugin for CSR1kv * Introduces a keepalived manager for HA * Support for extensions in ML2 * Cisco DFA ML2 Mechanism Driver * Improve some plugins help strings * Provide a quick way to run 
flake8 * Apic drivers enhancements (second approach): L2 refactor * Make SecurityGroupsRpcCallback a separate callback class * Subnets with prefix length 0 are invalid * Adding mechanism driver in ML2 plugin for Nuage Networks * Fix state\_path in tests * Add functional test for l3\_agent * remove explicit include of the ovs plugin * NSX: log request body to NSX as debug * Datacenter moid should not be tuple * Remove ovs dependency in embrane plugin * Layer 3 service plugin to support hardware based routing * Remove binding:profile update from Mellanox ML2 MD * Remove old policies from policy.json * Apic drivers enhancements (second approach): Backend * Make DvrServerRpcCallback a separate callback class * Make DhcpRpcCallback a separate callback class * Adding support of DNS nameserver and Host routes for the Nuage Plugin * Block downgrade from icehouse to havana * Use lockutils module for tox functional env * Do not use auto\_schedule\_routers to add router to agent * Fix func job hook script permission problems * Check for IPv6 file before reading * Remove SELECT FOR UPDATE use in update\_firewall * Fix l3 agent scheduling logic to avoid unwanted failures * Fix InvalidRequestError in auto\_schedule\_routers * Fix incorrect number of args to string format * Add support for provider-network extension in nuage Plugin * Make L3RpcCallback a separate callback class * Cisco VPN with in-band CSR (interim solution) * Inline "for val in [ref]" statements * Minor refactoring for add\_router\_to\_l3\_agent * Predictable iptables chains output order * Prefer "val !=/== ref" over "val (not) in [ref]" in conditions * Heal script: Drop fks before operating on columns * Fixed template of IPsecSiteConnectionNotFound message * Fix DVR to service LBaaS VIP Ports * Refactor test\_type\_gre/vxlan to reduce duplicate code * Fix heal\_script for MySQL specifics * Make log level in linux.utils.execute configurable * Imported Translations from Transifex * Networks are not scheduled to 
DHCP agents for Cisco N1KV plugin * ext-gw update on dvr router improperly handled by l3-agent * metering driver default value is different in code and config file * Fix for floatingip-delete not removing fip\_gw port * Increase the default poll duration for Cisco n1kv * Fix IpNetnsCommand to execute without root\_wrapper when no netns * Increase ovsdb\_monitor.SimpleInterfaceMonitor start timeout * Change autogenerate to be unconditional * Remove status initialization from plugin's create\_firewall * Set firewall state to CREATED when dealing with DVR * Add template attr. for subnet, router create in Nuage plugin * Implement ip\_lib.device\_exists\_with\_ip\_mac * Add \_store\_ip\_allocation method * Updated from global requirements * Refactor plugin setup helpers out of test.base * Raise proper exception in case duplicate ipv6 address is allocated * Do not explicitly set mysql\_engine * Fixes Hyper-V agent issue on Hyper-V 2008 R2 * Removing sorted() function from assertEqual() * Add hook scripts for the functional infra job * ML2 Type driver refactor part 1 * Minor refactoring of auto\_schedule\_routers * Add ipv6 forwarding for router namespaces * Refresh rpc\_backend values in unit tests to those from oslo.messaging * Add unit tests covering single operations to ODL * One Convergence: Skip all tests with 'v6' in name * VPNaaS: Enable UT cases with newer oslo.messaging * Do not log WARN messages about lack of L3 agents for DVR routers * Add specific docs build option to tox * Fix policy rules for adding and removing router interfaces * Refactor type\_tunnel/gre/vxlan to reduce duplicate code * Join tables in query for down L3 agents * Rename range to avoid shadowing the builtin * Fixes Hyper-V issue due to ML2 RPC versioning * A10 Networks LBaaS v1 Driver * Assign Cisco nw profile to multi-tenants in single request * Remove unused network parameter from \_allocate\_ips\_for\_port * corrects the typos in l3\_router\_plugin's comments * Support Stateful and 
Stateless DHCPv6 by dnsmasq * Implements securitygroup extension for nuage plugin * Fix bigswitch setup.cfg lines * Arista Layer 3 Sevice Plugin * Add config for visibility of cisco-policy-profile * Ensure ip6tables are used only if ipv6 is enabled in kernel * Remove invalid or useless initialization in test\_type\_vxlan * Fix migration set\_length\_of\_description\_field\_metering * Set InnoDB engine for all existing tables * Use oslo.db create\_engine instead of SQLAlchemy * Big Switch: Check for 'id' in port before lookup * Reorder operations in create\_vip * Send HTTP exceptions in the format expected by neutronclient * Change nexus\_dict to accept port lists * Update DVR Binding when router\_id changes * Imported Translations from Transifex * Remove auto-generation of db schema from models at startup * Cisco N1kv plugin to send subtype on network profile creation * Implement namespace cleanup for new DVR namespaces * Fix config option names in ml2\_conf\_sriov.ini * NSX: Avoid floating IP status reset * correct getLoggers to use \_\_name\_\_ in code * Skip FWaaS config mismatch check if RPC method is unsupported * NSX: lift restriction on DVR update * Updated from global requirements * Use jsonutils instead of stdlib json * Remove INACTIVE status from FWaaS * Ignore http\_proxy while connecting to test WSGI server * Fix interface add for dvr with gateway * l2pop: get\_agent\_ports: Don't yield (None, {}) * ML2: Make get\_device\_details report mac address as well * Delete DVR namespaces on node after removing last VM * Fix PortNotFound error during update\_device\_up for DVR * Option to remove routers from dead l3 agents * Remove SELECT FOR UPDATE use in ML2 tunnel driver add\_endpoint * Fix KeyError during sync\_routers * Fix PortNotFound exception during sync\_routers * VPNaaS: Cisco fix validation for GW IP * Raise NotImplementedError instead of NotImplemented * Imported Translations from Transifex * Fix duplicate function: test\_getattr\_unallowed\_attr * 
Preserve link local IP allocations for DVR fip ns across restart * Fix 404 error fetching metadata when using DVR * Raise exception for network delete with subnets presents * SecurityGroupRuleExists should point out rule id inseand of group id * Opencontrail plug-in implementation for core resources * Do not assume order of new\_peers list elements * Make plugin and l3plugin available as mixin's properties * Use call to report state when ovs\_agent starts up * add auth token to context * Fixes an issue with FIP re-association * NSX: unify the two distributed routing extensions * NSX: fix wording for configuration option * MLNX Agent: ensure removed ports get treated on resyncs * Add delete operations for the ODL MechanismDriver * Predictable field and filter ordering * Fixing neutron-db-manage with some options other than upgrade/downgrade * Removes extra indents from TestSubresourcePlugin * ofagent: Upgrade note about firewall\_driver * Return port context from \_bind\_port\_if\_needed * MLNX Agent: Process port\_update notifications in the main agent loop * Fix session's InvalidRequestError because of nested rollback * Remove unneeded device\_owner field from l2pop tuple * ofagent: Remove network\_delete method * Do not assume order of parameters in OVSBridge.add\_flow call * Fix to throw correct error code for bad attribute * Improve external gateway update handling * Do not assume order of pci slot list * DeferredBridge to allow add\_tunnel\_port passthru * Enabled Cisco ML2 driver to use new upstream ncclient * Fix to enable L2pop to serve DVR * Remove duplicated check for router connect to external net * ofagent: Add a missing normalized\_port\_name * Return 403 instead of 404 on attr policy failures * Proper validation for inserting firewall rule * Imported Translations from Transifex * Ensure assertion matches dict iter order in test * Fix 500 error during router-update for dvr routers * Simple refactor to stop passing around an unused parameter * Make 
\_build\_uri\_path output predictable * Radware: When a pip is needed, reuse the Port * Remove redundant topic from rpc calls * l3\_db: refactor L3\_NAT\_DB\_mixin * OVS flows apply concurrently using a deferred OVSBridge * Do not assume order of network\_uuid's * Big Switch: Only update hash header on success * ofagent: Stop monitoring ovsdb for port changes * ofagent: Desupport ancillary bridges * Add a tox test environment for random hashseed testing * OFAgent: Implement arp responder * Updated from global requirements * Do not assume order of quotas dictionary elements * Move Cisco VPN RESTapi URI strings to constants * Remove ignored do\_request timeout argument * Move from Python logging to Openstack logging * Imported Translations from Transifex * NSX: remove duplicate call to set\_auth\_cookie() * NSX: Correct default timeout params * Remove reference to cisco\_cfg\_agent.ini from setup.cfg * Exit Firewall Agent if config is invalid * Fix spelling mistakes * Fix DB Duplicate error when scheduling distributed routers * Imported Translations from Transifex * Make ML2 ensure\_dvr\_port\_binding more robust * centralized router is incorrectly scheduled * Fix-DVR Gateway clear doesn't delete csnat port * Fix spelling in get\_plugin\_interface docstring * Use storage engine when creating tables in migrations * Removed configobj from test requirements * Implement Midonet Juno Network Api calls * Add missing ml2 plugin to migration 1fcfc149aca4 * Replace nullable from primary keys in tz\_network\_bindings with default * Use correct section for log message if interface\_driver import fails * Make sure that gateway is in CIDR range by default * test\_l3\_plugin: L3AgentDbInteTestCase L3AgentDbSepTestCase fails * Add L3 Scheduler Changes for Distributed Routers * Pass filters in arrays in get\_agent\_gw\_ports\_exist\_for\_network * Do not schedule network when creating reserved DHCP port * Check that router info is set before calling \_update\_arp\_entry * Move ARP 
responder test to sanity command * neutron.conf does not have the definition of firewall quotas * Fix wrong order of tables in downgrade * Fix deprecated opt in haproxy driver * Race condition of L3-agent to add/remove routers * Replaced the strings with respective constants * Make dvr\_vmarp\_table\_update call conditional to dvr extension * ofagent: Update a comment in port\_bound * Updated from global requirements * Set promote\_secondaries when creating namespaces * Functional tests work fine with random PYTHONHASHSEED * Call config\_parse in base test setup * ML2 additions to support DVR * Make test\_l3\_agent.\_prepare\_router\_data a module function * Remove redundant code in tests/unit/test\_l3\_agent * Fix ML2 Plugin binding:profile update * Set python hash seed to 0 in tox.ini * Add definition for new VIF type * Configuration agent for Cisco devices * Handle bool correctly during \_extend\_extra\_router\_dict * Encapsulate some port properties in the PortContext * Changes to remove the use of mapping tables from Nuage plugin * Updated from global requirements * Log exceptions inside spawned functions * Correct misspelled variable name * Avoid RequestURITooLong exception in metadata agent * Move loadbalancer vip port creation outside of transaction * Define some abstract methods in VpnDriver class * ML2 mechanism driver for SR-IOV capable NIC based switching, Part 2 * Modify L3 Agent for Distributed Routers * Audited attribute for policy update not changing * OFAgent: Share codes of l2-population in OVS agent * This patch changes the name of directory from mech\_arista to arista * ML2 mechanism driver for SR-IOV capable NIC based switching, Part 1 * Add rule for updating network's router:external attribute * L2 Agent-side additions to support DVR * Imported Translations from Transifex * NSX: fix router ports port\_security\_enabled=False * Add partial specs support in ML2 for multiprovider extension * Add partial specs support in ML2 for gre/vxlan provider 
networks * Set nullable=False on tenant\_id in apic\_contracts table * call security\_groups\_member\_updated in port\_update * The default value of quota\_firewall\_rule should not be -1 * Correct LOG.debug use * Fix incorrect downgrade * Fix spelling mistake in the log message * Imported Translations from Transifex * Support Router Advertisement Daemon (radvd) for IPv6 * Move plugin.delete\_port call out of transaction * Add partial specs support in ML2 for vlan provider networks * ML2: Update a comment after the recent bind\_port change * NSX: fix validation logic on network gateway connect * Initialize RpcProxy objects correctly * Fix DVR regression for ofagent * RPC additions to support DVR * no quota for allowed address pair * Allow to import \_LC, \_LE, \_LI and \_LW functions directly * L2 Model additions to support DVR * Fixed audit notifications for dhcp-agent-network * Make readme reference git.openstack.org not github * Fix enums usage for postgres in migrations * Return a tuple of None's instead of one None * Fix a log typo in ML2 manager.bind\_port() * Big Switch: Remove consistency hash on full sync * VPNaaS: Separate validation for Cisco impl * VPNaaS: separate out validation logic for ref impl * VMWare: don't notify on disassociate\_floatingips() * Add L3 Extension for Distributed Routers * VPNaaS Cisco REST client enhance CSR create * Bump hacking to version 0.9.2 * Log methods using rpc communcation * Fixes port update failure when device ID is not updated * Support Quota extension in MidoNet plugin * NSX: Remove unneed call to \_ensure\_default\_security\_group * Use auth\_token from keystonemiddleware * update vsm credential correctly * Shamelessly removing commented print line * L3 agent prefers RPC messages over full sync * Dnsmasq config files syntax issue when dhcp\_domain is empty * Database healing migration * Fix incorrect default paramater in migration * Use method's logger in log decorator * Fixed audit notifications for 
l3-agent-router ops * Expand arp\_responder help text * Send network name and uuid to subnet create * Cisco: Fix test cases which make incorrect create requests * ML2: Bind ports outside transactions * Freeze models for healing migration * NSX: Optionally not enforce nat rule match length check * ofagent: Handle device name prefixes other than "tap" * Add -s option for neutron metering rules * Security groups extension for PLUMgrid plugin * Missing max\_routes in neutron.conf * Clear entries in Cisco N1KV specific tables on rollback * Allow unsharing a network used as gateway/floatingip * Change all occurences of no\_delete to do\_delete * Split up metering test case into plugin + test case * Use integer server\_default value for multicast\_ip\_index * Validate expected parameters in add/remove router interfaces * Revert "VPNaaS REST Client UT Broken" * Mock out tunnel\_sync in test to avoid sleeping * Add 'server\_default' parameter * Add BSN plugin to agent migration script * Move \_convert\_to\_nsx\_transport\_zones into nsx\_utils * Extract CommonDBMixin to a separate file * Remove dead helper function from test\_l3\_plugin * Added support for NOS version 4.1.0, 5.0.0 and greater * Remove reference to setuptools\_git * NSX: neutron router-interface-add should clear security-groups * Refactor 'if false do nothing' logic in l3 scheduler db * Imported Translations from Transifex * Add a gate-specific tox env for functional tests * NSX: remove unnecessary checks on network delete * Bump min required version for dnsmasq to 2.63 * Add CONTRIBUTING.rst * Do not mark device as processed if it wasn't * Fix 'server\_default' parameter usage in models * Fix missing migration default value * Add a link to a blog post by RedHat that discusses GRE tunnels in OVS * Updated from global requirements * VPNaaS REST Client UT Broken * Avoid notifying while inside transaction opened in delete\_port() * sync periodic\_task fix from incubator * Omit mode keyword when spawning dnsmasq 
with some ipv6 subnets * Fixed spelling mistake in securitygroups\_rpc * OVS agent: fix a comment on CANARY\_TABLE * ofagent: Fix an argument mismatch bug in commit 9d13ea88 * Fix UnboundLocalError raised during L3 router sync task * Updated from global requirements * Fix isinstance assertions * Imported Translations from Transifex * Allow setting a rootwrap cmd for functional tests * Fix OVSBridge.get\_port\_ofport to handle empty output * Ignore variable column widths in ovsdb functional tests * Add configurable http\_timeout parameter for Cisco N1K * NSX: fix indentations * BSN: Remove db lock and add missing contexts * NSX: properly handle floating ip status * Updated from global requirements * Fix example for running individual tests * Stop the dhcp-agent process when dnsmasq version is not determined * Switch to using of oslo.db * Replace occurences of 'test\_tenant' with 'test-tenant' in tests * lb-agent: ensure removed devices get treated on resyncs * Imported Translations from Transifex * Add sanity check for nova notification support * changes ovs agent to get bridges via ovs\_lib * Use correct MAX\_LEN constant in agent functional tests * remove unsupported middleware * Fix re-creation of the pool directory * Add config for performance gate job * Use patch ports to interconnect integration/physical bridges * Exit rpc\_loop when SIGTERM is recieved in ovs-agent * LBaaS new object model logging no-op driver * ofagent: Use port desc to monitor ports on br-int * Fixed dhcp & gateway ip conflict in PLUMgrid plugin * Introduce bulk calls for get device details * validate flat networks physical name * Remove \_\_init\_\_ method from TunnelCallback mixin * OVS agent: Correct bridge setup ordering * Revert "Revert "ovs-agent: Ensure integration bridge is created"" * Imported Translations from Transifex * Synced log module and its dependencies from olso-incubator * Pass newly created router to \_update\_router\_gw\_info * don't ignore rules that are already 
enforced * Updated neutron.conf to reflect new RPC options * Moved rpc\_compat.py code back into rpc.py * Updated from global requirements * Updated from global requirements * ofagent: move main module from ryu repository * Don't convert numeric protocol values to int * Imported Translations from Transifex * Revert "Check NVP router's status before deploying a service" * Remove the useless vim modelines * Imported Translations from Transifex * Changing the poll\_duration parameter type to int * Add test cases for plugins/ml2/plugin.py * Removed local modification in incubator code * Removed 'rpc' and 'notifier' incubator modules * Removed create\_rpc\_dispatcher methods * Use openstack.common.lockutils module for locks in tox functional tests * Pass serializer to oslo.messaging Notifier * Fix auto\_schedule\_networks to resist DBDuplicateEntry * Imported Translations from Transifex * Control active number of REST calls from Cisco N1kv plugin to VSM * Revert "ovs-agent: Ensure integration bridge is created" * ValueError should use '%' instead of ',' * NSX: return 400 if dscp set for trusted queue * NSX sync cache: add a flag to skip item deletion * NSX: propagate network name updates to backend * Renamed argument for create\_consumer[s] * Renamed consume\_in\_thread -> consume\_in\_threads * Renamed start\_rpc\_listener -> start\_rpc\_listeners * Port to oslo.messaging * Imported Translations from Transifex * Pass 'top' to remove\_rule so that rule matching succeeds * Big Switch: Stop watchdog on interval of 0 * Remove old quantum scripts * Move \_filter\_non\_model\_columns method to CommonDbMixin * Updated from global requirements * Ignore emacs checkpoint files * Big Switch: Lock consistency table for REST calls * Check port value when creating firewall rule with icmp protocol * Improve docstring for OVSNeutronAgent constructor * Big Switch ML2: sync detection in port-update * Imported Translations from Transifex * Remove SELECT FOR UPDATE use in ML2 type driver 
release\_segment * Add vlan type driver unittests * Make sure we call BaseTestCase.setUp() first * Don't explicitly call .stop() on mock.patch objects * Don't instantiate RPC clients on import * Configure agents using neutron.common.config.init (formerly .parse) * linuxbridge-agent: process port updates in the main loop * Notify systemd when starting Neutron server * Ensure entries in dnsmasq belong to a subnet using DHCP * Added missing core\_plugins symbolic names * Trigger provider security group update for RA * NSX: revert queue extension name change * Fix pool statistics for LBaaS Haproxy driver * Don't use root\_helper when it's not needed * Introduced rpc\_compat.create\_connection() * Copy-paste RPC Service class for backwards compatibility * Introduce RpcCallback class * Fix opt helpstring for dhcp\_lease\_duration * Consistently use jsonutils instead of specific implementation * Imported Translations from Transifex * Adding static routes data for members * remove pep8 E122 exemption and correct style * Change default netpartition behavior in nuage plugin * Add 'ip rule ...' 
support to ip\_lib * Add missing keyword raise to get\_profile\_binding function * Add logging for NSX status sync cache * Big Switch: Remove unnecessary initialization code * Big Switch: Import DB module in unit test * When l2-pop ON, clean stale ports in table0 br-tun * remove E112 hacking exemption and fix errors * Updated from global requirements * Allowed address pair: Removing check for overlap with fixed ips * NeutronManager: Remove explicit check of the existence of an attribute * Fix invalid IPv6 address used in FakeV6 variables * Improve vxlan type driver initialization performance * Floatingip extension support for nuage plugin * ovs-agent: Ensure integration bridge is created * Brocade mechanism driver depends on the brocade plugin templates * Brocade mechanism driver should be derived from ML2 plugin base class * changes ovs agent\_id init to use hostname instead of mac * multiprovidernet: fix a comment * Imported Translations from Transifex * Fix race condition with firewall deletion * extensions: remove 'check\_env' method * Check the validation of 'delay' and 'timeout' * Control update, delete for cisco-network-profile * Ensure routing key is specified in the address for a direct producer * Support Subnets that are configured by external RAs * Refactor code in update\_subnet, splitting into individual methods * Make allocation\_pools attribute of subnet updateable by PUT * Monkey patch threading module as early as possible * Introduced transition RPC exception types * Added RpcProxy class * ofagent: Fix VLAN usage for TYPE\_FLAT and TYPE\_VLAN * Big Switch: Catch exceptions in watchdog thread * Use import from six.moves to import the queue module * Start an unstarted patch in the hyperv unit tests * Imported Translations from Transifex * Fix NVP FWaaS occurs error when deleting a shared rule * Check NVP router's status before deploying a service * Add an option to turn off DF for GRE and VXLAN tunnels * Increase default metadata\_workers, backlog to 
4096 * Big Switch: Add missing data to topology sync * Replace XML with JSON for N1kv REST calls * Big Switch: Call correct method in watchdog * Freescale SDN Mechanism Driver for ML2 Plugin * OVS Agent: limit veth names to 15 chars * Added note to neutron.conf * Return no active network if the agent has not been learnt yet * Sync service module from oslo-incubator * ovs, ofagent: Remove dead code * Default to setting secure mode on the integration bridge * Cisco APIC Layer 3 Service plugin * Allow neutron-sanity-check to check OVS patch port support * Remove run-time version checking for openvswitch features * Add flat type driver unittests * Changed DictModel to dict with attribute access * Pass object to policy when finding fields to strip * Allow L3 base to handle extensions on router creation * Refactor some router-related methods * Add local type driver unittests * add engine parameter for offline migrations * Check DB scheme prior to migration to Ml2 * Removes unnecessary Embrane module-level mocks * Improve module-level mocks in midonet tests * Big Switch: fix capabilities retrieval code * Improve iptables\_manager \_modify\_rules() method * NSX: bump http\_timeout to 30 seconds * Log firewall status on delete in case of status inconsistency * BSN: Set hash header to empty instead of False * Neutron does not follow the RFC 3442 spec for DHCP * LBaaS add missing rootwrap filter for route * Radware LBaaS driver is able to flip to a secondary backend node * NSX: fix invalid docstring * NSX: fix tenant\_id passed as security\_profile\_id * NSX: Fix request\_id in api\_client to increment * Improve usage of MagicMocks in ML2 and L3 tests * Improve readability of MagicMock use in RYU test * Remove function replacement with mock patch * Remove unnecessary MagicMocks in cisco unit tests * Handle errors from run\_ofctl() when dumping flows * Sync periodic\_task from oslo-incubator * Added missing plugin .ini files to setup.cfg * Imported Translations from Transifex 
* Make linux.utils.execute log error on return codes * FWaaS plugin doesn't need to handle firewall rule del ops * Reprogram flows when ovs-vswitchd restarts * Revert "fix openvswitch requirement check" * Updated from global requirements * Fix KeyError exception while updating dhcp port * NSX: fix bug for flat provider network * Disallow regular user to update firewall's shared attribute * Support 'infinite' dhcp\_lease\_duration * l2-pop : removing a TODO for the delete port use case * NEC plugin: Bump L3RPC callback version to 1.1 * Synced jsonutils from oslo-incubator * Imported Translations from Transifex * fix openvswitch requirement check * NSX: replace strong references to the plugin with weakref ones * Fixes bugs for requests sent to SDN-VE controller * Install SNAT rules for ipv4 only * Imported Translations from Transifex * Add NVP advanced service check before deleting a router * Disallow 'timeout' in health\_monitor to be negative * Remove redundant default=None for config options * Fix for multiple misspelled words * Use list copy for events in nova notifier * Extraroute extension support for nuage plugin * OFAgent: Fixing lost vlan ids on interfaces * Set onlink routes for all subnets on an external network * Cisco APIC ML2 mechanism driver, part 2 * Remove all mostly untranslated PO files * remove token from notifier middleware * NSX: get rid of the last Nicira/NVP bits * Metadata agent caches networks for routers * Common decorator for caching methods * Make pid file locking non-blocking * Allowed Addresspairs: Removing check for overlap with fixed ips * Do not defer IPTables apply in firewall path * Metaclass Python 3.x Compatibility * Fix non-existent 'assert' calls to mocks * Log iptables rules when they fail to apply * Remove hard dependency on novaclient * Provide way to reserve dhcp port during failovers * Imported Translations from Transifex * Implement local ARP responder onto OVS agent * Fix typos in ovs\_neutron\_agent.py * Allow vlan type 
usage for OpenDaylight ml2 * NSX: do not raise on missing router during migration step * NSX: fix error when creating VM ports on subnets without dhcp * NSX: allow net-migration only in combined mode * OFAgent: Avoid processing ports which are not yet ready * Add missing translation support * Reorg table ml2\_port\_bindings when db migration * Remove unused parameter * NSX: Do a single query for all gateway devices * Add mailmap entry * Add 'secret' property for 'connection' option * NSX: Do not extend fault map for network gateway ext * Ensure tenant owns devices when creating a gateway * Corrected the syntax of port\_update call to NVSD agent * Fix some typos in neutron/db and IBM SDN-VE plugin * Fix issubclass() hook behavior in PluginInterface * Imported Translations from Transifex * LBaaS VIP doesn't work after delete and re-add * OVS lib defer apply doesn't handle concurrency * Big Switch: Don't use MagicMocks unnecessarily * Make plugin deallocation check optional * Restore GARP by default for floating IPs * Ensure core plugin deallocation after every test * Updated from global requirements * Big Switch: Check source\_address attribute exists * Revert "Big Switch: Check source\_address attribute exists" * ML2 VxlanTypeDriver: Synchronize of VxlanAllocation table * Start ping listener also for postgresql * ofagent: Add a missing push\_vlan action * NSX: ensure that no LSN is created on external networks * Make VPNaaS 'InUse' exception more clear * Remove explicit dependency on amqplib * Revert "Disable debug messages when running unit tests" * eswitch\_neutron\_agent: Whitespace fixes in comments * Upgrade failure for DB2 at ml2\_binding\_vif\_details * Remove duplicate module-rgx line in .pylintrc * Disable debug messages when running unit tests * Perform policy checks only once on list responses * Allow DHCPv6 solicit from VM * Fix importing module in test\_netscaler\_driver * Record and log reason for dhcp agent resync * Big Switch: Check source\_address 
attribute exists * L3 RPC loop could delete a router on concurrent update * Adding tenant-id while creating Radware ADC service * Fix H302 violations * Fix H302 violations in plugins package * Fix H302 violations in unit tests * Imported Translations from Transifex * lbaas on a network without gateway * Optimize querying for security groups * NSX: pass the right argument during metadata setup * Improve help strings for radware LbaaS driver * Fix network profile subtype validation in N1kv plugin * Performance improvement of router routes operations * Add support to dynamically upload drivers in PLUMgrid plugin * Imported Translations from Transifex * Reference new get\_engine() method from wsgi.py * Allow test\_l3\_agent unit test to run individually * tests/unit: refactor reading neutron.conf.test * Don't print duplicate messages on SystemExit * Unit test cases for quota\_db.py * Cisco VPN device driver - support IPSec connection updates * OVS and OF Agents: Create updated\_ports attribute before setup\_rpc * Imported Translations from Transifex * Updated from global requirements * Synced jsonutils from oslo-incubator * Imported Translations from Transifex * NSX: fix migration for networks without a subnet * Allow ML2 plugin test cases to be run independently * Removed signing\_dir from neutron.conf * Add physical\_network to binding:vif\_details dictionary * Database exception causes UnboundLocalError in linuxbridge-agent * Wrong key router.interface reported by ceilometer * Imported Translations from Transifex * NSX: fix API payloads for dhcp/metadata setup * Improve ODL ML2 Exception Handling * NSX: change api mapping for Service Cluster to Edge Cluster * Fix protocol value for SG IPV6 RA rule * Cisco APIC ML2 mechanism driver, part 1 * LBaaS: remove orphan haproxy instances on agent start * Fixed floating IP logic in PLUMgrid plugin * Segregate the VSM calls from database calls in N1kv plugin * NSX: add nsx switch lookup to dhcp and metadata operations * Use 
set\_gateway from ip\_lib * Fix incorrect usage of sa.String() type * Re-submit "ML2 plugin should not delete ports on subnet deletion" * LBaaS: Set correct nullable parameter for agent\_id * Vmware: Set correct nullable for lsn\_id, nsx\_port\_id * IBM: set secret=True on passwd config field * Restore ability to run functional tests with run\_tests.sh * Fix H302 violations in extensions package * Sync db code from oslo-incubator * Imported Translations from Transifex * Remove List events API from Cisco N1kv Neutron * NSX: Fix fake\_api\_client to raise NotFound * Replace loopingcall in notifier with a delayed send * ip-lib : use "ip neigh replace" instead of "ip neigh add" * Add 2-leg configuration to Radware LBaaS Driver * Fix H302 violations in db package and services * Cisco: Set correct nullable for switch\_ip, instance\_id, vlan\_id * Ml2: Set correct nullable for admin\_state\_up * Drop service\* tables only if they exist * Updated from global requirements * Make help texts more descriptive in Metaplugin * ML2 Cisco Nexus MD: Improve Unit Test Coverage * Fix migration that breaks Grenade jobs * Fix incorrect change of Enum type * allow delete\_port to work when there are multiple floating ips * Add nova\_ca\_certificates\_file option to neutron * gw\_port should be set as lazy='join' * netaddr<=0.7.10 raises ValueError instead of AddrFormatError * Imported Translations from Transifex * netaddr<=0.7.10 raises ValueError instead of AddrFormatError * Validate IPv6 modes in API when IP version is 4 * Add 'ip neigh' to ip\_lib * OFAgent: Improve handling of security group updates * OFAgent: Process port\_update notifications in the main agent loop * NSX: sync thread catches wrong exceptions on not found * Notifier: Catch NotFound error from nova * Switch over to FixedIntervalLoopingCall * Check if bridge exists and make sure it's UP in ensure\_bridge * Validate CIDR given as ip-prefix in security-group-rule-create * Support enhancements to Cisco CSR VPN REST APIs 
* Fix uninitialized variable reference * Nuage Plugin: Delete router requires precommit checks * Delete DHCP port without DHCP server on a net node * Improved quota error message * Remove device\_exists in LinuxBridgeManager * Add support for multiple RPC workers under Metaplugin * Security Group rule validation for ICMP rules * Fix Metering doesn't respect the l3 agent binding * DHCP agent should check interface is UP before adding route * Remove workaround for bug #1219530 * Fix LBaaS Haproxy occurs error if no member is added * Add functional tests to verify ovs\_lib VXLAN detection * Add nova\_api\_insecure flag to neutron * Allow combined certificate/key files for SSL * Verify ML2 type driver exists before calling del * Fix dangling patches in Cisco and Midonet tests * Make default nova\_url use a version * ML2 Cisco Nexus MD: Remove unnecessary Cisco nexus DB * NSX plugin: fix get\_gateway\_devices * Exclude .ropeproject from flake8 checks * Register LBaaS resources to quotas engine * Remove mock.patch.stop from tests that inherit from BaseTestCase * Reschedule router if new external gateway is on other network * Update ensure()/reconnect() to catch MessagingError * Properly apply column default in migration pool\_monitor\_status * Remove "reuse\_existing" from setup method in dhcp.py * Enable flake8 E711 and E712 checking * Fixes Hyper-V agent security groups disabling * Fixes Hyper-V agent security group ICMP rules * Fix typo in ml2 configuration file * Edge firewall: improve exception handling * Edge driver: Improve exception handling * Fix typo in comment * NSX: Fix KeyError in sync if nsx\_router\_id not found * VMware: log backend port creation in the right place * Revert "Hide ipv6 subnet API attributes" * BigSwitch: Create router ports synchronously * NSX: ensure dhcp port is setup on metadata network * Hide ipv6 subnet API attributes * Set correct columns' length * Enforce required config params for ODL driver * Add L2 Agent side handling for non 
consistent security\_group settings * BSN: Remove module-level ref to httplib method * BigSwitch: Stop HTTP patch before overriding * Typographical correction of Arista ML2 help * Fix wrong section name "security\_group" in sample config files * Set the log level to debug for loading extensions * Updated from global requirements * set api.extensions logging to ERROR in unit tests * Add common base class for agent functional tests * Remove RPC to plugin when dhcp sets default route * Imported Translations from Transifex * Add missing comma in nsx router mappings migration * OFAgent: Avoid re-wiring ports unnecessarily * BigSwitch: Improves server manager UT coverage * BigSwitch: Don't import portbindings\_db until use * lb-agent: fix get\_interfaces\_on\_bridge returning None * Clean out namespaces even if we don't delete namespaces * Call policy.init() once per API request * ofa\_neutron\_agent: Fix \_phys\_br\_block\_untranslated\_traffic * Don't emit log for missing attribute check policy * Sync service and systemd modules from oslo-incubator * Imported Translations from Transifex * Move bash whitelisting to pep8 testenv * Fix test MAC addresses to be valid * ML2: ODL driver sets port status * Add a note that rpc\_workers option is experimental * Fix Jenkins translation jobs * Redundant SG rule create calls in unit tests * Set ns\_name in RouterInfo as attribute * Replace HTTPSConnection in NEC plugin * ignore build directory for pep8 * Imported Translations from Transifex * Delete routers that are requested but not reported as active * Explicitly import state\_path opt in tests.base * fixes tests using called\_once\_ without assert * Remove invalid copyright headers under API module * update doc string - correct typo * Revert changes removing OVSBridge return * fixes broken neutron-netns-cleanup * Remove duplicated tests for check\_ovs\_vxlan\_version * Permit ICMPv6 RAs only from known routers * Return 409 for second firewall creation * OFA agent: use 
hexadecimal IP address in tunnel port name * Fixing Arista CLI command * use floatingip's ID as key instead of itself * Use a temp dir for CONF.state\_path * Use os.uname() instead of calling uname in subprocess * Enable hacking H301 check * Stop using portbindings\_db in BSN ML2 driver * NSX: Fix pagination support * Removing vim header lines * Fix function parsing the kernel version * Updated from global requirements * Restore NOT NULL constraint lost by earlier migrations * BigSwitch: Semaphore on port status update * Remove last parts of Quantum compatibility shim * Imported Translations from Transifex * Fix quota\_health\_monitor opt name in neutron.conf * Add missing DB migrations for BSN ML2 plugin * Only send notifications on uuid device\_id's * Add Icehouse no-op migration * Add support for https requests on nova metadata * Delete disassociated floating ips on external network deletion * Imported Translations from Transifex * Invoke \_process\_l3\_create within plugin session * Invalid ovs-agent test case - test\_fdb\_add\_flows * Add missing parameters for port creation * Move test\_ovs\_lib to tests/unit/agent/linux * Update BigSwitch Name to its correct name * Cancelling thread start while unit tests running * Delete duplicate external devices in router namespace * Deals with fails in update\_\*\_postcommit ops * ML2 Cisco Nexus MD: Support portchannel interfaces * Changed the message line of RouterInUse class * UT: do not hide an original error in test resource ctxtmgr * BigSwitch: Move attr ref after error check * Fix namespace exist() method * Make dnsmasq aware of all names * Open Juno development * Prevent cross plugging router ports from other tenants * Adds OVS\_HYBRID\_PLUG flag to portbindings * Disable XML tests on Py26 * Subnets should be set as lazy='join' * nec plugin: allow to delete resource with ERROR status * Synced rpc and gettextutils modules from oslo-incubator * Import request\_id middleware bug fix from oslo * Add unit test for 
add\_vxlan in test\_linux\_ip\_lib * Start using oslosphinx theme for docs * Migrate data from cap\_port\_filter to vif\_details * Imported Translations from Transifex * Include cisco plugin in migration plugins with ovs * ML2 Cisco Nexus MD: Remove workaround for bug 1276395 * Fixed TypeError when creating MlnxException * Replace a usage of the deprecated root\_helper option * Cisco VPN driver correct reporting for admin state chg * Add script to migrate ovs or lb db to ml2 db * Correct OVS VXLAN version check * LBaaS: make device driver decide whether to deploy instance * NSX plugin: return 400 for invalid gw certificate * Imported Translations from Transifex * Remove extra space in help string * Add enable\_security\_group to BigSwitch and OneConvergence ini files * Add nec plugin to allowed address pairs migration * Imported Translations from Transifex * Fix segment allocation tables in Cisco N1kv plugin * Updated from global requirements * NEC plugin: Rename quantum\_id column to neutron\_id * Log received pool.status * NEC plugin: Allow to add prefix to OFC REST URL * NEC plugin: Remove a colon from binding:profile key due to XML problem * rename ACTIVE\_PENDING to ACTIVE\_PENDING\_STATUSES * VPNaaS support for VPN service admin state change and reporting * Use save\_and\_reraise\_exception when reraise exception * Return meaningful error message on pool creation error * Don't set priority when calling mod\_flow * Avoid creating FixedIntervalLoopingCall in agent UT * Imported Translations from Transifex * Big Switch Plugin: No REST port delete on net del * Add enable\_security\_group option * Get rid of additional db contention on fetching VIP * Fix typo in lbaas agent exception message * De-duplicate unit tests for ports in Big Switch * ML2: Remove validate\_port\_binding() and unbind\_port() * Imported Translations from Transifex * Fix duplicate name of NVP LBaaS objs not allowed on vShield Edge * tests/unit: clean up notification driver * Use different 
name for the same constraint * Add a semaphore to some ML2 operations * Log dnsmasq host file generation * add HEAD sentinel file that contains migration revision * Added config value help text in ns metadata proxy * Fix usage of save\_and\_reraise\_exception * Cisco VPN device driver post-merge cleanup * Fixes the Hyper-V agent individual ports metrics * Sync excutils from oslo * BigSwitch ML2: Include bound\_segment in port * NEC plugin: Honor Retry-After response from OFC * Add update binding:profile with physical\_network * return false or true according to binding result * Enable to select an RPC handling plugin under Metaplugin * Ensure to count firewalls in target tenant * Mock agent RPC for FWaaS tests to delete DB objs * Allow CIDRs with non-zero masked portions * Cisco plugin fails with ParseError no elem found * Cisco Nexus: maximum recursion error in ConnectionContext.\_\_del\_\_ * Don't use root to list namespaces * Fixes Hyper-V agent security groups enable issue * ML2 BigSwitch: Don't modify parent context * Advanced Services documentation * LBaaS: small cleanup in agent device driver interface * Change report\_interval from 4 to 30, agent\_down\_time from 9 to 75 * Stop removing ip allocations on port delete * Imported Translations from Transifex * Ignore PortNotFound exceptions on lockless delete * Show neutron API request body with debug enabled * Add session persistence support for NVP advanced LBaaS * Fix misleading error message about failed dhcp notifications * NSX: Fix router-interface-delete returns 404 when router not in nsx * Fix \_validate\_mac\_address method * BigSwitch: Watchdog thread start after servers * Calculate stateless IPv6 address * Create new IPv6 attributes for Subnets * Remove individual cfg.CONF.resets from tests * BigSwitch: Sync workaround for port del deadlock * NSX: Ensure gateway devices are usable after upgrade * Correctly inherit \_\_table\_args\_\_ from parent class * Process ICMP type for iptables firewall * 
Imported Translations from Transifex * Added missing l3\_update call in update\_network * ML2 plugin involves in agent\_scheduler migration * Imported Translations from Transifex * Avoid long transaction in plugin.delete\_ports() * cisco: Do not change supported\_extension\_aliases directly * Fix KeyError except on router\_info in FW Agent * NSX: remove last of unneed quantum references * NSX: fix intermetting UT failure on vshield test\_router\_create * Bugfix and refactoring for ovs\_lib flow methods * Send fdb remove message when a port is migrated * Imported Translations from Transifex * Send network-changed notifications to nova * Notify nova when ports are ready * Skip radware failing test for now * NSX: Propagate name updates for security profiles * Fix in admin\_state\_up check function * NSX: lower the severity of messages about VIF's on external networks * Kill 'Skipping unknown group key: firewall\_driver' log trace * Imported Translations from Transifex * API layer documentation * BigSwitch: Use eventlet.sleep in watchdog * Embrane LBaaS Driver * BigSwitch: Widen range of HTTPExceptions caught * Fix ml2 & nec plugins for allowedaddresspairs tests * Fix unittest failure in radware lbaas driver * Removes calls to mock.patch.stopall in unit tests * Stop mock patches by default in base test class * Query for port before calling l3plugin.disassociate\_floatingips() * Optimize floating IP status update * NSX: Allow multiple references to same gw device * VPNaaS Device Driver for Cisco CSR * Updated from global requirements * BigSwitch: Fix certificate file helper functions * Create agents table when ML2 core\_plugin is used * Fix usage of sqlalchemy type Integer * Fixing lost vlan ids on interfaces * Fix bug:range() is not same in py3.x and py2.x * Call target plugin out of DB transaction in the Metaplugin * NSX: Sync do not pass around model object * NSX: Make replication mode configurable * Updated from global requirements * Fix ml2 db migration of 
subnetroutes table * Imported Translations from Transifex * After bulk create send DHCP notification * Fix lack of extended port's attributes in Metaplugin * Add missing ondelete option to Cisco N1kv tables * Migration support for Mellanox Neutron plugin * Imported Translations from Transifex * Imported Translations from Transifex * Updated from global requirements * Add support for tenant-provided NSX gateways devices * NSX: fix nonsensical log trace on update port * BigSwitch: Fix rest call in consistency watchdog * BigSwitch: Fix cfg.Error format in exception * BigSwitch: Fix error for server config check * Fixed Spelling error in Readme * Adds state reporting to SDN-VE agent * Fix unittest failure in radware lbaas driver * Log configuration values for OFA agent * NSX: Add ability to retry on 503's returned by the controller * Cisco Neutron plugin fails DB migration * Floatingip\_status migration not including Embrane's plugin * One Convergence Neutron Plugin l3 ext support * Nuage plugin was missed in floatingip\_status db migration script * ML2 Cisco Nexus MD: VM migration support * Drop old nvp extension file * Makes the Extension loader behavior predictable * One Convergence Neutron Plugin Implementation * NEC plugin: delete old OFC ID mapping tables * Imported Translations from Transifex * Fix typo in migration script * Enhance GET networks performance of metaplugin * Adds the missing migration for gw\_ext\_mode * BigSwitch: Add SSL Certificate Validation * BigSwitch: Auto re-sync on backend inconsistencies * VPNaaS Service Driver for Cisco CSR * Updated from global requirements * Add OpenDaylight ML2 MechanismDriver * Replaces network:\* strings by constants * Check vxlan enablement via modinfo * Do fip\_status migration only for l3-capable plugins * Fix race condition in update\_floatingip\_statuses * Implementaion of Mechanism driver for Brocade VDX cluster of switches * NSX: passing wrong security\_group id mapping to nsx backend * Avoid unnecessarily 
checking the existence of a device * Refactor netns.execute so that it is not necessary to check namespace * Minor refactoring for Hyper-V utils and tests * Adds Hyper-V Security Groups implementation * Rename migration lb\_stats\_needs\_bigint to match revision number * Imported Translations from Transifex * NVP LBaaS: check for association before deleting health monitor * Different class names for VPNaaS migrations * ML2: database needs to be initalized after drivers loaded * replace rest of q\_exc to n\_exc in code base * Adds multiple RPC worker processes to neutron server * NEC plugin: PFC packet fitler support * Fix NVP/Nicira nits * Remove unused method update\_fixed\_ip\_lease\_expiration * NSX: nicira\_models should import model\_base directly * NSX: make sync backend run more often * Embrane Plugin fails alembic migrations * Implement Mellanox ML2 MechanismDriver * Use database session from the context in N1kv plugin * Delete subnet fails if assoc port has IPs from another subnet * Remove nvplib and move utility methods into nsxlib * BigSwitch: Add address pair support to plugin * Remove unused 'as e' in exception blocks * Remove vim line from db migartion template * Imported Translations from Transifex * Support advanced NVP IPsec VPN Service * Improves Arista's ML2 driver's sync performance * Fix NVP FWaaS errors when creating firewall without policy * Remove call to addCleanup(cfg.CONF.reset) * nec plugin: Avoid long transaction in delete\_ports * Avoid using "raise" to reraise with modified exception * Imported Translations from Transifex * Implement OpenFlow Agent mechanism driver * Finish off rebranding of the Nicira NVP plugin * Log configuration values for OVS agent * BigSwitch: Asynchronous rest calls for port create * Introduce status for floating IPs * BigSwitch: Add agent to support neutron sec groups * N1kv: Fixes fields argument not None * Adds the new IBM SDN-VE plugin * Imported Translations from Transifex * Nuage Networks Plugin * Fixes 
spelling error Closes-Bug: #1284257 * Openvswitch update\_port should return updated port info * Updated from global requirements * Remove unused variable * Change firewall to DOWN when admin state down * ovs-agent: use hexadecimal IP address in tunnel port name * NSX: add missing space 'routeron' * Imported Translations from Transifex * Fix DetachedInstanceError for Agent instance * Update License Headers to replace Nicira with VMware * Renaming plugin-specific exceptions to match NSX * Imported Translations from Transifex * DB Mappings for NSX security groups * NSX: port status must reflect fabric, not link status * Typo/grammar fixes for the example neutron config file * NSX: Pass NSX uuid when plugging l2 gw attachment * stats table needs columns to be bigint * Remove import extension dep from db migration * Fix get\_vif\_port\_by\_id to only return relevant ports * Developer documentation * Fix NSX migration path * ML2 mechanism driver access to binding details * Add user-supplied arguments in log\_handler * Imported Translations from Transifex * NSX: Fix newly created port's status should be DOWN * BigSwitch: Stop using external locks * Rename/refactoring of NVP api client to NSX * Remove pyudev dependency * Rename DB models and related resources for VMware NSX plugin * Lower log level of errors due to user requests to INFO * Include proper Content-Type in the HTTP response headers * LBaaS: check for associations before deleting health monitor * l2-population/lb/vxlan : ip neigh add command failed * l2-population : send flooding entries when the last port goes down * tests/service: consolidate setUp/tearDown logic * Ensure ovsdb-client is stopped when OVS agent dies * NSX: Fix status sync with correct mappings * Support Port Binding Extension in Cisco N1kv plugin * change Openstack to OpenStack in neutron * ML2 binding:profile port attribute * Rename/remove Nicira NVP references from VMware NSX unit tests * Fix webob.exc.HTTPForbidden parameter miss * Sync 
oslo cache with oslo-incubator * Change tenant network type usage for IB Fabric * options: consolidate options definitions * Replace binding:capabilities with binding:vif\_details * Make sure dnsmasq can distinguish IPv6 address from MAC address * Rename Neutron core/service plugins for VMware NSX * Make metaplugin be used with a router service plugin * Fix wrap target in iptables\_manager * BigSwitch: Fix tenant\_id for shared net requests * BigSwitch: Use backend floating IP endpoint * Updated from global requirements * Imported Translations from Transifex * Raise max header size to accommodate large tokens * NSX: get\_port\_status passed wrong id for network * Imported Translations from Transifex * Reset API naming scheme for VMware NSX plugin * remove pointless test TestN1kvNonDbTest * Rename Security Groups related methods for VMware NSX plugin * Rename L2 Switch/Gateway related methods for VMware NSX plugin * Rename Router related methods for VMware NSX plugin * Plugins should call \_\_init\_\_ of db\_base\_plugin for db.configure * Fixes Tempest XML test failures for Cisco N1kv plugin * Fixes broken documentation hyperlinks * Use "!=" instead of "is not" when comparing two values * ML2/vxlan/test: remove unnecessary self.addCleanup(cfg.CONF.reset) * Fix test\_db\_plugin.test\_delete\_port * Handle racing condition in OFC port deletion * Imported Translations from Transifex * Adds https support for metadata agent * Fix VPN agent does not handle multiple connections per vpn service * Don't require passing in port\_security=False if security\_groups present * wsgi.run\_server no longer used * Use different context for each API request in unit tests * Sync minimum requirements * Implements an LBaaS driver for NetScaler devices * vshield task manager: abort tasks in stop() on termination * Copy cache package from oslo-incubator * BigSwitch: Move config and REST to diff modules * Implements provider network support in PLUMgrid plugin * Should specify 
expect\_errors=False for success response * Fix unshortened IPv6 address caused DHCP crash * Add support to request vnic type on port * tests/unit: Initialize core plugin in TestL3GwModeMixin * Revert "Skip a test for nicira service plugin" * Improve unit test coverage for Cisco plugin model code * Imported Translations from Transifex * Fix class name typo in test\_db\_rpc\_base * Embrane Tempest Compliance * ipt\_mgr.ipv6 written in the wrong ipt\_mgr.ipv4 * Update help message of flag 'enable\_isolated\_metadata' * Imported Translations from Transifex * Fix invalid facilities documented in rootwrap.conf * Reset the policy after loading extensions * Fix typo in service\_drivers.ipsec * Validate rule uuids provided for update\_policy * Add update from agent to plugin on device up * Remove dependent module py3kcompat * Delete duplicate internal devices in router namespace * Use six.StringIO/BytesIO instead of StringIO.StringIO * Parse JSON in ovs\_lib.get\_vif\_port\_by\_id * Imported Translations from Transifex * Skip a test for nicira service plugin * Remove DEBUG:....nsx\_cluster:Attribute is empty or null * Fix request timeout errors during calls to NSX controller * remove unused imports * L3 agent fetches the external network id once * Avoid processing ports which are not yet ready * Ensure that session is rolled back on bulk creates * Add DB mappings with NSX logical routers * Use save\_and\_reraise\_exception when reraise exception * nec plugin: Compare OFS datapath\_id as hex int * Use six.moves.urllib.parse instead of urlparse * Rename Queue related methods for VMware NSX plugin * Lowercase OVS sample config section headers * Add DB mappings with NSX logical switches * NSX: Fix possible deadlock in sync code * Raise an error from ovs\_lib list operations * Add additional unit tests for the ML2 plugin * Fix ValueError in ip\_lib.IpRouteCommand.get\_gateway() * Imported Translations from Transifex * Fix log-related tracebacks in nsx plugin * add router\_id to 
response for CRU on fw/vip objs * Move db migration of ml2 security groups to havana * Sync latest oslo.db code into neutron * Add support for router scheduling in Cisco N1kv Plugin * Imported Translations from Transifex * Add migration support from agent to NSX dhcp/metadata services * Validate multicast ip range in Cisco N1kv Plugin * NSX plugin: fix floatingip re-association * Re-enable lazy translation * Do not append to messages with + * Remove psutil dependency * Remove legacy quantum config path * LBaaS: move agent based driver files into a separate dir * mailmap: update .mailmap * Fix binding:host\_id is set to None when port update * Return request-id in API response * Skip extra logging when devices is empty * Add extraroute\_db support for Cisco N1kv Plugin * Improve handling of security group updates * ML2 plugin cannot raise NoResultFound exception * Fix typo in rootwrap files: neuton -> neutron * Imported Translations from Transifex * Prepare for multiple cisco ML2 mech drivers * ML2 Cisco Nexus MD: Create pre/post DB event handlers * Support building wheels (PEP-427) * NVP plugin:fix delete sec group when backend is out of sync * Use oslo.rootwrap library instead of local copy * Fix misspellings in neutron * Remove unnecessary call to get\_dhcp\_port from DeviceManager * Refactor to remove \_recycle\_ip * Allow multiple DNS forwarders for dnsmasq * Fix passing keystone token to neutronclient instance * Don't document non-existing flag '--hide-elapsed' * Fix race condition in network scheduling to dhcp agent * add quota support for ryu plugin * Imported Translations from Transifex * Enables BigSwitch/Restproxy ML2 VLAN driver * Add and update subnet properties in Cisco N1kv plugin * Fix error message typo * Configure floating IPs addresses after NAT rules * Add an explicit tox job for functional tests * improve UT coverage for nicira\_db operations * Avoid re-wiring ports unnecessarily * Process port\_update notifications in the main agent loop * Base 
ML2 bulk support on the loaded drivers * Imported Translations from Transifex * Removes an incorrect and unnecessary return * Reassign IP to vlan interface when deleting a VLAN bridge * Imported Translations from Transifex * Change metadata-agent to have a configurable backlog * Sync with commit-id: 9d529dd324d234d7aeaa3e6b4d3ab961f177e2ed * Remove unused RPC calls from n1kv plugin code * Change metadata-agent to spawn multiple workers * Extending quota support for neutron LBaaS entities * Tweak version nvp/nsx version validation logic for router operations * Simplify ip allocation/recycling to relieve db pressure * Remove unused code * Reduce severity of log messages in validation methods * Disallow non-admin users update net's shared attribute * Fix error while connecting to busy NSX L2 Gateway * Remove extra network scheduling from vmware nsx plugin * L3 Agent restart causes network outage * Remove garbage in vim header * Enable hacking H233 rule * Rename nvp\_cluster for VMware NSX plugin * Minimize the cost of checking for api worker exit * Remove and recreate interface if already exists * Use an independent iptables lock per namespace * Report proper error message in PLUMgrid Plugin * Fix interprocess locks for run\_tests.sh * Clean up ML2 Manager * Expunge session contents between plugin requests * Remove release\_lease from the DHCP driver interface * VMware NSX: add sanity checks for NSX cluster backend * Update RPC code from oslo * Fix the migration adding a UC to agents table * Configure plugins by name * Fix negative unit test for sec group rules * NVP: Add LOG.exception to see why router was not created * Add binding:host\_id when creating port for probe * Fix race condition in delete\_port method. 
Fix update\_port method * Use information from the dnsmasq hosts file to call dhcp\_release * Fix pip install failure due to missing nvp.ini file * Imported Translations from Transifex * Imported Translations from Transifex * Make timeout for ovs-vsctl configurable * Remove extra whitespace * Fix extension description and remove unused exception * Fix mistake in usage drop\_constraint parameters * Fix race condition on ml2 delete and update port methods * Fix Migration 50e86cb2637a and 38335592a0dc * L3 Agent can handle many external networks * Update lockutils and fixture in openstack.common * Add test to port\_security to test with security\_groups * LBaaS: handle NotFound exceptions in update\_status callback * VMware NSX: Fix db integrity error on dhcp port operations * Use base.BaseTestCase in NVP config test * Remove plugin\_name\_v2 and extension\_manager in test\_config * Enables quota extension on BigSwitch plugin * Add security groups tables for ML2 plugin via migration * Rename nicira configuration elements to match new naming structure * Fix race in get\_network(s) in OVS plugin * Imported Translations from Transifex * Fix empty network deletion in db\_base\_plugin for postgresql * Remove unused imports * nicira: fix db integrity error during port deletion * Rename check\_nvp\_config utility tool * Remove redundant codes * Remove dupl. for get\_resources in adv. 
services * Start of new developer documentation * Fix NoSuchOptError in lbaas agent test * Corrects broken format strings in check\_i18n.py * [ML2] l2-pop MD handle multi create/delete ports * Dnsmasq uses all agent IPs as nameservers * Imported Translations from Transifex * BigSwitch: Fixes floating IP backend updates * neutron-rootwrap-xen-dom0 handles data from stdin * Remove FWaaS Noop driver as default and move to unit tests dir * Send DHCP notifications regardless of agent status * Mock looping\_call in metadata agent tests * Imported Translations from Transifex * Change default eswitchd port to avoid conflict * Midonet plugin: Fix source NAT * Add support for NSX/NVP Metadata services * Update the descriptions for the log cfg opts * Add VXLAN example to ovs\_neutron\_plugin.ini * Imported Translations from Transifex * ml2/type\_gre: Adds missing clear\_db to test\_type\_gre.py * ml2: gre, vxlan type driver can leak segment\_id * NVP: propagate net-gw update to backend * Imported Translations from Transifex * Nicira: Fix core\_plugin path and update default values in README * Include lswitch id in NSX plugin port mappings * Imported Translations from Transifex * Revert "move rpc\_setup to the last step of \_\_init\_\_" * extra\_dhcp\_opt add checks for empty strings * LBaaS: synchronize haproxy deploy/undeploy\_instance methods * NVP plugin: Do backend router delete out from db transaction * NVP plugin: Avoid timeouts if creating routers in parallel * Updates tox.ini to use new features * LBaaS: fix handling pending create/update members and health monitors * Add X-Tenant-ID to metadata request * Do not trigger agent notification if bindings do not change * fix --excluded of meter-label-rule-create is not working * move rpc\_setup to the last step of \_\_init\_\_ * Updated from global requirements * Sync global requirements to pin sphinx to sphinx>=1.1.2,<1.2 * Update common network type consts to same origin * Remove start index 0 in range() * LBaaS: unify 
haproxy-on-host plugin driver and agent * change variable name from plugin into agent * Imported Translations from Transifex * Add post-mortem debug option for tests * validate if the router has external gateway interface set * Remove root\_helper config from plugin ini * Fix a race condition in agents status update code * Add LeastRouters Scheduler to Neutron L3 Agent * Imported Translations from Transifex * Imported Translations from Transifex * Remove dead code \_arp\_spoofing\_rule() * Add fwaas\_driver.ini to setup.cfg * Switch to using spawn to properly treat errors during sync\_state * Fix a typo in log exception in the metering agent * Sync rpc fix from oslo-incubator * Do not concatenate localized strings * Imported Translations from Transifex * Removed erronus config file comment * Fix str2dict and dict2str's incorrect behavior * Improve unit test coverage for Cisco plugin common code * Change to improve dhcp-agent sync\_state * Fix downgrade in migration * Sync dhcp\_agent.ini with the codes * Imported Translations from Transifex * Handle failures on update\_dhcp\_port * Handle exceptions on create\_dhcp\_port * Imported Translations from Transifex * Add vpnaas and debug filters to setup.cfg * Fix misspells * Fix bad call in port\_update in linuxbridge agent * atomically setup ovs ports * Adds id in update\_floatingip API in PLUMgrid plugin driver * Sync Log Levels from OSLO * update error msg for invalid state to update vpn resources * Add missing quota flags in the config file sample * Imported Translations from Transifex * Fix unable to add allow all IPv4/6 security group rule * Add request timeout handling for Mellanox Neutron Agent * Revert "ML2 plugin should not delete ports on subnet deletion" * Improve OVS agent logging for profiling * l3\_agent: make process\_router more robust * Fixes missing method in Hyper-V Utils (Metering) * Fix metering iptables driver doesn't read root\_helper param * Updates .gitignore * Stop logging unnecessary warning 
on context create * Avoid loading policy when processing rpc requests * Improve unit test coverage for Cisco plugin base code * Pass in certain ICMPv6 types by default * Ensure NVP API connection port is always an integer * Mocking ryu plugin notifier in ryu plugin test * Rebind security groups only when they're updated * Fix format errors seen in rpc logging * Add test\_handle\_router\_snat\_rules\_add\_rules * Rebind allowed address pairs only if they changed * Enforce unique constraint on neutron pool members * Send only one agent notification on port update * Fix showing nonexistent NetworkGateway throws 500 instead of 404 * Imported Translations from Transifex * Update Zhenguo Niu's mailmap * Improve unit test coverage for Cisco plugin nexus code * Preserve floating ips when initializing l3 gateway interface * Fwaas can't run in operating system without namespace feature * Imported Translations from Transifex * metaplugin: use correct parameter to call neutron client * Replace stubout with fixtures * Imported Translations from Transifex * Imported Translations from Transifex * Mock the udevadm in the TunnelTestWithMTU test * Avoid dhcp agent race condition on subnet and network delete * Sync openstack.common.local from oslo * Imported Translations from Transifex * ML2 plugin should not delete ports on subnet deletion * Add state reporting to the metadata agent * Move MidonetInterfaceDriver and use mm-ctl * Do not add DHCP info to subnet if DHCP is disabled * Handle IPAddressGenerationFailure during get\_dhcp\_port * Add request-id to log messages * Imported Translations from Transifex * Enable polling minimization * Add configurable ovsdb monitor respawn interval * Ensure get\_pid\_to\_kill works with rootwrap script * Adds tests, fixes Radware LBaaS driver as a result * Optionally delete namespaces when they are no longer needed * Call \_destroy\_metadata\_proxy from \_destroy\_router\_namespaces * Added check on plugin.supported\_extension\_aliases * Cisco 
nexus plugin fails to untrunk vlan if other hosts using vlan * Catch PortNotFound exception during get\_dhcp\_port * Reduce the severity of dhcp related log traces * MidoNet: Added support for the admin\_state\_up flag * Fix OVS agent reclaims local VLAN * Replace mox in unit tests with mock * LBaaS: fix reported binary name of a loadbalancer agent * Apply six for metaclass * NVP plugin:fix connectivity to fip from internal nw * Imported Translations from Transifex * Add support for NSX/NVP DHCP services * Fix downgrade in migration * Imported Translations from Transifex * Add log statements for policy check failures * Lower severity of log trace for DB integrity error * Adds delete of a extra\_dhcp\_opt on a port * Round-robin SVI switch selection fails on Cisco Nexus plugin * Tune up report and downtime intervals for l2 agent * Fix DB integrity issues when using postgres * Move Loadbalancer Noop driver to the unit tests * Removes unused nvp plugin config param * Midonet to support port association at floating IP creation * Arista ML2 mech driver cleanup and integration with portbindings * Fix MeteringLabel model to not clear router's tenant id on deletion * Fix downgrade in migration * Fix sqlalchemy DateTime type usage * Linux device name can have '@' or ':' characters * Remove the warning for Scheduling Network * Do not run "ovs-ofctl add-flow" with an invalid in\_port * Replace a non-existing exception * Fix random unit-test failure for NVP advanced plugin * Updated from global requirements * Cleanup HACKING.rst * Remove confusing comment and code for LBaaS * Don't shadow str * ExtraRoute: fix \_get\_extra\_routes\_by\_router\_id() * remove repeated network type definition in cisco plugin * Refactor configuring of floating ips on a router * Remove database section from plugin.ini * Fix import log\_handler error with publish\_errors set * DHCP agent scheduler support for BigSwitch plugin * Fix segment range in N1KV test to remove overlap * Fix query error on 
dhcp release port for postgresql * sync log from oslo * Imported Translations from Transifex * Use correct device\_manager member in dhcp driver * LBaaS UT: use constants vs magic numbers for http error codes * Modified configuration group name to lowercase * Avoid dhcp agent race condition on subnet and network delete * Ensure OVS plugin is loaded in OVS plugin test * Remove deprecated fields in keystone auth middleware * Fix error while creating l2 gateway services in nvp * Fix update\_device\_up method of linuxbridge plugin * LBaaS: Fix incorrect pool status change * Imported Translations from Transifex * NVP: Correct NVP router port mac to match neutron * Updated from global requirements * Removing workflows from the Radware driver code * LBaaS: when returning VIP include session\_persistence even if None * Imported Translations from Transifex * change assertEquals to assertEqual * Fix TypeError: kill doesn't make sense * Update latest OSLO * Revert back to 'call' for agent reports * Imported Translations from Transifex * Imported Translations from Transifex * Fixing the syntax error in the XML Serializer * Raise VipExists exception in case Vip is created or updated for a pool that already has a Vip * Imported Translations from Transifex * NVP metadata access - create elevated context once * Fix race condition in dhcp agent * adding parameter to configure QueuePool in SQLAlchemy * Fix issues with db pooling * use the fact that empty sequences are false * Ensure that lockfile are defined in a common place * Imported Translations from Transifex * Fix typo in policy.json and checks in nicira plugin * Fix DB query returning ready devices in LoadBalancerCallbacks * Imported Translations from Transifex * Load all the necessary database tables when running cisco plugin * Fix haproxy cfg unit test * fix mis-placed paren in log statement for l3-scheduler * Imported Translations from Transifex * Add bulking support for Cisco plugin * Validate protocol when creating VIP * 
Allow tests in TestDhcpAgentEventHandler run independently * Add scheduling support for the Brocade plugin * Imported Translations from Transifex * Synchronize QuantumManager.get\_instance() method * Imported Translations from Transifex * Imported Translations from Transifex * Pin SQLAlchemy to 0.7.x * Improve test coverage for quantum wsgi module * Adds delete-orphan to database deletion * Imported Translations from Transifex * Do not disable propagate on root logger * NVP metadata access - create elevated context once * Registers root\_helper option for test\_iptables\_firewall * Resolves ryu plugin unittest errors * Set fake rpc implementation in test\_lb\_quantum\_agent * Ensure DB pooling code works with newer eventlet versions * Imported Translations from Transifex * Sync latest Oslo components for updated copyright * drop rfc.sh * Replace "OpenStack LLC" with "OpenStack Foundation" * sync Oslo Grizzly stable branch with Quantum * First havana commit * Ensure port get works when NVP mapping not stored in Quantum DB * remove references to netstack in setup.py * Imported Translations from Transifex * port\_security migration does not migrate data * Adds Grizzly migration revision * Switch to final 1.1.0 oslo.config release * Fix detection of deleted networks in DHCP agent * Add l3 db migration for plugins which did not support in folsom * Updates latest OSLO changes * Set fake rpc backend impl for TestLinuxBridgeAgent * Imported Translations from Transifex * Update oslo rpc libraries * Sets default MySql engine to InnoDB * Solve branch in migration path * Fixes Hyper-V agent issue with mixed network types * Imported Translations from Transifex * missing - in --config-file * Fix typo * Log the configuration options for metadata-proxy and agent * Imported Translations from Transifex * NVP plugin: return 409 if wrong router interface info on remove * Imported Translations from Transifex * Ensure metadata access network does not prevent router deletion * Filter out 
router ports without IPs when gathering router sync data * Do not delete subnets with IPs on router interfaces * Update to Quantum Client 2.2.0 * Add explicit egress rules to nvp security profile * Update tox.ini to support RHEL 6.x * Fix exception typo * Disable secgroup extension when Noop Firewall driver is used * Wrap quota controller with resource.Resource * Allow probe-create to specify device\_owner * Enable handling the report\_state RPC call in Brocade Plugin * Imported Translations from Transifex * Create quantum client for each api request in metadata agent * Lock tables for update on allocation/deletion * NVP plugin: configure metadata network only if overlapping IPs are enabled * Show default configuration Quotas * add ns-metadata-proxy rootwrap filters to dhcp.filters * isolated network metadata does not work with nvp plugin * Imported Translations from Transifex * Load quota resources dynamically * Notify creation or deletion of dhcp port for security group * fix mis-matched kwargs for a few calls to NvpPluginException * Populate default explicit allow rules for egress * Switch to oslo.config * Moved the configuration variables * Make run\_tests.sh pep8 conf match tox * Fix syntax error in credential.py and missing \_\_init\_\_.py * Imported Translations from Transifex * Add common test base class to hold common things * fix incorrect pathname * Prevent DoS through XML entity expansion * Delete DATABASE option checkup testcases * Fixes linuxbridge agent downs with tap device deletion timing issue * Rename source\_(group\_id/ip\_prefix) to remote\_(group\_id/ip\_prefix) * Imported Translations from Transifex * Setup device alias by device flavor information * L3 port delete prevention: do not raise if no IP on port * Pin pep8 to 1.3.3 * Avoid sending names longer than 40 character to NVP * move cisco-specific extensions to Cisco extensions directory * Add UT for LBaaS HAProxy driver * Include health monitors expected codes upper bound into HAProxy 
config * Allow DHCP and L3 agents to choose if they should report state * Imported Translations from Transifex * Enable HA proxy to work with fedora * Prevent exception with VIP deletion * Change the default l3\_agent\_manager to L3NATAgent * Imported Translations from Transifex * NEC plugin support for dhcp network and router scheduling * enable linuxbridge for agent scheduler * Move network schedule to first port creation * Imported Translations from Transifex * Host route to metadata server with Bigswitch/Floodlight Plugin * Incorrect argument in calling post\_json * fix update\_port to get tenant\_id from db rather than request * Ensure max length of iptables chain name w/o prefix is up to 11 chars * Cisco plugin support for creating ports without instances * mock quantum.agent.common.config.setup\_logging * Imported Translations from Transifex * Add initial testr support * Replace direct tempfile usage with a fixture * Set fake rpc implementation in metaplugin test configuration * Enabled add gateway to refrain from checking exit code * Add stats reporting to HAProxy namespace driver * Add session persistence support to LBaaS HAProxy driver * Remove deprecated assertEquals alias * LBaaS Agent Reference Implementation * Imported Translations from Transifex * create a Quantum port to reserve VIP address * NVP plugin support for dhcp network scheduling * Bump python-quantumclient version to 2.1.2 * Add scheduling feature basing on agent management extension * Remove compat cfg wrapper * NVP Router: Do no perfom SNAT on E-W traffic * Enable multiple L3 GW services on NVP plugin * Fix retrieval of shared networks * Imported Translations from Transifex * Remove network type validation from provider networks extension * Fix NVP plugin not notifying metadata access network to DHCP agent * Limit amount of fixed ips per port * Fetch all pages when listing NVP Nat Rules * Unpin PasteDeploy dependency version * Make sure all db accesses use subtransaction * Use testtools 
instead of unittest or unittest2 * Port update with existing ip\_address only causes exception * Enables packetfilter ext in NEC plugin based on its driver config * Set default api\_extensions\_path for NEC plugin * Fixes import reorder nits * Imported Translations from Transifex * Latest common updates * Limit chain name to 28 characters * Add midonet to setup.py * Add password secret to brocade plugin * Use db model hook to filter external network * Add default state\_path to quantum.conf * Imported Translations from Transifex * Imported Translations from Transifex * refactor LoadBalancerPluginDbTestCase setUp() * Imported Translations from Transifex * Remove external\_id and security group proxy code * Add pagination parameters for extension extraroute * Imported Translations from Transifex * Provide a default api\_extensions\_path for nvp\_plugin * AttributeError: No such RPC function 'report\_state' * Add pagination support for xml * Sync latest install\_venv\_common.py with olso * Imported Translations from Transifex * Add check-nvp-config utility * Close file descriptors when executing sub-processes * Add support Quantum Security Groups for Ryu plugin * Resolve branches in db migration scripts to G-3 release * Add Quantum support for NVP Layer-2 gateways * Implement MidoNet Quantum Plugin * Routing table configuration support on L3 * Correct permissions on quantum-hyperv-agent * Raising error if invalid attribute passed in * Support Port Binding Extension in BigSwitch plugin * Exit if DHCP agent interface\_driver is not defined * Supporting pagination in api v2.0 * Update latest OSLO files * Modify dhcp agent for agent management extension * Imported Translations from Transifex * Metadata support for NVP plugin * Add routed-service-insertion * plugin/nec: Make sure resources on OFC is globally unique * Fix SG interface to reflect the reality * Add unit test for ryu-agent * Agent management extension * Need to pass port['port'] to 
\_get\_tenant\_id\_for\_create() * Improve error handling when nvp and quantum are out of sync * Decouple helper functions from L3NatDBTestCase * Imported Translations from Transifex * Add Migration for nvp-qos extension * Use oslo-config-2013.1b3 * Shorten the DHCP default resync\_interval * Add nvp qos extension * Imported Translations from Transifex * Unable to update port as non-admin nvp plugin * Update nvplib to use HTTP constants * Rename admin\_status\_up to admin\_state\_up * Fixed the typo of loadbalancer test case * Allow nicira plugin to handle multiple NVP API versions * Imported Translations from Transifex * L3 API support for BigSwitch-FloodLight Plugin * Add an update option to run\_tests.sh * Avoid extra query when overlapping IPs are disabled * Allow tests from test\_dhcp\_agent run independently * Imported Translations from Transifex * Mark password config options with secret * Adds Brocade Plugin implementation * Add support for extended attributes for extension resources * Imported Translations from Transifex * Support iptables-based security group in NEC plugin * Persist updated expiration time * Support advanced validation of dictionaries in the API * Synchronize code from oslo * Add check for subnet update with conflict gateway and allocation\_pools * Alembic migration script for Loadbalancing service * Fix NVP L3 gateway ports admin\_state\_down on creation * Remove cfg option default value and check if missing * Remove duplicated option state\_path from netns cleanup * only destroy single namespace if router\_id is set * Use AssertEqual instead of AssertTrue * Imported Translations from Transifex * Move auth\_token configurations to quantum.conf * L3 API support for nicira plugin * Unused methods in quantum.wsgi clean up * Add firewall\_driver option to linuxbridge\_conf.ini * Adds API parameters to quantum.api.extension.ResourceExtension * fix grammar in NetworkInUse exception * Imported Translations from Transifex * PLUMgrid quantum 
plugin * Implements quantum security groups support on OVS plugin * Sync latest cfg from oslo-incubator * Improvements to API validation logic * Imported Translations from Transifex * add non-routed subnet metadata support * Imported Translations from Transifex * Enable OVS and NETNS utilities to perform logging * Add unit tests for Open vSwitch Quantum plugin * Add NVP Security group support * Fix import error in ryu-agent * Imported Translations from Transifex * Bad translation from network types to nvp transport types * Update .coveragerc * Register root\_helper in test\_debug\_commands and test\_dhcp\_agent * Adds xml support for quantum v2 API * Allow tools/install\_venv\_common.py to be run from within the source directory * Cisco plugin cleanup follow up commit * Be smarter when figuring out broadcast address * Use policy\_file parameter in quantum.policy * Imported Translations from Transifex * Define root\_helper variable under the [AGENT] section * Fixes rest of "not in" usage * Updated to latest oslo-version code * Imported Translations from Transifex * Imported Translations from Transifex * Imported Translations from Transifex * Resetting session persisnence for a VIP * Improve data access method of ryu-agent * Fixes 'not in' operator usage * Imported Translations from Transifex * Adds support of TCP protocol for LBaaS VIPs * Sync latest cfg from oslo-incubator * Remove redunant key list generation in Cisco plugin * Fixes if statement inefficiency in quantum.agent.linux.interface * Imported Translations from Transifex * Postgresql ENUM type requires a name exceptions NVP Plugin * correct spelling of Notify in classname * Disable dhcp\_domain distribution when dhcp\_domain is empty * Make protocol and ethertype case insensitive for security groups * Fix branch in db migration scripts * Finish adding help strings to all config options in Quantum code * Add NVP port security implementation * Imported Translations from Transifex * Set default lock\_path in 
state\_path * Use install\_venv\_common.py from oslo * Make get\_security\_groups() return security group rules * Fix OVSQuantumAgent.port\_update if not admin\_state\_up * Clean up test\_extensions.py imports * Fixes import order errors * OVS cleanup utility removes veth pairs * Revert "Reqd. core\_plugin for plugin agents & show cfg opts loaded." * Reqd. core\_plugin for plugin agents & show cfg opts loaded * Ensure that correct root helper is used * Fix InvalidContentType can't be raised because of error in constructor * OVS: update status according to admin\_state\_up * Cisco plugin cleanup * Improving code reuse with loadbalancer entity deletion * Fix database reconnection * Fixes per tenant quota doesn't work * Adds port security api extension and base class * LinuxBridge: set port status as 'DOWN' on creation * LinuxBridge: update status according to admin\_state\_up * Use babel to generate translation file * LBaaS plugin returns unnecessary information for PING and TCP health monitors * Fix all extension contract classes inherit from extensions.ExtensionDescriptor * get\_security\_group() now returns rules * set allocation\_pool\_id nullable=False * make IPv6 unit test work on systems with eth0 * Support Port Binding Extension in NEC plugin * Enable NEC OpenFlow plugin to use per-tenant quota * Enhance wsgi to listen on ipv6 address * Fix i18n messages * Update Oslo rpc * Enforces generic sqlalchemy types in migrations * Remove redudant code * Removes redundant code in quantum.api.api\_common * Fix i18n messages in quantum.api.api\_common * Completes unittest coverage of quantum.api.api\_common * Enable test\_agent\_ovs\_cleanup to be run alone * Fix i18n messages for cisco plugin * Provide atomic database access for ports in linuxbridge plugin * Add help strings to config file options in Quantum code * Document that code is on github now in README * Config lockutils to use a temp path for tests * Fix downgrade revision to make db migration linear * Send 
notification on router interface create/delete * More unittests for quantum.api.v2.base * Fixes inefficiency in quantum.api.v2.base.\_filters * Refactor hyperv plugin and agent * Update Oslo rpc module * Provide atomic database access nvp plugin * \_validate\_security\_groups\_on\_port was not validating external\_ids * Update WebOb version to >=1.2 * Ensure that agents also set control\_exchange * Add a common test case for Port Binding Extension * Fix line endings from CRLF to LF * Fixes import order nits * Fix ATTR\_NOT\_SPECIFIED comparison errors * Add migration for network bindings in NVP plugin * NEC OpenFlow plugin supports L3 agent RPC * Update latest OSLO * Catch up RPC context fixes on NEC OpenFlow plugin * ensure all enums in loadbalancer models have names * Adding multi switch support to the Cisco Nexus plugin * Name the securitygrouprules.direction enum * Adds support for deploying Quantum on Windows * Adds a Hyper-V Quantum plugin * Add exception validation for subnet used * Remove accessing cfg.CONF.DATABASE in nec-agent * Inform a client if Quantum provides port filtering feature * Remove unsused imports in the plugins package * DHCP agent unable to access port when restarting * Remove unused imports in unit tests * Use default\_notification\_level when notification * Latest OSLO updates * NvpPluginException mixes err\_msg and err\_desc * Fixes i18n messages in nvp plugin * Optimize if/else logic in quantum.api.v2.base.prepare\_request\_body() * Fixes quantum.api.v2.base.\_filters to be more intuitive * Fix for loadbalancer vips list * rename port attribute variable to SECURITYGROUPS from SECURITYGROUP * Remove relative imports from NVP plugin * Port to argparse based cfg * Fix database configuration of ryu-agent * Pass X-Forwarded-For header to Nova * The change implemented Lbaas CRUD Sqlalchemy operations * Iptables security group implementation for LinuxBridge * Update the migration template's default kwargs * add migration support for lb 
security groups * Fix import for quantum-db-manage * Allow nvp\_api to load balance requests * API extension and DB support for service types * Add migration support to Quantum * Remove some unused imports * Undo change to require WebOb 1.2.3, instead, require only >=1.0.8 * Add common support for database configuration * Fixup import syntax error in unit test * Enable the user to enforce validity of the gateway IP * Add comment to indicate bridge names' length * refactor QuotaV2 import to match to other exts * change xxx\_metadata\_agent() into xxx\_metadata\_proxy() * Fix the replacement placeholder in string * Ensure that exception prints UUID and not pointer * .gitignore cleanup * Fixes i18n message for nec plugin * Fixes i18n message for ryu plugin * Remove unused imports in debug package * sql\_dbpool\_enabled not passed to configured\_db nvp\_plugin * Enable tenants to set non-owned ext network as router gateway * Upgrade WebOb to 1.2.3 * Logging module cleanup * Remove unused imports in common package * Remove unused imports in rootwrap package * Remove unused imports in db package * Remove unused imports in api package * Provider network implementation for NVP plugin * Remove unused imports in agent package * Set default core\_plugin to None * Ensure that exception prints correct text * Cleans up bulk\_body generation in quantum.api.v2.base.prepare\_request\_body() * Exceptions cleanup * Readjust try/catch block in quantum.api.v2.base.create() * Ensures that the dnsmasq configuration file flag is always set * Ensure allocation pools are deleted from database * Raise InvalidInput directly instead of catch it * Ensure bulk creations have quota validations * Correct exception output for subnet deletion when port is used * Update the configuration help for the OVS cleanup utility * Implementing string representation for model classes * Provide "atomic" database access for networks * Add OVS cleanup utility * Removes redundant code in 
quantum.api.v2.base.create() * Add eventlet db\_pool use for mysql * Clean up executable modules * Fixes import order nits * Fix log message for unreferenced variable * The patch introduces an API extension for LBaaS service * Fix pep8 issues * Add tox artifacts to .gitignore * Correct i18n messages for bigswitch plugin * dhcp\_agent.ini, l3\_agent.ini: update dhcp/l3\_agent.ini * Make patch-tun and patch-int configurable * Update test\_router\_list to validate the router returned * Fixed the security group port binding should be automatically deleted when delete\_port * Add restproxy.ini to config\_path in setup.py * Replaces assertEquals to assertEqual * Completes coverage of quantum.api.v2.resource * Fixed the unit tests using SQLite do not check foreign keys * dhcp.filters needs ovs\_vsctl permission * Correct i18n message for nicira plugin * Correct i18n message for metaplugin * add parent/sub-resource support into Quantum API framework * plugins/ryu: l3 agent rpc for Ryu plugin is broken * pluins/ryu: Fixes context exception in Ryu plugin * DRY for network() and subnet() in test\_db\_plugin.py * Adds validity checks for ethertype and protocol * Add script for checking i18n message * Update evenlet monkey patch flags * Remove unnecessary port deletion * Support to reset dnsname\_servers and host\_routes to empty * Prevent unnecessary database read by l3 agent * Correct i18n message for linuxbridge plugin * Add router testcases that missing in L3NatDBTestCase * Releasing resources of context manager functions if exceptions occur * Drop duplicated port\_id check in remove\_router\_interface() * Returns more appropriate error when address pool is exhausted * Add VIF binding extensions * Sort router testcases as group for L3NatDBTestCase * Refactor resources listing testcase for test\_db\_plugin.py * l3 agent rpc * Fix rootwrap cfg for src installed metadata proxy * Add metadata\_agent.ini to config\_path in setup.py * add state\_path sample back to l3\_agent.ini 
file * plugin/ryu: make live-migration work with Ryu plugin * Remove \_\_init\_\_.py from bin/ and tools/ * Removes unused code in quantum.common * Fixes import order nits * update state\_path default to be the same value * Use /usr/bin/ for the metadata proxy in l3.filters * prevent deletion of router interface if it is needed by a floating ip * Completes coverage of quantum.agent.linux.utils * Fixes Rpc related exception in NVP plugin * make the DHCP agent use a unique queue name * Fixes Context exception in BigSwitch/FloodLight Plugin * fix remap of floating-ip within l3-agent polling interval * Completes coverage of quantum.agent.rpc.py * Completes coverage of quantum.agent.netns\_cleanup.py * add metadata proxy support for Quantum Networks * Make signing dir a subdir in /var/lib/quantum * Use openstack.common.logging in NEC OpenFlow plugin * Correct i18n message for api and db module * Fixes update router gateway successful with existed floatingip association * Fixes order of route entries * fix so cisco plugin db model to not override count methods * Use auth\_token middleware in keystoneclient * Fixes pep8 nit * Make sure we can update when there is no gateway port linked to it * Fix syntax error in nvplib * Removes quantum.tests.test\_api\_v2.\_uuid() * Add filters for quantum-debug * Removing unnecessary setUp()/tearDown() in SecurityGroupsTestCase * Fix exception when security group rule already exists * Don't force run\_tests.sh pep8 only to use -N * Correct i18n message for ovs plugin * Replaces uuid.uuid4 with uuidutils.generate\_uuid() * Correct i18n message * Removes \_validate\_boolean() * Removes quantum.common.utils.str\_uuid() * Refactors quantum.api.v2.attributes.py * Updates tearDown() to release instance objects * pass static to argv to quantum-debug config parser * Improve openvswitch and linuxbridge agents' parsing of mappings * Move extension.py into quantum/api * Ensure that the expiration time for leased IP is updated correctly * Fix 
context problem * bug 1057844: improve floating-ip association checks * fix broken logic of only using hasattr to check for get\_x\_counts * Prevent router being deleted if it is used by a floating IP * Updates clear\_db() to unregister models and close session * The change allows loading several service plugins along with core plugin * fix incorrect kwarg param name for region with l3-agent * All egress traffic allowed by default should be implied * Fix unitest test\_router\_list with wrong fake return value * Delete floating port and floatingip in the same transaction * Completes unittest coverage of quantum.api.v2.attributes.py * Use DB count to get resource counts * plugin/ryu, linux/interface: remove ryu specific interface driver * Allow NVP plugin to use per-tenant quota extension * Revert "Put gw\_port into router dict result." * Ensure that deleted gateway IP address is recycled correctly * Ensure that fixed port IP address is in valid allocation range * RESTProxy Plugin for Floodlight and BigSwitch * Ensure that mac address is set to namespace side veth end * plugin/ryu: update for ryu update * plugin/ryu: add tunnel support * Adds tests for attribute.\_validate\_uuid * Adds tests to attribute.convert\_to\_int * Adds tests for attributes.is\_attr\_set * Adds test scripts for \_validate\_string * Adds test scripts for \_validate\_range * Part of the patch set that enables VM's to use libvirts bridge type * Remove qpid configuration variables no longer supported * Removing unsed code for Cisco Quantum Plugin V1 * Add QUANTUM\_ prefix for env used by quantum-debug * Make tox.ini run pep8 checks on bin * Explicitly include versioninfo in tarball * Adds test scripts for \_validate\_values * Clean up quantum.api.v2.validators * Add indication when quantum server started * Import lockutils and fileutils from openstack-common * Update latest openstack-common code * Clean up executable modules * Remove nova code from Quantum Cisco Plugin * Use isinstance for 
\_validate\_boolean * Fixes convert\_to\_boolean logic * Updated openstack-common setup and version code * Validate L3 inputs * Treat case when pid is None * Fix openssl zombies * Ensure that the anyjson version is correct * Add eventlet\_backdoor and threadgroup from openstack-common * Add loopingcall from openstack-common * Added service from openstack-common * Sync latest notifier changes from openstack-common * Update KillFilter to handle 'deleted' exe's * Pep8 fixes for quantum master * Use \_validate\_uuid in quantum.plugins.nec.extensions.packetfilter.py * Use is\_uuid\_like in quantum.extensions.securitygroup.py * Removes regex validation of UUIDs in dhcp\_agent * Use uuidutils.is\_uuid\_like in quantum.extentions.l3 * Implements \_validate\_uuid * Use uuidutils for uuid validation * Drop lxml dependency * Testcase of listing collection shouldn't depend on default order of db query * Add uuidutils module * Log loaded extension messages as INFO not WARNING * db\_base\_plugin\_v2.QuantumDbPluginV2.create\_port clean-up * Clean-up comments in quantum/db/l3\_db.py * Import order clean-up * let metaplugin work with plugin which has not l3 extension support * Ensure that HTTP 400 codes are returned for invalid input * Use openstack common log to do logging * Put gw\_port into router dict result * Add check for cidr overrapping for adding external gateway * Fix unnecessary logging messages during tests * support 'send\_arp\_for\_ha' option in l3\_agent * pin sqlalchemy to 0.7 * Remove unused metaplugin agents * Get subnets of router interfaces with an elevated context * Support external network in probe-create * remove unused modules for linuxbridge/ovs plugin agent * Chmod agent/linux/iptables\_manager.py * Quantum Security Groups API * Make create\_floatingip support transaction * Update policies * Notify about router and floating IP usages * Fix exception when port status is updated with linux bridge plugin * Call iptables without absolute path * Delete the 
child object via setting the parent's attribute to None * Add unit tests for the ovs quantum agent * Add MTU support to Linux bridge * Correct Intended Audience * Add OpenStack trove classifier for PyPI * use object directly instead of the foreigh key to update master db object * Remove database access from agents * Fix database clear when table does not exist * IP subnet validation fixes * Update default base database to be V2 * Update common * add test for create subnet with default gateway and conflict allocation pool * Logging indicates when service starts and terminates * Ensures port is not created when database exception occurs * Improve unit test times * Add control\_exchange option to common/config.py * Treat invalid namespace call * get\_network in nvp plugin didn't return subnet information * tests/unit/ryu/test\_ryu\_db: db failure * correct nvplib to update device\_id * Update rpc and notifier libs from openstack.common * Add quantum-usage-audit * Fix filters default value in get\_networks * l3\_nat\_agent was renamed to l3\_agent and this was missed * Update vif driver of Ryu plugin * Support for several HA RabbitMQ servers * Correct the error message in the Class NoNetworkAvailable * Fix flag name for l3 agent external network id * clean notification options in quantum.conf * Add log setting options into quantum.conf * Warn about use of overlapping ips in config file * Do global CIDR check if overlapping IPs disabled * Fix rootwrap filter for dnsmasq when no namespace is used * Add common popen support to the cisco plugin * Use sqlite db on file for unit tests * Uses a common subprocess popen function * remove default value of local\_ip in OVS agent * Remove a function that is not used * all rootwrap filter for 'route', used by l3-agent * l3-agent: move check if ext-net bridge exists within daemon loop * Add catch-call try/catch within rpc\_loop in ovs plugin agent * Fix OVS and LB plugins' VLAN allocation table synchronization * ZMQ fixes for 
Quantum from openstack-common * Restore SIGPIPE default action for subprocesses * Fix for flat network creation in Cisco plugin * Removes test desription that is no longer valid * Modified code Pyflakes warning * Fix deadlock of Metaplugin * remove unittest section for nec plugin README file * remove unittest section for ryu plugin README file * Fix for DB error in the Cisco plugin * modify the wrong phy\_brs into phys\_brs * NVP plugin missing dhcp rpc callbacks * make README point to real v2 API spec * README file changes for Cisco plugin * fix for nested rootwrap checks with 'ip netns exec' * always push down metadata rules for router, not just if gateway exists * Removed eval of unchecked strings * Update NVP plugin to Quantum v2 * ovs-lib: make db\_get\_map return empty dict on error * Update l3-agent.ini with missing configuration flags * Sync a change to rpc from openstack-common * Fix for failing network operations in Cisco plugin * add missing files from setup.py * Add quantum-nec-agent to bin directory * remove not need shebang line in quantum debug * make rootwrap filters path consistent with other openstack project * Bump version to 2013.1, open Grizzly * Fix lack of L3 support of NEC OpenFlow plugin * Add a new interface driver OVSVethInterfaceDriver * Ensure that l3 agent does not crash on restart * make subnets attribute of a network read-only * Exclude openstack-common from pep8 test * Ensures that the Linux Bridge Plugin runs with L3 agent * Remove an external port when an error occurs during FIP creation * Remove the exeception handler since it makes no sense * Add enable\_tunneling openvswitch configuration variable * Create .mailmap file * Update default policy for add/remove router interface to admin\_or\_owner * Add periodic check resync check to DHCP agent * Update metaplugin with l3 extension update * Add DHCP RPC API support to NEC OpenFlow plugin * Remove an external interface when router-gateway is removed * openvswitch plugin does not 
remove inbound unicast flow in br-tun * Remove default name for DHCP port * Added policy checks for add interface and remove interface * allow multiple l3-agents to run, each with one external gateway net * Prevent floating-ip and ex-gateway ports should prevent net deletion * fix generation of exception for mismatched floating ip tenant-ids * Give better error to client on server 500 error * Change 422 error to 400 error * Add IP version check for IP address fields * Policies for external networks * Add IP commands to rootwrap fileter for OVS agent * Modified code Pyflakes warning * Modified code Pyflakes warning * Modified code Pyflakes warning * Modified code Pyflakes warning * Modified code Pyflakes warning * Modified code Pyflakes warning * Modified code Pyflakes warning * Modified code Pyflakes warning * Modified code Pyflakes warning * Fix broken L3 support of Ryu plugin * check subnet overlapping when adding interface to router * add local network type and use by default for tenant networks * Fix data passed to policy engine on update * remove incorrect mock assert\_called in unit tests * Fix dhcp agent rpc exception handling * Add missing include for logging when log\_config is used * Modified code Pyflakes warning * Modified code pyflakes warning * Improve error message when flat network already exists * Lower webob dep from v1.2.0 to v1.0.8 * Allocation pool creation should check if gateway is in subnet * Make sure floating IPs + gateways must be on external nets * restart dnsmasq when subnet cidr set changes * supress dhcp router opt for subnets with null gw * add rootwrap filters to wrap ip netns exec * Implements agent for Quantum Networking testing * Quantum dhcp crashes if no networks exist * Update with latest code from openstack-common (stable/folsom) * Fixes undefined variable 'network\_type' in OVS agent * Create utility to clean-up netns * Fix lack of L3 support of Ryu plugin * Ensure that port update set correct tag in OVS * ovs\_lib unable to 
parse return when port == -1 * L3: make use of namespaces by agent configurable * Fix error in rule for metadata server dnat * Fix programming error of ryu-plugin * Ensure network delete is handled by OVS agent * Implement L3 support in Metaplugin * Fixes agent problem with RPC * netns commands should always run in the root ns * Add lease expiration management to ip recycling * misc L3 fixes * expose openvswitch GRE tunnel\_id via provider API * Do not transfer ips if there isn't any * prevent invalid deletion of ports using by L3 devices * Modified code PEP8 warning * Implementation of 2nd phase of provider extension for openswitch * Mangle network namespace name used by dhcp\_agent * Update rootwrap; track changes in nova/cinder * remove policy check for host\_routes in update\_port * Ensure proper validation for l3 API attributes * Cisco nexus sub-plugin update\_network fix * Fix dhcp option distribution by dnsmasq * fix bug where network owned resources block delete * Plugin aware extensions should also be reset at each test setup * Ensure network connectivity for linuxbridge flat network * Execute unit tests for Cisco plugin with Quantum tests * prevent OVS + LB plugins from clearing device\_id and device\_owner * updated outdated comments in base v2 plugin class * clear db.\_ENGINE for each plugin init in Metaplugin * Enable tox to run OVS plugin unit tests * Allow tox to run plugin specific unit tests * fixes cisco nexus plugin delete network issue * Fix Metainterface driver with namespace * Add lease expiration script support for dnsmasq * Remove 'verbose' API capability * PEP8 issues fixed * removed some unused global variable * Update TESTING file * Typo fix in quantum: existant => existent * Add DHCP RPC API support to Ryu plugin * Run core unit tests for each plugin * OVS plugin tunnel bridges never learn * Add nosehtmloutput as a test dependency * fix typo in OVS plugin from recent bugfix * enable router deletion logic in l3-agent * Enable users to 
list subnets on shared networks * Fix IP allocation on shared networks ports * Move metaplugin test for common test directory * Enable DHCP agent to work with plugin when L2 agents use DB polling * fix associating a floating IP during floating IP creation * Ensure that LB agent does not terminate if interface already exists in bridge * Treat exceptions when invoking ovs-vsctl * Remove v1.0 and v1.1 API from version info * Get OVS port details from port ID * Fix undefined variables * Fixing unit test failures in Cisco plugin * fix netns delete so that it works when a ns is set * Linuxbridge support for L3 agent * Fix exception message for bulk create failure * quantum l3 + floating IP support * Add missing conversion specifiers in exception messages * Use a common constant for the port/network 'status' value * Remove unused variable * Log message missing parameter causes exception * Update README for v2 API * Fix flavor extension based on new attribute extension spec * Update the Nicira NVP plugin to support the v2 Quantum API * Enhancements to Cisco v2 meta-plugin * Add model support for DHCP lease expiration * Trivial openvswitch plugin cleanup * Convert DHCP from polling to RPC * Add quota per-tenant * Reset device owner when port on agent is down * Allow extra config files in unit tests * Fix visual indentation for PEP8 conformance * Updates pip requirements * NEC OpenFlow plugin support * Enables Cisco NXOS to configure multiple ports Implements blueprint cisco-nxos-enables-multiple-ports * Implementation of second phase of provider extension * deal with parent\_id not in target * remove old gflags config code * convert query string according to attr map * Add device\_owner attribute to port * implementation for bug 1008180 * Fix bulk create operations and make them atomic * Make sure that there's a way of creating a subnet without a gateway * Update latest openstack files * improve test\_db\_plugin so it can be leveraged by extension tests * Adds the 'public 
network' concept to Quantum * RPC support for OVS Plugin and Agent * Initial implemention of MetaPlugin * Make dhcp agent configurable for namespace * Linux Agent improvements for L3 * In some cases device check causes an exception * normalize the json output of show a given extension * move the correct veth into the netns for the LB * linux bridge fixes following v1 code removal * fixes typo in ensure\_namespace * Remove v1 code from quantum-server * Add netns to support overlapping address ranges * dhcp-agent: Ryu plugin support for dhcp agent * fix missing deallocation of gateway ip * RPC support for Linux Bridge Plugin and Agent * Implementation of bp per-net-dhcp-enable * Enhance Base MAC validation * Use function registration for policy checks * Exempt openstack-common from pep8 check * Make 4th octet of mac\_range configurable * Replace openvswitch plugin's VlanMap with vlan\_ids DB table * Remove unused properties * Notification for network/subnet/port create/delete/update. blueprint quantum-notifications * Make the plugin for test\_db\_plugin configurable * update DHCP agent to work with linuxbridge plug-in * ryu/plugin, agent: unbreak 610017c460b85e1b7d11327d050972bb03fcc0c3 * Add classmethod decorator to class methods of providervlan ext * Only delete VLAN information after Quantum network is deleted * Make quantum pipeline configurable from quantum.conf * ovs\_quantum\_plugin should use reconnect\_interval in common conf * add name into port and subnet * Update openvswitch tunnel unittest * Enable agents and plugins to use the same configuration file * Fix linuxbridge agent tests * Update openstack-common files * Initial V2 implementation of provider extension * Implements data-driven views and extended attributes * Add v2 API support for the Cisco plugin Blueprint cisco-plugin-v2-api-support * Enhance V2 validations to work better for integers and booleans * Refactor the test cases so that all the test cases are under one test class * Add quota 
features into quantum. Blueprint quantum-api-quotas * Assume that subclass validates value of UUID * fix bug lp:1025526,update iniparser.py to accept empty value * Ensures policy file is reloaded only if updated * Provide way to specify id in models\_v2 * Add validity checks to Quantum v2 resources * Avoid removal of attributes used by policy engine * Raise proper exception if policy file do not exist * Introduce files from openstack common * Ensures API v2 router does not load plugin twice * ovs-agent exception non-existent ports * Ryu plugin support for v2 Quantum API * Add option sql\_max\_retries for database connection * Enable quantum agents to work with global cfg.CONF * Create DHCP agent tap device from port ID * Fix some syntax errors * fix bug lp:1019230,update rpc from openstack-common * Fix v2 API policy checks when keystone is in use * implement dhcp agent for quantum * Corrects imported modules in Cisco and Ryu according to latest nova packages * Validate that network\_id in port/subnet POST belong to the same tenant * Verify CIDR overlaps among networks' subnets * Address problems with foreign keys with subnet and network deletion * Add 'allocation\_pools' to Quantum v2 API subnets * Delete IP allocation range for subnet when deleting subnet * Fix linux bridge plugin to be consistent with naming rules * v2 support for the linux bridge plugin * OVS plugin support for v2 Quantum API * Check if interface exists in bridge prior to adding * Ensure that subnet\_id is on correct network * Use setuptools git plugin for file inclusion * Cisco's unplug\_iface refers to non existing exception * Implement IP address allocation * Enable user to configure base mac address * Bug #1012418 - quantum agent for OVS does not install properly on Xen XCP * Add simple file loggin to ovs\_quantum\_agent * Fixing pep8 warning messages Bug #1017805 * Network deletion and subnet creation bug fixes bug 1017395 * Remove paste configuration details to a seperate file. 
blueprint use-common-cfg * Bug 1015953 - linuxbridge\_quantum\_agent device\_exists() is buggy * Reorder imports by full module path * Added iptables\_manager ( based on openstack/linux\_net.py ) This module will be the base library to implement security groups and generic firewall. It is an independent iptables module, made to be easy to package if used by agents and also inside quantum * Unit test and Readme changes related to cisco plugin * Implements the blueprint use-common-cfg for the quantum service. More specifically uses global CONF for the quantum.conf file * Ensure unique mac address allocation. This is the first part of bug 1008029 * Add authZ through incorporation of policy checks * Fix additional pep8 issues on Jenkins bug 1014644 * removed "runthis" and other unused functions from utils.py * Linux bridge agents did not work with common linus utils bug 1014286 * Added vlan range management for OVS plugin * Bug #1013967 - Quantum is breaking on tests with pep 1.3 * Remove wrong base class for l2network\_models after v2.0 API * Cisco cli cannot find argument action\_prefix * Use openstack.common.exception * Remove unused functions in common/utils.py * API v2: mprove validation of post/put, rename few attributes * Bug #1000406 - Return value of shell commands is not checked by plugins * Fix python2.4 incompatibility * Add API v2 support * Binaries should report versions * Fix up test running to match jenkins expectation * Add build\_sphinx options * Remove unused imports * Quantum should use openstack.common.jsonutils * Remove hardcoded version for pep8 from tools/test-requires * AuthN support for Quantum * fix bug lp:1007557,remove unused functions in utils.py * Add common dir for shared agent code, add OVS lib * Bug #1007153 * Register enable\_tunneling as bool opt * Quantum should use openstack.common.importutils * PEP8 fixes * Bug #1002605 * Automatically determine Quantum version from source * Fix linux bridge section name Bug #1006684 * Remove the 
reference to non existing exception by linuxbridgeplugin * bug #1006281 * Parse linuxbridge plugins using openstack.common.cfg * Bug #1004584 * fix some pylint warnings * fix errors in database test cases * Log the exception so app loading issues can be debuged * remove unneeded import from OVS agent that break 2.4 compat * blueprint man-support and fix documentation build bug 995283 * Fix print error for linux bridge bindings bug 1001941 * Add HACKING.rst to tarball generation bug 1001220 * fall back to \`ip link\` when \`ip tuntap\` unavailable bug 989868 * Cisco plugin CLI call to quantumclient CLI * Calling Super method from QuantumPortAwareScheduler.\_\_init\_\_ * OVS plugin: add tunnel ips to central database * Include AUTHORS in release package * blueprint database-common bug 995438 * bug 996163 * Bug #994758 * Change Resource.\_\_call\_\_() to not leak internal errors * Let OVSQuantumTunnelAgent sync with database * Cleaned up log usage * blueprint agent-db-ha bug 985470 bug 985646 * Update codebase for HACKING compliance * Make sample quantum.conf compliant with docs * Make ovs Interface option set properly * Removed simplejson from pip-requires * Remove dependency on python-quantumclient * Add sphinx to the test build deps * Add HACKING.rst coding style doc * return 404 for invalid api version request * fix issue with OVS plugin VLAN allocation after a quantum-server restart * bug 963152: add a few missing files to sdist tarball * API docs: fix typo for network delete * Open Folsom * Bug #956559 VIF driver and scheduler for UCS plugin are broken since the flag configuration mechanism in nova is changed. 
Fixing that and also fixing some property names, along changes to how the quantum client code is invoked * plugin/ryu/agent: unbreak a06b316cb47369ef4a2c522f5240fa3f7f529135 * Fix path to python-quantumclient * Split out pip requires and aligned tox file * ryu/nova: catch up d1888a3359345acffd8d0845c137eefd88072112 * Add root\_helper to quantum agents * Fix missing files in sdist package [bug 954906] * Fix for bug 921743 Response codes for create ops in API v1.0 not compliant with spec * bug 954538 Fix for the cisco unit tests * check connection in Listener. refer to Bug #943031 * fixed incorrect duplicate title * Fixed incorrect title for example 3.10 * Downgraded required version of WebOb to 1.0.8 * Bug #949261 Removing nova drivers for Linux Bridge Plugin * Remove outdated content from OVS plugin README, point to website instead * add git commit date / sha1 to sphinx html docs * more files missing in sdist tarball * make sure pip-requires is included in setup.py sdist * Introducing the tenant owenrship checks in the Cisco plugin, changes are almost identical to those in Bug#942713 * Fix some plugins that don't check that nets + ports are owned by tenant * remove pep8 and strict lxml version from setup.py * plugin: introduce ryu plugin * bug 934459: pip no longer supports -E * Fix bug 940732 stack.sh can't match sql\_connection string * Return appropriate error for invalid-port state in create port API * blueprint quantum-ovs-tunnel-agent * Initial commit: nvp plugin * unittests: setup FLAGS.state\_path properly: bug 938637 * Cleanup the source distribution * Fix ovs config file location * blueprint quantum-linux-bridge-plugin * Remove quantum CLI console script * Bug 925372: remove deprecated webob attributes (and also specify stable webob version in pip-requires) * bug 923510: avoid querying all ports for non-detail GET Network call * Make tox config work * Pin versions to standard versions * bp/api-filters This changeset implements filters for core Quantum API 
and provides unit tests * Split out quantum.client and quantum.common * Quantum was missing depend on lxml * bp/api-error-codes Restructured API error codes for Quantum API v1.1 This changeset provides the following changes: - Only standard HTTP errors for Quantum API v1.1 - Customized fault response body formatting according to API version - Changes to unit tests to deal with version specific status codes * blueprint ovs-portstats * Add support for dealing with 501 errors (notimplemented) * Improved VlanMap * moving batch config out of quantum-server repo * bug 920299: remove duplicate + outdate README * Getting ready for the client split * Removed erroneous print from setup.py * Fixes setup scripts for quantum plugins * Base version.py on glance * fix mysql port in sql\_connection example.. * Make the quantum top-level a namespace package * Add \_\_init\_\_.py from plugin to be copied on setup scripts * Fix lp bug 897882 * PEP8 quantum cleanup * Install a good version of pip in the venv * Rename .quantum-venv to .venv * Updating Cisco README with instructions on installing the patched ncclient library * Remove plugin pip-requires * blueprint refactor-readme-to-manual * Bug #890028 * Implementation of the BP services-insertion-wrapper inside the Cisco Plugin * blueprint operational-status-ovs-plugin * bug 903580: remove invalid extensions path from quantum.conf * Fix for bug 902175 * Readme Fix * blueprint api-framework-essex * Fix for bug 900277 * Fix for bug 900316 * Modified the Readme for Unit Test Execution Instructions * Bug 900093 Remove unused function in db/api.py * bug #891246: Fix paths in agent Makefile * Second round of packaging changes * Bug 891705 Fix to change reference to the Quantum CLI from within the Cisco extensions' CLI module * Correcting the plugins classpath in the Quantum README * The relative path for the "ucs\_inventory.ini" file has been fixed * bug #891267 : for XS, grab iface-id from XAPI directly if needed * Changes to make 
pip-based tests work with jenkins * Fix for bug 890498 * Fix for bug 888811 * Fixing find\_config\_file after packaging changes * Added timeout flag to ovs-vsctl to avoid infinte waiting * Add quantum.exceptions path to configed ext paths * Fix for Bug #888820 - pip-requires file support for plugins * Fixing Cisco plugin after update\_\* change * Fix for bug 888207 * Fix for bug 877525 * Bug #875995: Quantum README fixes * Change version numbers to be compatible with debian packaging * Make the openvswitch plugin tests work again * Swich over to update\_{net,port} instead of rename\_net and set\_port\_state * Added try import to quantum-server and quantum-cli * Bug 887706 * Blueprint authentication-for-quantum * blueprint quantum-packaging * Moved the initialization of the blade state so that the interfaces which are configured outside of Quantum are also initialized in the blade state * fix minor double-serialization bug in client.py * bug #863635: remove vestigial cheetah import from bin/cli * Change the ovs plugin create\_\*() calls to take the kwargs param * Changing the log messages in order to be always identified by their sub-packages of origin, and they can even be filtered on that basis * Add .gitreview config file for gerrit * New tests are being adding to the Diablo code (Cisco L2-Network plugin), and some fixes in the case where the tests were failing * Add the ability to specify multiple extension directories * Add code-coverage support to run\_tests.sh (lp860160) * Change port/net create calls to take an additional kwargs param * ovs plugin: Remove reference to set\_external\_ids.sh * fix pep8 issues in Cisco plugin * Remove hack for figuring out the vif interface identifier (lp859864) 2011.3 ------ * Update openvswitch plugin README * Update openvswitch plugin README * Get output from run\_tests * Add rfc.sh to help with gerrit workflow * merge tyler's unit tests for cisco plugin changes lp845140 * merge salv's no-cheetah CLI branch lp 842190 * 
Addressing Dan's comment on output generator * merge sumit's branch for lp837752 * merge salv's branch for bug834013 * merge salv's branch for keystone token on client bug838006 * merge rohit's db test branch: lp838318 * merge salv fix for bug 841982, fix minor pep8 violation * merge salv fix for bug834008 * Changes to address Salvatore's review comments, removed unnecessary imports, and changed a debug message * changing key names to confirm to api specs * Merging latest from lp:quantum * Merging lo:~salvatore-orlando/quantum/quantum-api-auth * Implementing Dan's suggestion concerning fixing the bug in db api rather than FakePlugin * Fixing bad indent * syncing diverged branches * merging from lp:quantum * merging from lp:quantum * Updating CLI for not using Cheetah anymore. Now using a mechanism based on Python built-in templates * Fixing the bug in FakePlugin * made general exception handling messages consistent removed LOG pylint errors cleanup in tests * Create operation now generate response with status code 202 * restoring correct default pipeline * Mergin from lp:quantum * Add information about quantum dependency for nova * merge salv's branch to remove dummy plugin * Changing communication between UCSM driver to UCSM to HTTPS * Adding CLI usage examlpes to the README * Adding client-side support for Keystone integration * Keystone-integrated pipeline should not be default in quantum.conf * Removing class DUmmyDataPlugin * Removed redundant configuration, and added more comments in the configuration files * Updating the README file * Merging Shweta's test cases for mutliport resource * Adding Multinic tests * Typo fix in README * Merging Sumit's changes including fixes for multinic support, and CLI module for working with extensions * More fixes for multi-nic support * Fixed a bug with plug\_interface * Merging from Cisco branch * Changes to incorporate earlier review comments, also for multiport resource * adding quantum database unit test cases * Merging 
changes from Ying's branch (new mutliport resource) * add multiport and exception handling * add multiport resource * Merging from lp:quantum * Avoiding deserializing body multiple times with several parameters * merge cisco consolidated plugin changes * Test on param\_value changes as follows: * Merging lp:~salvatore-orlando/quantum/bug834449 * Merging Ying's changes (minor) * fix print statements in novatenant and portprofile * merge trunk * Minor refactoring * Changes to l2network\_plugin for create\_ports and pylint fixes to cli.py * Modified CLI to handle both core and extensions CLI * merge trunk * lp835216 client lib was not passing in kwargs when creating exceptions * lp834694 fix integrity error when deleting network with unattached ports. Add unit test * Minor fix in delete\_port * merging changes from cisco consolidated branch * Fixes to support multinic * Merging fixes from Sumit's branch for extension API version number and to UCS inventory to associated VIF-ID with ports * Merging from the Cisco branch * adding new api methods using just port\_id * Fixing the extensions URL to 1.0 and pep8 error * bug fixes to handle multinic * Merging Shweta's fix for extensions' test cases (clean up was not happening completely) * Adding Network and Port clean up functions for portprofile unit tests * Merging from lp:quantum * Merging Shweta's fixes in the tests for key names changes in the Core API * make CLI show\_port command display interface-id, add additional test case * merge salvatore's new cli code * Dictionary key values changes in test\_extension * Merging lp:quantum, resolving conflict * merge two pep8 branch * Merging Ying's pep8 fixes * fix pep8 issues * Merging quantum trunk * fix pep8 warnings * Updating common/extensions.py in order not to instantiate a QuantumManager when retrieving plugin * Cleaning pep8 * Merging lp:~danwent/quantum/lp834491 Fixing Bug #834491: api alignment merge broke ovs plugin (Critical) * Addressing comments from Dan * 
Merging from quantum * merge cisco extensions branch * lp834491: change plugin to work with API code after the API alignment merge * Merging Shweta's fixes to the test cases for the extensions * Added Extension & ucs driver test changes and fixes * Merging from Sumit's branch, changes to VIF-driver and Scheduler; extension action names have been changed in response to Salvatore's review comments in the extensions branch review * Syncing with Cisco extensions branch * Merging changes from Sumit's branch * Changes qos description to string; changes extension API names for get\_host and get\_instance\_port * Mergin Ying's branch * change get\_host and get\_instance\_port function name * Cleaning (removing) unused code..hooray ! fixes for extension tests * Sorting correctly all imports for the Nexus Driver and Unit Test * Fixed the Unit Test for Nexus Driver * add cisco\_faults under l2network package * move faults/exceptions to l2network package, remove unecessary faults definitions change the portprofile action api's method fix imports order and other comments issues * Merging from Sumit's branch, import ordering related changes * Changing the order of imports (to satisfy convention) * Merging the Cisco branch * Updating README according to Somik's comment * Finishing cli work Fixing bug with XML deserialization * Completing Unit Tests * Merging lp:~salvatore-orlando/quantum/quantum-api-alignment * Configuration of multiple VLANs on the same Nexus Switch Interfaces * Adding unit test for rename\_network * Added logging to syslog or file specified at command line removed plugin direct mode fixed unit tests to reflect changes in cli code fixex pep8 errors * Merging from Sumit's branch * Fixed some bugs with credential and qos resources; also fixed l2network\_single\_blade * Merging Rohit's changes * helper function to get creds based on name * integration with l2network\_plugin.py * fixing relative import in nexus\_db.py * putting in db support for creds and qos * 
merge latest quantum branch and resolve conflicts * Merging lp:~asomya/quantum/lp833163 Fix for Bug #833163: Pep8 violations in recent packaging changes that were merged into trunk (Critical) * Addressing Somik's comment * Templated output for CLI completed! * PEP8 fixes for setup.py * delete quantum/common/test\_lib.py to prepare for quantum merge * Made changes according to reviewer's comments. Add addtional information on extension test in README * Merging changes from Sumit's branch * Merging lp:~cisco-openstack/quantum/802dot1qbh-vifdriver-scheduler * Merging lp:~cisco-openstack/quantum/l2network-plugin-persistence * Fixed a bug in the initialization of the UCS inventory; fixed another bug in deleting a port * Noticed some pep8 errors, fixed them * Merging lp:quantum * Changes to incorporate reviwer's comments. Also changed client.py to handle extension URLs * Review Changes * remove unnecessary code and sync faults and exception handling * Code changed base on Reviews pep8 passed pylint 9.10 * merging with lp:quantum * merging from lp:quantum * Fixes based on review comments * Addressing comments from Ziad and Somik * merge lp:~bgh/quantum/lp837174 * Fix unit test printing (lp837174) * Fixing issue in view builders concerning attachment identifiers * Code clean up as per reviewr's request; documentation strings, unused code, etc * Rewording of the README file to clarify the use of the SSh port * clean up code and fix some comments * clean code and fix some comments * Merging from Sumit's latest branch - Fixed loading of Nexus DB tables; moved imports to l2nework\_db.py; Refactoring of code to generalize inventory handling (enhancement) * Fixed loading of Nexus DB tables; moved imports to l2nework\_db.py, changes discussed & approved by Rohit * Making Keystone version configurable * Accidentally took quantum.conf out of branch. 
Now back in * Merging lp:~raxnetworking/quantum/bug827272 * Merging branch: lp:~danwent/quantum/test-refactor * Removing "excess" file * Missed adding a file earlier, fixed a small issue * Refactoring of code to generalize inventory handling (enhancement) * Merging UCS inventory state initialization fix from Sumit's branch * Fixes an issue with loading the UCS inventory when a dynamic nic has been used outside of Quantum * Removed obsolete instructions from README * Changes to reflect the new features (mutli-blade, multi-chassis support) * Changes to support calls from VIF Driver and Scheduler * Pep8, pylint fixes * fixing pep8 error * adding helper function for port binding model * UCS inventore persistence and pep8/pylint fixes * UCS persistence fixes * added new columns to models for ucs plugin multi blade support updated methods in ucs\_db for newly added columns changed column dynamic\_vnic\_id in port binding table to blade\_intf\_dn updated tests to handle new column name * Merging rohit's UCS persistence support * UCS plugin persistence * Persistence support for UCS plugin network * adding utility functions to create dictionaries * Merging changes from Rohit's branch * Merging changes from cisco extensions * added ucs plugin related execptions in cisco\_exceptions.py added ucs plugin persistence related modules - ucs\_models.py and ucs\_db.py added ucs db related unit tests in test\_database.py fixed formatting in l2network\_models.py and test\_database.py * Adding some error checks * Reduced excessive logging * Several fixes to initial version * fixing the the test\_database.py tests * pylint and pep8 fixes * Change profile-id * merged Shweta's branch for ext test. 
Minor fix for review comments * Review Changes * merged Shweta's ext test branch * Initial commit with lots of changes * Moved the conf file uncer the cisco directory * Moved the conf file uncer the cisco directory * Updated conf file * Adding Entension API unt tests * Syncing with lp:quantum * Code refactored, made changes are per reviwer's suggestions * sync up with l2network exception handling for extension * merged Cisco branch's latest changes * Adding changes from Sumit's latest merge * merge with lp:~cisco-openstack/quantum/l2network-plugin-extensions * replace exception handler by using cisco\_exceptions * Raising exceptions in extension resources handling (where missing). Changing exception name to QosNotFound * Changing exception name to QosNotFound * Mergin from Cisco branch * Raising exceptions in extension resources handling (where missing) * Merging fixes to client side exception handling. Thanks lp:tylesmit ! * Merging fixes and changes batch-config script. Thanks lp:danwent ! * Adding the Nexus support to the Persistence Framwork Modification of the Nexus Unit Case to be running with Persistence Framework pep8 passed pylint 8.81/10 * added nexus exception in cisco\_exceptions.py added log to methods in l2network\_db.py added nexus\_db.py and nexus\_models.py - persistence modules for nexus plugin * add plugins.ini back * add all conf/\*.ini back * merge with ying's branch * merging with Ying's extension branch * remove ying's test ciscoplugin * remove all configuration files * remove cisco\_demo and test\_scripts directory, which were used by our local tests * Removed concatenation per review comments * change the configuration files to the default values * pylint and pep8 fix * merging with ~cisco-openstack/quantum/l2network-plugin-extensions * fix pylint issuses * Making keystone integration optional in quantum configuration * Merging bug fix for Bug 821733. Thanks lp:salvatore-orlando ! 
* Fixing typo * Making the client raise the appropriate exception if needed. Also increasing the pylint score to above 8 * pep8 error fixed for l2network\_db.py * Mering Sumit's branch with plugin support for Credentials, QoS, NovaTenant resources. Also merging latest from lp:~cisco-openstack/quantum/l2network-plugin-persistence * Merging from Sumit's branch, VIF-driver and Quantum-aware scheduler * Removed extra spaces to satisfy pep8 * VIF driver for 802.1qbh and Quantum aware scheduler * fix some pylint issues * Pylint and pep8 fixes * Changes to support credentials, qos, and novatenant extensions * Removing unused error response codes * Merging lp:~asomya/quantum/lp824145 Fix for Bug#824145 : Adding a setup script for quantum * merge trunk pep8 fixes adapting CLI to API v1.0 Fixing wsgi to avoid failure with extensions * Fixed indentation and changed file comments * add extension change to ying's branch * merge trunk * Pulling in changes from lp:quantum * Merging Cisco's contribution to Quantum. Thanks to various folks at Cisco Systems, Quantum will have plugins to integrate with Cisco UCS blade servers using 802.1Qbh, Cisco Nexus family of switches and the ability for Quantum plugin to have multiple switches/devices within a single Quantum plugin * Merging Shweta's change to fix a function call in the test code * Adding the changed UCS Driver function names in test\_ucs\_driver * Santhosh/Deepak | Fixed an issue where collection actions for PUT and DELETE methods in resource extension were routing to update and delete action of the resource * Merging from Sumit's branch pylint fixes and incorporating review comments * Changes to README file and merging Shweta's changes * Mergin Shweta's test changes, also README file * Changes to test structure. Adding pylint correctons * Fixes to the README file per earlier review comments. 
Also removed main from one of the modules * Mergin from cisco brach * Merging from lp:quantum * Pulling changes from Cisco branch * Pylint fixes * exit unit tests if tests are invoked specifying a particular test * Merging Nexus pylint changes and other enhancements from Edgar * pep8 passed pylint 8.83 * Merging Rohit's changes * Partial commit * Moved test\_database.py to plugins/cisco/tests/unit/ Edited test\_database.py to be able to run like other tests pylint for cisco/db folder - 8.85/10 pylint for cisco/tests/unit/test\_database.py - 8.42/10 pep8 done * Adding a new file with all the XML snippets to make code easier to read Moving the Nexus SSH server port to the configuration file Removing main functions Making some changes based on Dan and Salvatore reviews * Changes in the README file to incorporate Somik's comments * pylint changes - pylint score for cisco/db folder - 8.27/10 pep8 checks done * Removing extra testing function on Nexus Driver * Merging plugin and tests' changes * Fixes to the tests which were breaking, including fixes to the test cases * Pulling in changes from Rohit's branch * Pulling in changes from Shweta's branch * Removed main from modules as per review comments * updated README file to include persistence framework setup instructions updated db api.py unset\_attachment method to return port moved db\_conn.ini into cisco/conf/ with other configuration files updated l2network\_plugin\_configuration.py to get db config cleaned up l2network\_db.py - removed config parser code as using cisco config parser updated l2network\_db.py to raise specific exceptions in error cases updated create\_vlanid method in l2network\_db.py to not raise exception if vlan rows exist updated portprofile and portprofile\_binding methods to include tenant\_id as an argument added cisco/db/test\_database.py containing unit tests for quantum and l2network\_plugin tables edited get\_pp\_binding method in l2network\_db.py to return empty list when no results found 
pep8 checks done * Adding Persistence unit test * Fixed bugs while testing * pep8 errors fixed * Merging rohit's changes * Changes to support persistence framework * Merging: lp:~danwent/quantum/client-lib * Merging: lp:~tylesmit/quantum/api-client-fix-serialization Adding automattic serialization to all requests by moving it to do\_request * First, trivial, implementation of authN+authZ * fixes from rohit's branch * from rohit's branch * Adding more templates More tests * - Added new tables VlanID to generate ids and maintain usage of vlans - Added wrapper functions to get next unused vlan, populate vlans, release vlans, getall vlans, isused van and delete van - Added ported instead of networked for portprofile binding table - Changed wrapper methods and test cases for portprofile binding to use portid * Adding missing files to branch * Simplifying condition * FIxing missing 'output' variable @ line 243 (syntax error) * Adding automattic serialization to all requests by moving it to do\_request * added network and port models similar to quantum with following changes - - InnoDB as storage engine to allow foreign key constraints - joinedLoad operation on the queries to make use of relation between Network and Port Moved out the network and port code to make l2network contain vlanbinding, portprofile and portprofile bindings * Authentication with Keystone. auth\_token Middleware tweaked and imported in Quantum tree Developing Authorization middleware * Introducting cheetah Updating list\_nets in CLI Writing unit tests for list\_nets Stubbing out with FakeConnection now * I'm too tired * Stubout work in progress * Merging quantum extenions framework into trunk. Thanks rajaram vinkesh, deepak & santhosh for the great work! 
* - added network and port models into the l2network plugin instead of using quantum models - added api methods for network and ports - restructured code to use the l2network network and port - added l2network base class for other tables to inherit - added support for l2network plugin model objects to behave like dictionary (gets rid of code to convert objects into dictionaries) - added foreign key constraints to l2network plugin model attributes representing columns - added attributes to represent relation between models in l2network plugin - added joinedload only to network and port (need to to for others) - added InnoDB as the storage medium in base table for imposing foreign keys - updated l2network test cases to handle foreign key constraints * lp Bug#824145 : Adding a setup script for quantum * skeleton for cli unit tests * merge trunk * Removing exceptions as well (previously only API faults were removed) * Merged quantum trunk * adding renamed client-lib tests * Tiny change to the README file, instructions on how to get ncclient * - Adding setup script * Adding db connection and l2network plugin database modules * update CLI to use show instead of list for calls that do not return a list * rename client\_lib unit tests so it is run by ./run\_tests.sh, update tests to handle name changes * force batch\_config.py to use json, as XML has issues (see bug: 798262) * update batch\_config.py to use new client lib, hooray for deleting code * Changed to default plugin class name * Rajaram/Vinkesh | Added examples of scoping extension alias in request and action extension * Added tests directory to list of modules in the README file * Added "tests" directory to the list modules in the README file * Adding the required build for Nexus support * Merging changes addressing Bug # 802772. Thanks lp:danwent ! 
* Merging bugfix for Bug 822890 - Added License file for Quantum code distribution * Fixed typo in README * README file updates (pointer to Nova Cactus branch), and numerous other edits based on Mark's template * L2 Network Plugin Framework merge * Incorporated changes in response to review comments from Ram * Adding Apache Version 2.0 license file. This is the official license agreement under which Quantum code is available to the Open Source community * Making a check for the presence of UCS/Nexus plugin (earlier it was not in certain cases). With this change, if the UCS/Nexus plugins are not enabled, the core API tests can be run even on Ubuntu (and RHEL without the requirement of any specific network hardware) * Merging test cases from Shwetas' branch, and further modified README file * Merging the test framework from Shweta's branch * decluttering \_parse\_request\_params method for QuantumController * Fixing detail action for port collection Adding PortIsDown exception Adding unit tests for detail actions and PortIsDown PEP8 FIXES * Adding Unit Test Cases Now * Adding Cisco Unit Tests * minor enhancements to quantum client-lib * RHEL limitation updated * Adding support for expressing format through Content-Type header Adding action detail for port resource (Member & Collection) * Changes to enhance L2 network plugin framework * undo unintentional formatting change in run\_tests.sh * remove unneeded \_\_init\_\_ * refactoring testing code to support plugin tests * Added QuantunPluginBase as the base class for the l2network\_plugin * Generalized and put placeholders * another merge * pep8 cleanup, restore defaults * Added info about ssh conf required for nexus switch * merge * remove unneeded tests from ovs\_quantum\_plugin * Nexus plugin classpath was incorrect, fixed it * Edits to reflect conf changes, made it easier to follow * merge heckj's pip-requires fixes * Fixed issue with creating new port profiles (one configuration parameter got left out during the 
migration to the new configuration scheme). Also fixed a bug in the calculation of the profile id * Fixes the broken call to second level of plugins. Renaming will work now * updates to pip-requires for CI * Loading of device-specific plugins and drivers is done dynamically by setting configuration. All configuration is driven through configuration files place in the conf directory. Each .ini conf file contains info on the configuration. README file updated to reflect all the changes. Fixed issue with delete\_network deleting the network even when attachments were present. Fixed issue with port id generation * Deepak/Vinkesh | Fixed show action in extension controller to return 404, added example to include namespace in a request extension * Merged quantum trunk * Santhosh/Vinkesh | Added extension\_stubs file * Removing extra file in Nexus Driver * Removing extra file in Nexus Driver * Relabelling API version to 1.0! * Cosmetic changes to unit tests for client library. Pep8 fixes * Removed quantum/plugins/cisco/db/ and quantum/cisco\_extensions since these will be merged separately * Fixed pep8 error * Merging changes * Merging changes from lp:quantum * Fixed an issue selecting the right port interface and also properly switching off the Nexus Interface * Completing API spec alignment Unit tests aligned with changes in the API spec * Applying fix for bug #814518 Merging from lp:~salvatore-orlando/quantum/bug814518 * Adding controller and view builder for attachment resource * Merging the port profile client name fix * Earlier fix resulted in a different issue (profile client name, was also being used as profile name, hence breaking) * Truncated the port profile client name length to 16 characters (ucsm excepts max 17 chars) * Mergin fix for Bug 818321 * Merging approved OVS plugin configuration change branch. Thanks lp:danwent ! 
* Merging the brand new Quantum-client-library feature * Requests now send the Content-Type in the HTTP request * fix broken flush in db.network\_destroy, pep8 fixes * req/res alignment complete. Status code alignment ALMOST complete (need to sort out 200 vs 202 for create ops) * Vinkesh | Changed import orders according to pep8 recommendations * Including a flag to activate the NX-OS driver Updating the README documentation * merging branch for bug802772, which this branch is stacked on top of * WIP. Still need to align APIs for interface plug/unplug * Fixing pep8 errors * Adding the Nexus OS driver based on the new PlugIn structure * fix incorrect handling of duplicate network name, add exception for duplicate network name, and add unit test to confirm detection * WIP * Merging lp:quantum updates * Fixing syntax issue. I had a 2.7+ style dict comprehension, so I made it 2.6 friendly * Removing a debugging line * pep8 fix * Fixing API behaviour for throwing 400 error on invalid body. Adding unit test for creating a port without request body * make ovs plugin pay attention to port state * persistence of l2network & ucs plugins using mysql - db\_conn.ini - configuration details of making a connection to the database - db\_test\_plugin.py - contains abstraction methods for storing database values in a dict and unit test cases for DB testing - l2network\_db.py - db methods for l2network models - l2network\_models.py - class definitions for the l2 network tables - ucs\_db.py - db methods for ucs models - ucs\_models.py - class definition for the ucs tables dynamic loading of the 2nd layer plugin db's based on passed arguments Create, Delete, Get, Getall, Update database methods at - Quantum, L2Network and Ucs Unit test cases for create, delete, getall and update operations for L2Network and Ucs plugins pep8 checks done branch based off revision 34 plugin-framework * Vinkesh/Santhosh | Moved the stub classes in test\_extensions to a separate file extension\_stubs * 
Merged from trunk * bug802772 update exception handling in OVS plugin to use API exceptions * merged the latest changes from plugin-framework branch - revision 39 conforming to the new cisco plugin directory structure and moving all db related modules into cisco/db folder updated db\_test\_plugin.py - added import of cisco constants module - added LOG.getLogger for logging component name - updated import module paths for l2network\_models/db and ucs\_models/db to use the new directory structure - updated (rearranged) imports section to obey openstack alphabetical placement convention updated db\_conn.ini - updated database name from cisco\_naas to quantum\_l2network unit test cases ran successfully and pep8 checks done again * removing a few additional lines that aren't needed once we don't calculate port count * Adding a tests directory, this can be used for plugin-specific test cases * also remove line that computes portcount, as it is unneeded now that we don't return it * Including copyright info * merge branch for to fix bug817826 * For the modules to get added, missed in the earlier checkin * remove PortCount attribute of network object, as it is not in the spec and was causing us to hit bug 818321 (note: this commit does not fix the underlyingproblem with xml deserialization, it just makes sure we don't hit it with the existing API code) * Changed the directory structure to a more organized one. 
Fixed the imports to reflect the new structure * Merging the latest changes from lp:quantum * change default integration bridge from br100 to br-int to reflect new default for OVS vif-plugging in nova Diablo-3 release * fix bug 817826 and similar error in batch\_config.py * persistence of l2network & ucs plugins using mysql - db\_conn.ini - configuration details of making a connection to the database - db\_test\_plugin.py - contains abstraction methods for storing database values in a dict and unit test cases for DB testing - l2network\_db.py - db methods for l2network models - l2network\_models.py - class definitions for the l2 network tables - ucs\_db.py - db methods for ucs models - ucs\_models.py - class definition for the ucs tables dynamic loading of the 2nd layer plugin db's based on passed arguments Create, Delete, Get, Getall, Update database methods at - Quantum, L2Network and Ucs Unit test cases for create, delete, getall and update operations for L2Network and Ucs plugins pep8 checks done branch based off revision 34 plugin-framework * merge Salvatore's api branch with fixes for tests. Tweaking branch to remove unwanted bin/quantum.py as part of merge * Merging in main repo updates * Updating to fix some SSL issues * Removing extra quantum.py file from source control removing unused import from quantum/api/\_\_init\_\_.py * Apply fix for bug #817813 Merging lp:~danwent/quantum/bug817813 * Apply fix for bug #814012 Merging lp:~danwent/quantum/bug814012 * Apply fix for bug #814517 merging lp:~tylesmit/quantum/quantum-bug-814517 * bug 817813: default provider in plugins.ini accidentally changed. 
Changing it back to FakePlugin * Changed the param name "network-name" to "net-name" since the Quantum service expects the later * Removing some legacy code from the unit tests * Adding unit tests to cover the client library * Changing the CLI to use the new client library * Adding refactored API Client * pep8 fixes * fix bug 814012, add unit tests for it * Resolving Bug 814517 which caused XML to have extra whitespace * Vinkesh/Santhosh | Removed loading extensions from 'contrib' and fixed an indentation bug while loading extensions * Santhosh/Rajaram|modified extensions section in README * Rajaram/Santhosh | Added logging to the PluginAwareExtensionManager failures * Rajaram/Santhosh|Added plugin interface in foxinsox and Updated README * Rajaram/Santhosh|quantum manager loads plugin only once, even though both extension middleware and APIRouter calls it * Santhosh/Rajaram|latest merge from quantum and made extensions use options to load plugin * Apply fix for bug #797419 merging lp:~salvatore-orlando/quantum/bug797419 * Re-fixing issues with XML deserialization (changes got lost in merges with trunk) Adapting assertions in unit tests merged from trunk to reflect changes in the API due to RFE requested by Erik Carlin * Rajaram/Vinkesh | Plugins advertise which extensions it supports * Merging branch lp:~salvatore-orlando/quantum/bug802892 Fixing bug #802892 * Merging branch lp:~netstack/quantum/quantum-unit-tests * Fixing silly pep8 error * doh * Restoring quantum\_plugin\_base to previous state. 
Will discuss in the future whether allow API layer to pass options to plugins upon initialization * Vinkesh/Santhosh | Added tests to check the member and collection custom actions of ResourceExtensions * Vinkesh/Deepak | Moved plugin related checks in ExtensionManager code to PluginAwareExtensionManager * Deepak/Vinkesh | Added an base abstract class which can be inherited by PluginInterface class which defines the contract expected by extension * Vinkesh/Deepak| Added doc and small refactoring * Unit tests for API completed fixed pep8 errors * Add TESTING document: description and polices for quantum tests * Adding more unit tests * Deepak/Santhosh | ExtensionManager verifies that plugin implements the interface expected by the extension * Santhosh/Deepak | Made supports\_extension method optional for plugin, plugin will be loaded only once * Merged from quantum trunk * Santhosh/deepak| Load extensions supported by plugin * add extension code in.(last push does not include this directory.) * add api extensions (including portprofiles resources and associate/disassociate actions.) * Changes to support port-profile extension. Fixed an error in the README file * Very initial version of the nxos driver .... lets call it ver 0.0.1! * Removing code related to functional tests * Porting shell script get-vif.sh to python module get-vif.py for cisco ucsm module * Required for recognizing the "cisco" package. 
Missed in the initial checkin * Applying fix for bug #804237 from branch lp:~salvatore-orlando/quantum/bug804237 * minor pep8 fix * Changed some credentials (does not affect functionality) * This file is not required * Initial checkin for the L2-Network Plugin with all the associated modules and artifacts * Rajaram/Santosh|misc readablity improvements to extension tests * Santosh/Rajaram| added extenstion test to show header extensibility * Rajaram/Vinkesh | Added tests to confirm extensions can edit previously uneditable field * removing pep8 errors * Added more unit tests for API Starting work on functional tests, importing code from Glance * Now REALLY using in-memory db * Adapated plugin infrastructure to allow API to pass options to plugins Now using in-memory sqlite db for tests on FakePlugin teardown() now 'resets' the in-memory db Adding unit tests for APIs * Fixing error introduced in find\_config * Removing excess debug line * Fixing syntax errors in db/models.py * Temporary commit * Now loading plugin before setting up routes. Passing same plugin instance to API controllers * Adding unit test Applying pep8 fixes * Starting implementation of unit tests Fixing minor bugs with FakePlugin * Removing static data for FakePlugin * - Unit tests will use FakePlugin - FakePlugin adapted to db API with sqlite - db Models updated to inherit from generic Quantum Base model (provides utility functions and capabilities for treating db objects as dicts - see nova.db.models.NovaBase) - functional tests commented out temporarily. Will un-comment when code for starting actual service is in place * Adding Routes>=1.12.3 to tools/pip-requires * Work in progress - just starting * ...and again! * removing "quantum" folder as well from etc * removing api-paste.ini * Addressing comments from Somik * Merging dan wendlandt's bugfixes for Bug #800466 and improvements that enable Quantum to seamlessly run on KVM! 
* fix pep8 introduced by trunk merge * A small start on unit tests: mostly a proof of concept that contains a test for api/ports.py * Added some more plugin agnostic tests (attachment and negative tests) and some pep8 fixes * merge * more pep8 goodness * Fixing bug #798262 * refactor batch\_config, allow multiple attaches with the empty string * Merge: bzr merge lp:~bgh/quantum/bugfixes * Fix cut and paste error in api\_unplug\_iface * Fixing bug #798261 * no-commit * Santhosh/Vinkesh | Added extensions framework * merge and pep8 cleanup * Merging latest changes from parent repo - lp:network-service , Parent repo had approved merge proposal for merging lp:~santhom/network-service/quantum\_testing\_framework , which has now been merged into lp:network-service * Merging pep8 and functional test related changes lp:~santhom/network-service/quantum\_testing\_framework branch * add example to usage string for batch\_config.py * Bug fixes and clean-up, including supporting libvirt * Fix typo in mysql package check * Fix typo in mysql package check * Adding support for 'detail' action on networks objects * README fixes * Santhosh/Deepak | Fixed the import issue and config.load\_paste\_app issue * Santhosh/Vinkesh | Fixed all the pep8 violations. Modified the 'req' to 'request' across all the services and wsgi so that it's consistent with other projects * Santhosh/Vinkesh | Added the testing framework. 
Moved the smoketest to tests/functional * merged remote README changes * Fix cli.py from last merge when it got overwritten * Fixing pep8 errors removing excess debug lines * Add dependencies to README and fix whitespace * Fix merge indentation errors * Merged Brad's ovsplugin code * pep8 changes for quantum-framework code pieces * Update Quantum README file with instructions to launch the service and get going * Updated quantum\_plugin\_base with with return type dataformats as well as exceptions * Added a basic README file and updated Quantum plugin base class with appropriate exceptions * Initial commit of exceptions that are raised by a quantum plugin * Make the wording a little clearer * Remove -a option from examples (it no longer exists) * Make the API the default * Address Dan's review comments * Make the manager a little smarter about finding its config file * Fix another TODO: remove main function from manager * Fix detail\_net and list\_ports commands * Remove get\_all\_interfaces and fix detail\_network commands * Initial version of openvswitch plugin * \* Merged changes from Salvatore's branch - quantum-api-workinprogress \* Removed spurious methods from quantum\_base\_plugin class. \* Updated the sample plugins to be compliant with the new QuantumBase class * Update readme with quantum specific instructions * Address some of the remaining TODOs and general cleanup * Add headers * Initial cut of openvswitch plugin * Add database models/functions for ports and networks * Print the command list in the help * Whitespace fixes * Added api functions for the interface commands * Initial rework of cli to use the WS api * Copy over miniclient from testscripts and port tests.py to use unittest * Adding ports.py to source control * pep8 fixes (1st batch) * First working version of Quantum API * Adding views/networks.py to bzr * Adding serialization/deserilization for network resources. Adding fake plugin * networks api with final URL structure. 
No serialization yet * Implementing interface with plugin * adpating wsgi files * Work in progress on network API * Adding first files for quantum API * Minor fixes: indentation in bin/quantum and fix import in config.py * Adding api paste configuration file * Removing .pydevproject from version control * Branching from quantum-framework * Adding flags.py to infrastructure code * Move plugin configuration to plugins.ini - a config file * 1) Created a DummDataPlugin in SamplePlugin module * merged salvatore's changes to local branch * 1) Added a bare-bones framework for quantum plugins. 2) Created demo quantum plugin that conforms to QuantumPluginBase Abstract class specification. 3) Demonstrated plugin registration and invocation using the demo plugin called "QuantumEchoPlugin" 4) Created the initial file structure for a quantum CLI 5) Seeded the utils module that will contain frequently used Quantum utilities. 6) Modified the manager module to initialize and register the quantum plugin defined in a configuration file. I have hard-coded the path to plugin for now but this will move to a quantum.conf file * Fixing pep8 errors * adding /bzrignore to precent checking in pyc files and that sort of stuff.. * Pushing initial started code based on Glance project and infrstructure work done by the melange team * Merging in Shweta's fixes from the review by Sumit * Minor Fix in ucs tests * Fixing issues discussed in merge prop. The UCS Inventory clears the DB on teardown. The multiblade tests now check to see if a port exists in the db before deleting it. 
It checks to make sure the UCSInventory is set in the config * Adding UCS inventory tests * Merging in latest changes from lp:quantum * Merging in Shweta's test changes * Ading Ucs db tests * Removing excess imports * Fixing pep8 errors and pushing pylint score up to 8.57 * Fix for bug/893663 Making Cisco CLI usable from installed packages * Bug 903684: functions defined twice in utils.py * blueprint api-operational-status * Adds sqlalchemy support for ovs\_quantum\_plugin * bug 903581: remove etc/quantum.conf.sample as it is invalid * Fixing bug/903829 Making setup\_server.py not try to install quantum.conf.sample * Removing a couple extra lines * Adding some tests, fixing some bugs, and making the tearDown correctly remove PortProfiles * Adding author information * Removing a negative test until I can figure out how to implement it * Removing some negative tests until I can figure out how to implement them * Updating tests * Fixing port-related calls * Adding tests * Tweaking other multiblade tests * Updating multiblade create\_network test * Starting making multi\_blade model return data * Adding initial multi blade test file from Shubhangi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/HACKING.rst0000644000175000017500000000260500000000000017000 0ustar00coreycorey00000000000000VMware-NSX Style Commandments ============================= - Step 1: Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ - Step 2: Read on VMware-NSX Specific Commandments -------------------------------- - [N319] Validate that debug level logs are not translated - [N320] Validate that LOG messages, except debug ones, have translations - [N321] Validate that jsonutils module is used instead of json - [N322] We do not use @authors tags in source files. We have git to track authorship. 
- [N323] Detect common errors with assert_called_once_with Creating Unit Tests ------------------- For every new feature, unit tests should be created that both test and (implicitly) document the usage of said feature. If submitting a patch for a bug that had no unit test, a new passing unit test should be added. If a submitted bug fix does have a unit test, be sure to add a new one that fails without the patch and passes with the patch. All unittest classes must ultimately inherit from testtools.TestCase. In the Neutron test suite, this should be done by inheriting from neutron.tests.base.BaseTestCase. All setUp and tearDown methods must upcall using the super() method. tearDown methods should be avoided and addCleanup calls should be preferred. Never manually create tempfiles. Always use the tempfile fixtures from the fixture library to ensure that they are cleaned up. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/LICENSE0000644000175000017500000002363700000000000016217 0ustar00coreycorey00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. 
"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/MANIFEST.in0000644000175000017500000000047400000000000016742 0ustar00coreycorey00000000000000include AUTHORS include README.rst include ChangeLog include LICENSE include vmware_nsx/db/migration/alembic_migrations/script.py.mako recursive-include vmware_nsx/db/migration/alembic_migrations/versions * recursive-include vmware_nsx/neutron/locale * exclude .gitignore exclude .gitreview global-exclude *.pyc ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2462552 vmware-nsx-15.0.1.dev143/PKG-INFO0000644000175000017500000000326700000000000016304 0ustar00coreycorey00000000000000Metadata-Version: 1.2 Name: vmware-nsx Version: 15.0.1.dev143 Summary: VMware NSX library for OpenStack projects Home-page: https://launchpad.net/vmware-nsx Author: OpenStack Author-email: openstack-discuss@lists.openstack.org License: UNKNOWN Description: =================== VMware-NSX package =================== You have come across the VMware-NSX family of Neutron plugins External Resources: ------------------- The homepage for the VMware-NSX project is on Launchpad_. .. _Launchpad: https://launchpad.net/vmware-nsx Use this site for asking for help, and filing bugs. Code is available both git.openstack.org_ and github_. .. _git.openstack.org: https://git.openstack.org/cgit/openstack/vmware-nsx/tree/ .. _github: https://github.com/openstack/vmware-nsx For help on usage and hacking of VMware-NSX, please send a message to the openstack-discuss_ mailing list. .. _openstack-discuss: mailto:openstack-discuss@lists.openstack.org For information on how to contribute to VMware-NSX, please see the contents of the CONTRIBUTING.rst file. 
Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Requires-Python: >=3.6 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/README.rst0000644000175000017500000000143500000000000016671 0ustar00coreycorey00000000000000=================== VMware-NSX package =================== You have come across the VMware-NSX family of Neutron plugins External Resources: ------------------- The homepage for the VMware-NSX project is on Launchpad_. .. _Launchpad: https://launchpad.net/vmware-nsx Use this site for asking for help, and filing bugs. Code is available both git.openstack.org_ and github_. .. _git.openstack.org: https://git.openstack.org/cgit/openstack/vmware-nsx/tree/ .. _github: https://github.com/openstack/vmware-nsx For help on usage and hacking of VMware-NSX, please send a message to the openstack-discuss_ mailing list. .. _openstack-discuss: mailto:openstack-discuss@lists.openstack.org For information on how to contribute to VMware-NSX, please see the contents of the CONTRIBUTING.rst file. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/TESTING.rst0000644000175000017500000001414000000000000017046 0ustar00coreycorey00000000000000Testing VMware-NSX ================== Overview -------- The unit tests (vmware_nsx/tests/unit/) are meant to cover as much code as possible and should be executed without the service running. 
They are designed to test the various pieces of the neutron and VMware NSX tree to make sure any new changes don't break existing functionality. Development process ------------------- It is expected that any new changes that are proposed for merge come with tests for that feature or code area. Ideally any bugs fixes that are submitted also have tests to prove that they stay fixed! In addition, before proposing for merge, all of the current tests should be passing. Virtual environments ~~~~~~~~~~~~~~~~~~~~ Testing OpenStack projects, including Neutron, is made easier with `DevStack `_. Create a machine (such as a VM or Vagrant box) running a distribution supported by DevStack and install DevStack there. For example, there is a Vagrant script for DevStack at https://github.com/bcwaldon/vagrant_devstack. .. note:: If you prefer not to use DevStack, you can still check out source code on your local machine and develop from there. Running unit tests ------------------ There are three mechanisms for running tests: run_tests.sh, tox, and nose. Before submitting a patch for review you should always ensure all test pass; a tox run is triggered by the jenkins gate executed on gerrit for each patch pushed for review. With these mechanisms you can either run the tests in the standard environment or create a virtual environment to run them in. By default after running all of the tests, any pep8 errors found in the tree will be reported. With `run_tests.sh` ~~~~~~~~~~~~~~~~~~~ You can use the `run_tests.sh` script in the root source directory to execute tests in a virtualenv:: ./run_tests -V With `nose` ~~~~~~~~~~~ You can use `nose`_ to run individual tests, as well as use for debugging portions of your code:: . .venv/bin/activate pip install nose nosetests There are disadvantages to running Nose - the tests are run sequentially, so race condition bugs will not be triggered, and the full test suite will take significantly longer than tox & testr. 
The upside is that testr has some rough edges when it comes to diagnosing errors and failures, and there is no easy way to set a breakpoint in the Neutron code, and enter an interactive debugging session while using testr. .. _nose: https://nose.readthedocs.org/en/latest/index.html With `tox` ~~~~~~~~~~ VMware NSX, like other OpenStack projects, uses `tox`_ for managing the virtual environments for running test cases. It uses `Testr`_ for managing the running of the test cases. Tox handles the creation of a series of `virtualenvs`_ that target specific versions of Python. Testr handles the parallel execution of series of test cases as well as the tracking of long-running tests and other things. Running unit tests is as easy as executing this in the root directory of the Neutron source code:: tox To run functional tests that do not require sudo privileges or specific-system dependencies:: tox -e functional To run all the functional tests in an environment that has been configured by devstack to support sudo and system-specific dependencies:: tox -e dsvm-functional For more information on the standard Tox-based test infrastructure used by OpenStack and how to do some common test/debugging procedures with Testr, see this wiki page: https://wiki.openstack.org/wiki/Testr .. _Testr: https://wiki.openstack.org/wiki/Testr .. _tox: http://tox.readthedocs.org/en/latest/ .. _virtualenvs: https://pypi.org/project/virtualenv Running individual tests ~~~~~~~~~~~~~~~~~~~~~~~~ For running individual test modules or cases, you just need to pass the dot-separated path to the module you want as an argument to it. For executing a specific test case, specify the name of the test case class separating it from the module path with a colon. 
For example, the following would run only the TestSubnetsV2 tests from vmware_nsx/tests/unit/nsx_v/test_plugin.py:: $ ./run_tests.sh vmware_nsx.tests.unit.nsx_v.test_plugin.TestSubnetsV2 or:: $ tox -e py37 vmware_nsx.tests.unit.nsx_v.test_plugin.TestSubnetsV2 Adding more tests ~~~~~~~~~~~~~~~~~ VMware NSX has a fast growing code base and there is plenty of areas that need to be covered by unit and functional tests. To get a grasp of the areas where tests are needed, you can check current coverage by running:: $ ./run_tests.sh -c Debugging --------- By default, calls to pdb.set_trace() will be ignored when tests are run. For pdb statements to work, invoke run_tests as follows:: $ ./run_tests.sh -d [test module path] It's possible to debug tests in a tox environment:: $ tox -e venv -- python -m testtools.run [test module path] Tox-created virtual environments (venv's) can also be activated after a tox run and reused for debugging:: $ tox -e venv $ . .tox/venv/bin/activate $ python -m testtools.run [test module path] Tox packages and installs the vmware-nsx source tree in a given venv on every invocation, but if modifications need to be made between invocation (e.g. adding more pdb statements), it is recommended that the source tree be installed in the venv in editable mode:: # run this only after activating the venv $ pip install --editable . Editable mode ensures that changes made to the source tree are automatically reflected in the venv, and that such changes are not overwritten during the next tox run. Post-mortem debugging ~~~~~~~~~~~~~~~~~~~~~ Setting OS_POST_MORTEM_DEBUGGER in the shell environment will ensure that the debugger .post_mortem() method will be invoked on test failure:: $ OS_POST_MORTEM_DEBUGGER=pdb ./run_tests.sh -d [test module path] Supported debuggers are pdb, and pudb. 
Pudb is full-screen, console-based visual debugger for Python which let you inspect variables, the stack, and breakpoints in a very visual way, keeping a high degree of compatibility with pdb:: $ ./.venv/bin/pip install pudb $ OS_POST_MORTEM_DEBUGGER=pudb ./run_tests.sh -d [test module path] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1662529 vmware-nsx-15.0.1.dev143/api-ref/0000755000175000017500000000000000000000000016522 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/api-ref/rest.md0000644000175000017500000006221300000000000020025 0ustar00coreycorey00000000000000# VMware NSX OpenStack Neutron REST API Extensions ## General Information This document describes the REST API extensions integrated with the VMware [NSX OpenStack neutron plugins](https://wiki.openstack.org/wiki/Neutron/VMware_NSX_plugins). The intent of this document is to supplement the [OpenStack neutron REST API guide](https://docs.openstack.org/api-ref/network/v2) by describing the extensions implemented by the VMware NSX neutron plugins. The VMware NSX neutron plugins implement [Neutron API extensions](https://wiki.openstack.org/wiki/NeutronDevelopment#API_Extensions) by defining new top-level REST resources, operations (e.g. verbs) and attribute extensions to existing neutron REST API entities (depending on the extension). As all extensions apply to the neutron REST API, the [general information](https://docs.openstack.org/api-ref/network/v2/#general-information) for the neutron API applies here as well. The VMware NSX neutron extensions supported by your plugin will depend on the version of VMware NSX used. Two versions described herein are: * [NSX for vSphere](https://www.vmware.com/support/pubs/nsx_pubs.html) aka 'NSX-V'. * [NSX-T](https://docs.vmware.com/en/VMware-NSX-T/index.html). 
## API Reference * [Advanced Service Providers](#advanced-service-providers) * [DHCP MTU](#dhcp-mtu) * [DNS Search Domain](#dns-search-domain) * [MAC Learning](#mac-learning) * [Provider Networks](#provider-networks) * [Provider Security Groups](#provider-security-groups) * [Router Size](#router-size) * [Router Type](#router-type) * [Security Group Rule IP Prefix](#security-group-rule-ip-prefix) * [Security Group Logging](#security-group-logging) * [VNIC Index](#vnic-index) ### [Advanced Service Providers](#advanced-service-providers) ###### Description This resource attribute extensions adds the `advanced_service_providers` attribute to neutron [subnets](https://docs.openstack.org/api-ref/network/v2/#subnets). This read-only attribute is a list of NSX advanced service provider IDs associated on a per-subnet basis. The advanced service provider IDs are populated by the plugin automatically when interfacing with the NSX manager backend. ###### Extension Type Resource attribute extension. ###### Supported NSX Versions NSX-V. ###### Supported Verbs None (read-only). ###### Extended Resource [subnet](https://docs.openstack.org/api-ref/network/v2/#subnets) ###### Extension Attribute(s) * `advanced_service_providers`: A list of NSX advanced service provider IDs (in `string` format) associated with the subnet. 
###### Example Response ```json { "subnet":{ "description":"", "enable_dhcp":true, "network_id":"7ea9964a-45b0-45eb-8b67-da47ce53cf5f", "tenant_id":"64b39295ba3942ca8be4a8a25d9b5157", "created_at":"2016-08-28T13:49:32", "dns_nameservers":[ ], "updated_at":"2016-08-28T13:49:32", "gateway_ip":"10.0.0.1", "ipv6_ra_mode":null, "allocation_pools":[ { "start":"10.0.0.2", "end":"10.0.0.254" } ], "host_routes":[ ], "advanced_service_providers":[ "edge-1", "edge-2" ], "ip_version":4, "ipv6_address_mode":null, "cidr":"10.0.0.0/24", "id":"f1153a28-8f36-4547-a024-3eb08e4e44b1", "subnetpool_id":null, "name":"private-subnet" } } ``` ### [DHCP MTU](#dhcp-mtu) ###### Description Extends neutron [subnets](https://docs.openstack.org/api-ref/network/v2/#subnets) providing the ability to specify per-subnet DHCP MTU via the `dhcp_mtu` attribute. ###### Extension Type Resource attribute extension. ###### Supported NSX Versions NSX-V. ###### Supported Verbs POST, PUT ###### Extended Resource [subnet](https://docs.openstack.org/api-ref/network/v2/#subnets) ###### Extension Attribute(s) * `dhcp_mtu`: The DHCP MTU to use for the associated subnet. Must be a valid DHCP MTU value between 68 and 65535. 
###### Example Response ```json { "subnet":{ "description":"", "enable_dhcp":true, "network_id":"91abf611-44a8-4c5e-bf19-92f91ee34d6d", "tenant_id":"16f24183154f4e51bebe3f10e810e19a", "created_at":"2016-09-16T16:28:34", "dhcp_mtu": 8048, "dns_nameservers":[ ], "updated_at":"2016-09-16T16:28:34", "gateway_ip":"192.168.1.1", "ipv6_ra_mode":null, "allocation_pools":[ { "start":"192.168.1.9", "end":"192.168.1.99" } ], "host_routes":[ ], "revision_number":2, "ip_version":4, "ipv6_address_mode":null, "cidr":"192.168.1.0/24", "project_id":"16f24183154f4e51bebe3f10e810e19a", "id":"8300a4ff-09db-4f64-955b-7e215044c9c3", "subnetpool_id":null, "name":"snet1" } } ``` ### [DNS Search Domain](#dns-search-domain) ###### Description Extends neutron [subnets](https://docs.openstack.org/api-ref/network/v2/#subnets) providing the ability to specify per-subnet DNS search via the `dns_search_domain` attribute. ###### Extension Type Resource attribute extension. ###### Supported NSX Versions NSX-V. ###### Supported Verbs POST, PUT ###### Extended Resource [subnet](https://docs.openstack.org/api-ref/network/v2/#subnets) ###### Extension Attribute(s) * `dns_search_domain`: The DNS search domain to use for networking on the associated subnet. The value must be a valid DNS search domain. 
###### Example Response ```json { "subnet":{ "description":"", "enable_dhcp":true, "network_id":"91abf611-44a8-4c5e-bf19-92f91ee34d6d", "tenant_id":"16f24183154f4e51bebe3f10e810e19a", "created_at":"2016-09-16T16:28:34", "dns_search_domain": "example.com", "dns_nameservers":[ ], "updated_at":"2016-09-16T16:28:34", "gateway_ip":"192.168.1.1", "ipv6_ra_mode":null, "allocation_pools":[ { "start":"192.168.1.9", "end":"192.168.1.99" } ], "host_routes":[ ], "revision_number":2, "ip_version":4, "ipv6_address_mode":null, "cidr":"192.168.1.0/24", "project_id":"16f24183154f4e51bebe3f10e810e19a", "id":"8300a4ff-09db-4f64-955b-7e215044c9c3", "subnetpool_id":null, "name":"snet1" } } ``` ### [MAC Learning](#mac-learning) ###### Description Extends neutron [ports](https://docs.openstack.org/api-ref/network/v2/#ports) providing the ability to enable MAC learning on the associated port via the `mac_learning_enabled` attribute. ###### Extension Type Resource attribute extension. ###### Supported NSX Versions NSX-T. ###### Supported Verbs POST, PUT ###### Extended Resource [ports](https://docs.openstack.org/api-ref/network/v2/#ports) ###### Extension Attribute(s) * `mac_learning_enabled`: A boolean value that indicates if MAC Learning is enabled on the associated port. 
###### Example Response ```json { "port":{ "allowed_address_pairs":[ ], "extra_dhcp_opts":[ ], "updated_at":"2016-09-16T16:28:35", "device_owner":"network:dhcp", "revision_number":3, "port_security_enabled":false, "mac_learning_enabled":true, "fixed_ips":[ { "subnet_id":"8300a4ff-09db-4f64-955b-7e215044c9c3", "ip_address":"192.168.1.9" } ], "id":"0093f4cc-f936-448a-9a25-ae57f66a6d57", "security_groups":[ ], "binding:vif_details":{ "port_filter":true, "nsx-logical-switch-id":"785f0bb4-3341-4e8c-abc4-cd3068f333f2" }, "binding:vif_type":"ovs", "mac_address":"fa:16:3e:2d:19:96", "project_id":"16f24183154f4e51bebe3f10e810e19a", "status":"ACTIVE", "binding:host_id":"l2b", "description":"", "device_id":"dhcp559b5e8d-0b9d-5e4c-a8ff-819ade66d01d-91abf611-44a8-4c5e-bf19-92f91ee34d6d", "name":"", "admin_state_up":true, "network_id":"91abf611-44a8-4c5e-bf19-92f91ee34d6d", "tenant_id":"16f24183154f4e51bebe3f10e810e19a", "created_at":"2016-09-16T16:28:35", "provider_security_groups":[ ], "binding:vnic_type":"normal" } } ``` ### [Provider Networks](#provider-networks) ###### Description The VMware NSX neutron plugins also support the [neutron provider networks extension](https://docs.openstack.org/neutron/latest/admin/archives/adv-features.html#provider-networks). Provider network extensions add [attributes](https://docs.openstack.org/neutron/latest/admin/archives/adv-features.html#provider-attributes) to neutron [networks](https://docs.openstack.org/api-ref/network/v2/#networks) enabling providers to map virtual networks onto physical networks, or in this case onto physical networks in NSX. ###### Extension Type Resource attribute extensions. ###### Supported NSX Versions NSX-T, NSX-V. ###### Supported Verbs See the [neutron provider networks extension](https://docs.openstack.org/api-ref/network/v2/#networks-provider-extended-attributes-networks) API reference documentation. 
###### Extended Resource * [networks](https://docs.openstack.org/api-ref/network/v2/#networks) ###### Extension Attribute(s) * `provider:network_type`: For the NSX plugins valid values are `flat` or `vlan`. * `provider:physical_network`: For the NSX plugins, this value should be the UUID of the NSX transport zone to bridge the network on. * `provider:segmentation_id`: For the NSX plugins, this value should be set to the VLAN identifier of the physical network, or unset of the network type is `flat`. ###### Example Response ```json { "network": { "status": "ACTIVE", "subnets": [ "54d6f61d-db07-451c-9ab3-b9609b6b6f0b" ], "name": "private-network", "router:external": false, "admin_state_up": true, "tenant_id": "4fd44f30292945e481c7b8a0c8908869", "created_at": "2016-03-08T20:19:41", "mtu": 0, "shared": true, "port_security_enabled": true, "provider:network_type": "vlan", "provider:physical_network": "00cff66d-5fa8-4fda-bd7d-87e372fe86c7", "provider:segmentation_id": 101, "updated_at": "2016-03-08T20:19:41", "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22" } } ``` ### [Provider Security Groups](#provider-security-groups) ###### Description This extension enables support for provider-only created/managed neutron [security groups](https://docs.openstack.org/api-ref/network/v2/#security-groups-security-groups). To enable this support a `provider` boolean attribute is added to neutron security groups indicating if the group is a provider-only group. Additionally, neutron [ports](https://docs.openstack.org/api-ref/network/v2/#ports) are extended with a `provider_security_groups` attribute that indicates a list of provider-only security groups belonging to the said port. ###### Extension Type Resource attribute extensions. ###### Supported NSX Versions NSX-T, NSX-V. ###### Supported Verbs The `provider` attribute on neutron security groups is only settable during creation (POST). However the `provider_security_groups` attribute on ports supports both POST and PUT. 
###### Extended Resource * [ports](https://docs.openstack.org/api-ref/network/v2/#ports) * [security groups](https://docs.openstack.org/api-ref/network/v2/#security-groups-security-groups) ###### Extension Attribute(s) * `provider`: A boolean indicating if the security group is provider-only. * `provider_security_groups`: A list of provider-only security group UUIDs associated with a said port. ###### Example Response GET security-group ```json { "security_group":{ "logging":false, "description":"My security group", "tenant_id":"1efff4cd762944a6bbdb6d3bba0468ef", "created_at":"2016-09-16T16:34:55", "updated_at":"2016-09-16T16:34:55", "provider":true, "security_group_rules":[ { "local_ip_prefix":null, "direction":"ingress", "protocol":null, "description":null, "port_range_max":null, "updated_at":"2016-09-16T16:34:55", "revision_number":1, "id":"98acaf6e-0b9d-45d6-b4ec-d9dd0df3a52b", "remote_group_id":"3a729518-0214-44d6-9f25-704db70710a5", "remote_ip_prefix":null, "created_at":"2016-09-16T16:34:55", "security_group_id":"3a729518-0214-44d6-9f25-704db70710a5", "tenant_id":"1efff4cd762944a6bbdb6d3bba0468ef", "port_range_min":null, "ethertype":"IPv6", "project_id":"1efff4cd762944a6bbdb6d3bba0468ef" }, { "local_ip_prefix":null, "direction":"egress", "protocol":null, "description":null, "port_range_max":null, "updated_at":"2016-09-16T16:34:55", "revision_number":1, "id":"9fba2f50-9eef-48c0-8b45-c2fae98e7294", "remote_group_id":null, "remote_ip_prefix":null, "created_at":"2016-09-16T16:34:55", "security_group_id":"3a729518-0214-44d6-9f25-704db70710a5", "tenant_id":"1efff4cd762944a6bbdb6d3bba0468ef", "port_range_min":null, "ethertype":"IPv4", "project_id":"1efff4cd762944a6bbdb6d3bba0468ef" }, { "local_ip_prefix":null, "direction":"egress", "protocol":null, "description":null, "port_range_max":null, "updated_at":"2016-09-16T16:34:55", "revision_number":1, "id":"c2eecacb-5328-4081-8fe7-701777fbb2a1", "remote_group_id":null, "remote_ip_prefix":null, 
"created_at":"2016-09-16T16:34:55", "security_group_id":"3a729518-0214-44d6-9f25-704db70710a5", "tenant_id":"1efff4cd762944a6bbdb6d3bba0468ef", "port_range_min":null, "ethertype":"IPv6", "project_id":"1efff4cd762944a6bbdb6d3bba0468ef" }, { "local_ip_prefix":null, "direction":"ingress", "protocol":null, "description":null, "port_range_max":null, "updated_at":"2016-09-16T16:34:55", "revision_number":1, "id":"e073a066-bc14-41e7-939b-84ec4af0606f", "remote_group_id":"3a729518-0214-44d6-9f25-704db70710a5", "remote_ip_prefix":null, "created_at":"2016-09-16T16:34:55", "security_group_id":"3a729518-0214-44d6-9f25-704db70710a5", "tenant_id":"1efff4cd762944a6bbdb6d3bba0468ef", "port_range_min":null, "ethertype":"IPv4", "project_id":"1efff4cd762944a6bbdb6d3bba0468ef" } ], "revision_number":1, "provider":false, "project_id":"1efff4cd762944a6bbdb6d3bba0468ef", "id":"3a729518-0214-44d6-9f25-704db70710a5", "name":"my provider group" } } ``` GET port ```json { "port":{ "allowed_address_pairs":[ ], "extra_dhcp_opts":[ ], "updated_at":"2016-09-16T16:28:35", "device_owner":"network:dhcp", "revision_number":3, "port_security_enabled":false, "provider_security_groups":["910da4ff-09db-4f64-955b-7e215044ca56"], "fixed_ips":[ { "subnet_id":"8300a4ff-09db-4f64-955b-7e215044c9c3", "ip_address":"192.168.1.9" } ], "id":"0093f4cc-f936-448a-9a25-ae57f66a6d57", "security_groups":[ ], "binding:vif_details":{ "port_filter":true, "nsx-logical-switch-id":"785f0bb4-3341-4e8c-abc4-cd3068f333f2" }, "binding:vif_type":"ovs", "mac_address":"fa:16:3e:2d:19:96", "project_id":"16f24183154f4e51bebe3f10e810e19a", "status":"ACTIVE", "binding:host_id":"l2b", "description":"", "device_id":"dhcp559b5e8d-0b9d-5e4c-a8ff-819ade66d01d-91abf611-44a8-4c5e-bf19-92f91ee34d6d", "name":"", "admin_state_up":true, "network_id":"91abf611-44a8-4c5e-bf19-92f91ee34d6d", "tenant_id":"16f24183154f4e51bebe3f10e810e19a", "created_at":"2016-09-16T16:28:35", "provider_security_groups":[ ], "binding:vnic_type":"normal" } } ``` ### 
[Router Size](#router-size) ###### Description Extends neutron [routers](https://docs.openstack.org/api-ref/network/v2/#routers-routers) by adding the `router_size` attribute to support configuration of NSX-V edge size. ###### Extension Type Resource attribute extension. ###### Supported NSX Versions NSX-V. ###### Supported Verbs POST, PUT ###### Extended Resource [routers](https://docs.openstack.org/api-ref/network/v2/#routers-routers) ###### Extension Attribute(s) * `router_size`: The NSX-V edge size to use. ###### Example Response ```json { "router":{ "admin_state_up":true, "availability_zone_hints":[ ], "availability_zones":[ "nova" ], "description":"", "router_size":"xlarge", "distributed":false, "external_gateway_info":{ "enable_snat":true, "external_fixed_ips":[ { "ip_address":"172.24.4.6", "subnet_id":"b930d7f6-ceb7-40a0-8b81-a425dd994ccf" }, { "ip_address":"2001:db8::9", "subnet_id":"0c56df5d-ace5-46c8-8f4c-45fa4e334d18" } ], "network_id":"ae34051f-aa6c-4c75-abf5-50dc9ac99ef3" }, "ha":false, "id":"f8a44de0-fc8e-45df-93c7-f79bf3b01c95", "name":"router1", "routes":[ ], "status":"ACTIVE", "tenant_id":"0bd18306d801447bb457a46252d82d13" } } ``` ### [Router Type](#router-type) ###### Description Extends neutron [routers](https://docs.openstack.org/api-ref/network/v2/#routers-routers) by adding the `router_type` attribute to support configuration of NSX-V router type. ###### Extension Type Resource attribute extension. ###### Supported NSX Versions NSX-V. ###### Supported Verbs POST, PUT ###### Extended Resource [routers](https://docs.openstack.org/api-ref/network/v2/#routers-routers) ###### Extension Attribute(s) * `router_type`: The NSX-V router type. Must be either `shared` or `exclusive`. 
###### Example Response ```json { "router":{ "admin_state_up":true, "availability_zone_hints":[ ], "availability_zones":[ "nova" ], "description":"", "router_type":"exclusive", "distributed":false, "external_gateway_info":{ "enable_snat":true, "external_fixed_ips":[ { "ip_address":"172.24.4.6", "subnet_id":"b930d7f6-ceb7-40a0-8b81-a425dd994ccf" }, { "ip_address":"2001:db8::9", "subnet_id":"0c56df5d-ace5-46c8-8f4c-45fa4e334d18" } ], "network_id":"ae34051f-aa6c-4c75-abf5-50dc9ac99ef3" }, "ha":false, "id":"f8a44de0-fc8e-45df-93c7-f79bf3b01c95", "name":"router1", "routes":[ ], "status":"ACTIVE", "tenant_id":"0bd18306d801447bb457a46252d82d13" } } ``` ### [Security Group Rule IP Prefix](#security-group-rule-ip-prefix) ###### Description Extends neutron [security group rules](https://docs.openstack.org/api-ref/network/v2/#security-group-rules-security-group-rules) by adding a `local_ip_prefix` attribute allowing rules to be created with IP prefixes. ###### Extension Type Resource attribute extension. ###### Supported NSX Versions NSX-T, NSXv. ###### Supported Verbs POST; using an IP prefix on a rule can only be done when creating the rule. ###### Extended Resource [security group rules](https://docs.openstack.org/api-ref/network/v2/#security-group-rules-security-group-rules) ###### Extension Attribute(s) * `local_ip_prefix`: The local IP prefix used for the rule. ###### Example Response ```json { "security_group_rule":{ "direction":"ingress", "port_range_min":"80", "ethertype":"IPv4", "port_range_max":"80", "protocol":"tcp", "local_prefix_ip":"239.240.1.0/16", "remote_prefix_ip":"192.168.1.0/24", "security_group_id":"a7734e61-b545-452d-a3cd-0189cbd9747a" } } ``` ### [Security Group Logging](#security-group-logging) ###### Description Extends neutron [security groups](https://docs.openstack.org/api-ref/network/v2/#security-groups-security-groups) with a boolean attribute `logging` to enable per security group logging on NSX. 
###### Extension Type Resource attribute extension. ###### Supported NSX Versions NSX-T, NSX-V. ###### Supported Verbs POST, PUT. ###### Extended Resource [security groups](https://docs.openstack.org/api-ref/network/v2/#security-groups-security-groups) ###### Extension Attribute(s) * `logging`: A boolean attribute indicating if logging is enabled for the group. ###### Example Response ```json { "security_group":{ "Description":"logged secgroup", "id":"85cc3048-abc3-43cc-89b3-377341426ac5", "name":"logged secgroup", "logging":true, "security_group_rules":[ { "direction":"egress", "ethertype":"IPv6", "id":"3c0e45ff-adaf-4124-b083-bf390e5482ff", "port_range_max":null, "port_range_min":null, "protocol":null, "remote_group_id":null, "remote_ip_prefix":null, "security_group_id":"85cc3048-abc3-43cc-89b3-377341426ac5", "tenant_id":"e4f50856753b4dc6afee5fa6b9b6c550" }, { "direction":"egress", "ethertype":"IPv4", "id":"93aa42e5-80db-4581-9391-3a608bd0e448", "port_range_max":null, "port_range_min":null, "protocol":null, "remote_group_id":null, "remote_ip_prefix":null, "security_group_id":"85cc3048-abc3-43cc-89b3-377341426ac5", "tenant_id":"e4f50856753b4dc6afee5fa6b9b6c550" }, { "direction":"ingress", "ethertype":"IPv6", "id":"c0b09f00-1d49-4e64-a0a7-8a186d928138", "port_range_max":null, "port_range_min":null, "protocol":null, "remote_group_id":"85cc3048-abc3-43cc-89b3-377341426ac5", "remote_ip_prefix":null, "security_group_id":"85cc3048-abc3-43cc-89b3-377341426ac5", "tenant_id":"e4f50856753b4dc6afee5fa6b9b6c550" }, { "direction":"ingress", "ethertype":"IPv4", "id":"f7d45c89-008e-4bab-88ad-d6811724c51c", "port_range_max":null, "port_range_min":null, "protocol":null, "remote_group_id":"85cc3048-abc3-43cc-89b3-377341426ac5", "remote_ip_prefix":null, "security_group_id":"85cc3048-abc3-43cc-89b3-377341426ac5", "tenant_id":"e4f50856753b4dc6afee5fa6b9b6c550" } ], "tenant_id":"e4f50856753b4dc6afee5fa6b9b6c550" } } ``` ### [VNIC Index](#vnic-index) ###### Description Extends neutron 
[ports](https://docs.openstack.org/api-ref/network/v2/#ports) by adding the `vnic_index` attribute enabling per-port assignment of a VNIC index. ###### Extension Type Resource attribute extension. ###### Supported NSX Versions NSX-V. ###### Supported Verbs POST, PUT. ###### Extended Resource [ports](https://docs.openstack.org/api-ref/network/v2/#ports) ###### Extension Attribute(s) * `vnic_index`: The VNIC index (integer value) assigned to the port. ###### Example Response ```json { "port":{ "status":"ACTIVE", "vnic_index":3, "name":"", "allowed_address_pairs":[ ], "admin_state_up":true, "network_id":"a87cc70a-3e15-4acf-8205-9b711a3531b7", "tenant_id":"7e02058126cc4950b75f9970368ba177", "created_at":"2016-03-08T20:19:41", "extra_dhcp_opts":[ ], "device_owner":"network:router_interface", "mac_address":"fa:16:3e:23:fd:d7", "fixed_ips":[ { "subnet_id":"a0304c3a-4f08-4c43-88af-d796509c97d2", "ip_address":"10.0.0.1" } ], "id":"46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2", "updated_at":"2016-03-08T20:19:41", "security_groups":[ ], "device_id":"5e3898d7-11be-483e-9732-b2f5eccd2b2e" } } ``` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/babel.cfg0000644000175000017500000000002000000000000016715 0ustar00coreycorey00000000000000[python: **.py] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/bindep.txt0000644000175000017500000000202100000000000017174 0ustar00coreycorey00000000000000# This file contains runtime (non-python) dependencies # More info at: http://docs.openstack.org/infra/bindep/readme.html # tools/misc-sanity-checks.sh validates .po[t] files gettext [test] # cffi (required by oslo.privsep) and PyNaCL (required by paramiko) libffi-dev [platform:dpkg] libffi-devel [platform:rpm] # MySQL and PostgreSQL databases since some jobs are set up in # OpenStack infra that need these like # 
periodic-neutron-py35-with-neutron-lib-master. haproxy libmysqlclient-dev [platform:dpkg test] mysql [platform:rpm test] mysql-client [platform:dpkg test] mysql-devel [platform:rpm test] mysql-server [test] postgresql-server-dev-all [platform:dpkg] postgresql [test] postgresql-client [platform:dpkg test] postgresql-devel [platform:rpm test] postgresql-server [platform:rpm test] # Neutron's test-requirements requires tempest which requires paramiko # which requires cryptography which requires ssl. libssl-dev [platform:dpkg] openssl-devel [platform:rpm !platform:suse] libopenssl-devel [platform:suse !platform:rpm] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1662529 vmware-nsx-15.0.1.dev143/devstack/0000755000175000017500000000000000000000000017003 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/devstack/README.rst0000644000175000017500000000404100000000000020471 0ustar00coreycorey00000000000000======================== Devstack external plugin ======================== Add and set the following in your local.conf/localrc file: enable_plugin vmware-nsx https://git.openstack.org/openstack/vmware-nsx For Nsx-mh: ----------- Q_PLUGIN=vmware_nsx PUBLIC_BRIDGE # bridge used for external connectivity, typically br-ex NSX_GATEWAY_NETWORK_INTERFACE # interface used to communicate with the NSX Gateway NSX_GATEWAY_NETWORK_CIDR # CIDR to configure $PUBLIC_BRIDGE, e.g. 172.24.4.211/24 For Nsx-v: ---------- Q_PLUGIN=vmware_nsx_v NSXV_MANAGER_URI # URL for NSXv manager (e.g - https://management_ip). NSXV_USER # NSXv username. NSXV_PASSWORD # NSXv password. NSXV_CLUSTER_MOID # clusters ids containing OpenStack hosts. NSXV_DATACENTER_MOID # datacenter id for edge deployment. NSXV_RESOURCE_POOL_ID # resource-pool id for edge deployment. 
NSXV_AVAILABILITY_ZONES # alternative resource-pools/data stores ids/edge_ha for edge deployment NSXV_DATASTORE_ID # datastore id for edge deployment. NSXV_EXTERNAL_NETWORK # id of logic switch for physical network connectivity. NSXV_VDN_SCOPE_ID # network scope id for VXLAN virtual-wires. NSXV_DVS_ID # Dvs id for VLAN based networks. NSXV_BACKUP_POOL # backup edge pools management range, # :[edge_size]::. # edge_type:'service'(service edge) or 'vdr'(distributed edge). # edge_size: 'compact', 'large'(by default), 'xlarge' or 'quadlarge'. # To enable the metadata service, the following variables should be also set: NSXV_MGT_NET_PROXY_IPS # management network IP address for metadata proxy. NSXV_MGT_NET_PROXY_NETMASK # management network netmask for metadata proxy. NSXV_NOVA_METADATA_IPS # IP addresses used by Nova metadata service. NSXV_NOVA_METADATA_PORT # TCP Port used by Nova metadata server. NSXV_MGT_NET_MOID # Network ID for management network connectivity ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1662529 vmware-nsx-15.0.1.dev143/devstack/lib/0000755000175000017500000000000000000000000017551 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/devstack/lib/nsx_common0000644000175000017500000001731400000000000021662 0ustar00coreycorey00000000000000#!/bin/bash # Copyright 2015 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # Common VMware NSXv, NSXv3 and NSXp plugin # ----------------------------------- # ensure we don't re-source this in the same environment [[ -z "$_NSX_COMMON" ]] || return 0 declare -r -g _NSX_COMMON=1 function _nsxv_ini_set { if [[ $2 != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE nsxv $1 $2 fi } function install_neutron_projects { pkg_list="networking-l2gw networking-sfc neutron-fwaas neutron-dynamic-routing neutron-vpnaas vmware-nsxlib" for pkg in `echo $pkg_list` do pkg_renamed=`echo $pkg | sed 's/-/_/g'` sudo rm -rf /usr/local/lib/python2.7/dist-packages/${pkg_renamed}* sudo rm -rf /usr/local/lib/python3.5/dist-packages/${pkg_renamed}* sudo rm -rf ./src/${pkg_renamed}* if is_plugin_enabled $pkg; then echo "Plugin $pkg enabled explicitly with enable_plugin" elif use_library_from_git $pkg; then echo "Project $pkg enabled explicitly from LIBS_FROM_GIT" else sudo -H ${PIP} install -e "git+https://opendev.org/openstack/${pkg}@${NEUTRON_BRANCH}#egg=${pkg_renamed}" sudo chown -R ${USER}:${USER} src/${pkg} fi done # install neutron and octavia separately to not delete neutron-lib & octavia-lib sudo rm -rf /usr/local/lib/python2.7/dist-packages/neutron sudo rm -rf /usr/local/lib/python2.7/dist-packages/neutron.egg* sudo rm -rf /usr/local/lib/python3.5/dist-packages/neutron sudo rm -rf /usr/local/lib/python3.5/dist-packages/neutron.egg* if is_service_enabled neutron; then echo "service Neutron is enabled explicitly by devstack" else sudo -H ${PIP} install -e "git+https://opendev.org/openstack/neutron@${NEUTRON_BRANCH}#egg=neutron" sudo chown -R ${USER}:${USER} src/neutron fi sudo rm -rf /usr/local/lib/python2.7/dist-packages/octavia sudo rm -rf /usr/local/lib/python2.7/dist-packages/octavia.egg* sudo rm -rf /usr/local/lib/python3.5/dist-packages/octavia sudo rm -rf /usr/local/lib/python3.5/dist-packages/octavia.egg* if is_service_enabled octavia; then echo "service 
octavia is enabled explicitly by devstack" else sudo -H ${PIP} install -e "git+https://opendev.org/openstack/octavia@${NEUTRON_BRANCH}#egg=octavia" sudo chown -R ${USER}:${USER} src/octavia fi } function nsxv_configure_service { install_neutron_projects if [[ "$NSX_L2GW_DRIVER" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_l2gw_driver $NSX_L2GW_DRIVER fi _nsxv_ini_set password "$NSXV_PASSWORD" _nsxv_ini_set user "$NSXV_USER" _nsxv_ini_set vdn_scope_id "$NSXV_VDN_SCOPE_ID" _nsxv_ini_set dvs_id "$NSXV_DVS_ID" _nsxv_ini_set manager_uri "$NSXV_MANAGER_URI" _nsxv_ini_set ca_file "$NSXV_CA_FILE" _nsxv_ini_set insecure "$NSXV_INSECURE" _nsxv_ini_set datacenter_moid "$NSXV_DATACENTER_MOID" _nsxv_ini_set datastore_id "$NSXV_DATASTORE_ID" _nsxv_ini_set resource_pool_id "$NSXV_RESOURCE_POOL_ID" _nsxv_ini_set availability_zones "$NSXV_AVAILABILITY_ZONES" _nsxv_ini_set external_network "$NSXV_EXTERNAL_NETWORK" _nsxv_ini_set cluster_moid "$NSXV_CLUSTER_MOID" _nsxv_ini_set backup_edge_pool "$NSXV_BACKUP_POOL" _nsxv_ini_set mgt_net_proxy_ips "$NSXV_MGT_NET_PROXY_IPS" _nsxv_ini_set mgt_net_moid "$NSXV_MGT_NET_MOID" _nsxv_ini_set mgt_net_proxy_netmask "$NSXV_MGT_NET_PROXY_NETMASK" _nsxv_ini_set nova_metadata_port "$NSXV_NOVA_METADATA_PORT" _nsxv_ini_set nova_metadata_ips "$NSXV_NOVA_METADATA_IPS" _nsxv_ini_set metadata_shared_secret "$NSXV_METADATA_SHARED_SECRET" _nsxv_ini_set metadata_insecure "$NSXV_METADATA_INSECURE" _nsxv_ini_set metadata_nova_client_cert "$NSXV_METADATA_NOVA_CERT" _nsxv_ini_set metadata_nova_client_priv_key "$NSXV_METADATA_NOVA_PRIV_KEY" _nsxv_ini_set metadata_service_allowed_ports "$NSXV_METADATA_SERVICE_ALLOWED_PORTS" _nsxv_ini_set edge_ha "$NSXV_EDGE_HA" _nsxv_ini_set exclusive_router_appliance_size "$NSXV_EXCLUSIVE_ROUTER_APPLIANCE_SIZE" _nsxv_ini_set use_dvs_features "$NSXV_USE_DVS_FEATURES" _nsxv_ini_set use_nsx_policies "$NSXV_USE_NSX_POLICIES" _nsxv_ini_set default_policy_id "$NSXV_DEFAULT_POLICY_ID" _nsxv_ini_set allow_tenant_rules_with_policy 
"$NSXV_ALLOW_TENANT_RULES_WITH_POLICY" } function _dvs_ini_set { if [[ $2 != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE dvs $1 $2 fi } function dvs_configure_service { _dvs_ini_set host_ip $1 _dvs_ini_set host_username $2 _dvs_ini_set host_password $3 _dvs_ini_set ca_file $4 _dvs_ini_set insecure $5 _dvs_ini_set dvs_name $6 } function _nsxv3_ini_set { if [[ -z $1 || -z $2 ]]; then if [[ $3 != "" ]]; then die $LINENO $3 fi fi if [[ $2 != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE nsx_v3 $1 $2 fi } function _nsxp_ini_set { if [[ -z $1 || -z $2 ]]; then if [[ $3 != "" ]]; then die $LINENO $3 fi fi if [[ $2 != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE nsx_p $1 $2 fi } function nsxv3_configure_service { install_neutron_projects if [[ $1 == "nsx_v3" ]]; then _nsxv3_ini_set default_overlay_tz $DEFAULT_OVERLAY_TZ_UUID "The VMware NSX plugin won't work without a default transport zone." else _nsxv3_ini_set default_overlay_tz $DEFAULT_OVERLAY_TZ_UUID fi _nsxv3_ini_set default_vlan_tz $DEFAULT_VLAN_TZ_UUID if [[ "$DEFAULT_TIER0_ROUTER_UUID" != "" ]]; then _nsxv3_ini_set default_tier0_router $DEFAULT_TIER0_ROUTER_UUID Q_L3_ENABLED=True Q_L3_ROUTER_PER_TENANT=True fi # NSX_MANAGER must be a comma separated string if [[ "$NSX_MANAGERS" != "" ]]; then _nsxv3_ini_set nsx_api_managers $NSX_MANAGERS elif [[ "$NSX_MANAGER" != "" ]]; then _nsxv3_ini_set nsx_api_managers $NSX_MANAGER else if [[ $1 == "nsx_v3" ]]; then die $LINENO "The VMware NSX plugin needs at least one NSX manager." 
fi fi if [[ "$NSX_L2GW_DRIVER" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_l2gw_driver $NSX_L2GW_DRIVER fi _nsxv3_ini_set ens_support $ENS_SUPPORT _nsxv3_ini_set nsx_api_user $NSX_USER _nsxv3_ini_set nsx_api_password $NSX_PASSWORD _nsxv3_ini_set retries $NSX_RETRIES _nsxv3_ini_set insecure $NSX_INSECURE _nsxv3_ini_set ca_file $NSX_CA_FILE _nsxv3_ini_set default_bridge_cluster $DEFAULT_BRIDGE_CLUSTER_UUID _nsxv3_ini_set native_dhcp_metadata $NATIVE_DHCP_METADATA if [[ "$NATIVE_DHCP_METADATA" == "True" ]]; then _nsxv3_ini_set native_metadata_route $NATIVE_METADATA_ROUTE _nsxv3_ini_set dhcp_profile $DHCP_PROFILE_UUID _nsxv3_ini_set metadata_proxy $METADATA_PROXY_UUID _nsxv3_ini_set dhcp_relay_service $DHCP_RELAY_SERVICE iniset $NEUTRON_CONF DEFAULT dhcp_agent_notification False fi if [[ "$NSX_USE_CLIENT_CERT_AUTH" == "True" ]]; then _nsxv3_ini_set nsx_use_client_auth "True" _nsxv3_ini_set nsx_client_cert_file "$CLIENT_CERT_FILE" _nsxv3_ini_set nsx_client_cert_storage "nsx-db" _nsxv3_ini_set nsx_client_cert_pk_password "openstack" fi } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/devstack/lib/nsx_v3_p_common0000644000175000017500000002046700000000000022614 0ustar00coreycorey00000000000000#!/bin/bash # Copyright 2018 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# Common code for VMware NSXv3 and NSXp plugins which share the same backend # -------------------------------------------------------------------------- # Settings previously defined in devstack:lib/neutron-legacy NEUTRON_CONF_DIR=/etc/neutron export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"} Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini # The interface which has connectivity to the NSX Gateway uplink NSX_GATEWAY_NETWORK_INTERFACE=${NSX_GATEWAY_NETWORK_INTERFACE:-} # Override default 'True' in devstack:lib/neutron_plugins/services/l3 Q_USE_PROVIDERNET_FOR_PUBLIC=False # Native support from platform NATIVE_DHCP_METADATA=${NATIVE_DHCP_METADATA:-True} NATIVE_METADATA_ROUTE=${NATIVE_METADATA_ROUTE:-169.254.169.254/31} METADATA_PROXY_SHARED_SECRET=${METADATA_PROXY_SHARED_SECRET:-} # File to store client certificate and PK CLIENT_CERT_FILE=${DEST}/data/neutron/client.pem source $TOP_DIR/lib/neutron_plugins/ovs_base function is_neutron_ovs_base_plugin { # This allows the deployer to decide whether devstack should install OVS. # By default, we install OVS, to change this behavior add "OVS_BASE=1" to your localrc file. # Note: Any KVM compute must have OVS installed on it. 
return ${OVS_BASE:-0} } function neutron_plugin_create_nova_conf { if [[ "$VIRT_DRIVER" != 'vsphere' ]]; then # if n-cpu or octavia is enabled, then setup integration bridge if is_service_enabled n-cpu || is_service_enabled octavia ; then setup_integration_bridge if is_service_enabled n-cpu ; then iniset $NOVA_CONF neutron ovs_bridge $OVS_BRIDGE fi fi fi # if n-api is enabled, then setup the metadata_proxy_shared_secret if is_service_enabled n-api; then iniset $NOVA_CONF neutron service_metadata_proxy True if [[ "$NATIVE_DHCP_METADATA" == "True" ]]; then iniset $NOVA_CONF neutron metadata_proxy_shared_secret $METADATA_PROXY_SHARED_SECRET if [[ "$METADATA_PROXY_USE_HTTPS" == "True" ]]; then iniset $NOVA_CONF DEFAULT enabled_ssl_apis metadata if [[ "$METADATA_PROXY_CERT_FILE" != "" ]]; then iniset $NOVA_CONF wsgi ssl_cert_file $METADATA_PROXY_CERT_FILE fi if [[ "$METADATA_PROXY_PRIV_KEY_FILE" != "" ]]; then iniset $NOVA_CONF wsgi ssl_key_file $METADATA_PROXY_PRIV_KEY_FILE fi fi fi fi # if n-api-meta is enabled, then setup https on n-api-meta if is_service_enabled n-api-meta; then if [[ "$NATIVE_DHCP_METADATA" == "True" && "$METADATA_PROXY_USE_HTTPS" == "True" ]]; then inidelete $NOVA_METADATA_UWSGI_CONF uwsgi http https=":8775,$METADATA_PROXY_CERT_FILE,$METADATA_PROXY_PRIV_KEY_FILE" iniset $NOVA_METADATA_UWSGI_CONF uwsgi https $https fi fi } function neutron_plugin_configure_l3_agent { # VMware NSX plugin does not run L3 agent die $LINENO "q-l3 should not be executed with VMware NSX plugin!" } function neutron_plugin_configure_plugin_agent { # VMware NSX plugin does not run L2 agent die $LINENO "q-agt must not be executed with VMware NSX plugin!" 
} function get_bridge_up { # NOTE(armando-migliaccio): if running in a nested environment this will work # only with mac learning enabled, portsecurity and security profiles disabled # The public bridge might not exist for the NSX plugin if Q_USE_DEBUG_COMMAND is off # Try to create it anyway sudo ovs-vsctl --may-exist add-br $PUBLIC_BRIDGE sudo ovs-vsctl --may-exist add-port $PUBLIC_BRIDGE $NSX_GATEWAY_NETWORK_INTERFACE # Flush all existing addresses on public bridge sudo ip addr flush dev $PUBLIC_BRIDGE nsx_gw_net_if_mac=$(ip link show $NSX_GATEWAY_NETWORK_INTERFACE | awk '/ether/ {print $2}') sudo ip link set address $nsx_gw_net_if_mac dev $PUBLIC_BRIDGE for address in $addresses; do sudo ip addr add dev $PUBLIC_BRIDGE $address done sudo ip addr add dev $PUBLIC_BRIDGE $NSX_GATEWAY_NETWORK_CIDR sudo ip link set $PUBLIC_BRIDGE up } function set_nsx_gateway_network_cidr { if ! is_set NSX_GATEWAY_NETWORK_CIDR; then NSX_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} echo "The IP address expected on $PUBLIC_BRIDGE was not specified. " echo "Defaulting to "$NSX_GATEWAY_NETWORK_CIDR fi sudo ip addr del $NSX_GATEWAY_NETWORK_CIDR dev $PUBLIC_BRIDGE # Save and then flush remaining addresses on the interface addresses=$(ip addr show dev $PUBLIC_BRIDGE | grep inet | awk {'print $2'}) sudo ip addr flush $PUBLIC_BRIDGE # Try to detach physical interface from PUBLIC_BRIDGE sudo ovs-vsctl del-port $NSX_GATEWAY_NETWORK_INTERFACE # Restore addresses on NSX_GATEWAY_NETWORK_INTERFACE for address in $addresses; do sudo ip addr add dev $NSX_GATEWAY_NETWORK_INTERFACE $address done } function setup_integration_bridge_common { die_if_not_set $LINENO NSX_USER "NSX_USER has not been set!" die_if_not_set $LINENO NSX_PASSWORD "NSX_PASSWORD has not been set!" 
# Ensure that the OVS params are set for the OVS utils iniset $NEUTRON_CONF DEFAULT ovs_integration_bridge $OVS_BRIDGE iniset $NEUTRON_CONF OVS ovsdb_connection $(_ovsdb_connection) iniset $NEUTRON_CONF OVS ovsdb_interface vsctl _neutron_ovs_base_add_bridge $OVS_BRIDGE sudo ovs-vsctl set bridge $OVS_BRIDGE external_ids:bridge-id=nsx-managed sudo ovs-vsctl set-manager $(_ovsdb_connection) } function neutron_plugin_install_agent_packages { # VMware NSX Plugin does not run q-agt, but it currently needs dhcp and metadata agents _neutron_ovs_base_install_agent_packages } function _version { echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }'; } function neutron_plugin_configure_common_v3 { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/vmware Q_PLUGIN_CONF_FILENAME=nsx.ini Q_PLUGIN_SRC_CONF_PATH=vmware-nsx/etc VMWARE_NSX_DIR=vmware-nsx # Uses oslo config generator to generate sample configuration file (cd $DEST/$VMWARE_NSX_DIR && exec ./tools/generate_config_file_samples.sh) mkdir -p /$Q_PLUGIN_CONF_PATH cp $DEST/$Q_PLUGIN_SRC_CONF_PATH/nsx.ini.sample /$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR/policy.d cp -vr $DEST/$Q_PLUGIN_SRC_CONF_PATH/policy.d/* $NEUTRON_CONF_DIR/policy.d/ Q_PLUGIN_CLASS=$1 } function neutron_plugin_configure_debug_command { sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE } function neutron_plugin_configure_dhcp_agent { setup_integration_bridge iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata True iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network True iniset $Q_DHCP_CONF_FILE DEFAULT ovs_use_veth True iniset $Q_DHCP_CONF_FILE DEFAULT ovs_integration_bridge $OVS_BRIDGE iniset $Q_DHCP_CONF_FILE OVS ovsdb_connection $(_ovsdb_connection) iniset $Q_DHCP_CONF_FILE OVS ovsdb_interface vsctl } function neutron_plugin_setup_interface_driver { local conf_file=$1 iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver } function 
neutron_plugin_check_adv_test_requirements { is_service_enabled q-dhcp && return 0 } function _ovsdb_connection_common { NSX_POLICY_IP=$1 NSX_VER=$(curl -1 -s -k -u "$NSX_USER:$NSX_PASSWORD" -H 'Accept: application/json' https://$NSX_POLICY_IP/api/v1/node | python -c 'import sys, json; print json.load(sys.stdin)["node_version"][:5]') if [ $(_version $NSX_VER) -ge $(_version 1.1.0) ]; then echo "unix:/var/run/vmware/nsx-agent/nsxagent_ovsdb.sock" else echo "tcp:127.0.0.1:6632" fi } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/devstack/lib/vmware_dvs0000644000175000017500000000725700000000000021664 0ustar00coreycorey00000000000000#!/bin/bash # Copyright 2015 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# Neutron VMware DVS plugin # ------------------------- # Settings previously defined in devstack:lib/neutron-legacy NEUTRON_CONF_DIR=/etc/neutron export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"} Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini # Save trace setting DVS_XTRACE=$(set +o | grep xtrace) set +o xtrace source $TOP_DIR/lib/neutron_plugins/ovs_base source $dir/lib/nsx_common DVS_BRIDGE=${DVS_BRIDGE:-br-dvs} DVS_INTERFACE=${DVS_INTERFACE:-eth1} function setup_integration_bridge { # remove integration bridge created by Neutron for bridge in $(sudo ovs-vsctl list-br | grep -o -e $DVS_BRIDGE); do sudo ovs-vsctl del-br ${bridge} done _neutron_ovs_base_setup_bridge $DVS_BRIDGE sudo ovs-vsctl add-port $DVS_BRIDGE $DVS_INTERFACE } function is_neutron_ovs_base_plugin { # DVS uses OVS, but not the l3-agent return 0 } function neutron_plugin_create_nova_conf { # if n-cpu is enabled, then setup integration bridge if is_service_enabled n-cpu; then setup_integration_bridge fi } function neutron_plugin_install_agent_packages { # VMware DVS Plugin does not run q-agt, but it currently needs dhcp and metadata agents _neutron_ovs_base_install_agent_packages } function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/vmware Q_PLUGIN_CONF_FILENAME=nsx.ini Q_PLUGIN_SRC_CONF_PATH=vmware-nsx/etc VMWARE_NSX_DIR=vmware-nsx # Uses oslo config generator to generate sample configuration file (cd $DEST/$VMWARE_NSX_DIR && exec ./tools/generate_config_file_samples.sh) mkdir -p /$Q_PLUGIN_CONF_PATH cp $DEST/$Q_PLUGIN_SRC_CONF_PATH/nsx.ini.sample /$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME Q_PLUGIN_CLASS="vmware_dvs" } function neutron_plugin_configure_debug_command { # TBD (garyk) : } function neutron_plugin_configure_dhcp_agent { iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata True iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network True iniset $Q_DHCP_CONF_FILE DEFAULT ovs_integration_bridge $OVS_BRIDGE 
iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_driver "vmware_nsx.plugins.dvs.dhcp.Dnsmasq" } function neutron_plugin_configure_l3_agent { # VMware DVS plugin does not run L3 agent die $LINENO "q-l3 should not be executed with VMware DVS plugin!" } function neutron_plugin_configure_plugin_agent { # VMware DVS plugin does not run L2 agent die $LINENO "q-agt must not be executed with VMware DVS plugin!" } function neutron_plugin_configure_service { dvs_configure_service "$VMWAREAPI_IP" "$VMWAREAPI_USER" "$VMWAREAPI_PASSWORD" "$VMWAREAPI_CA_FILE" "$VMWAREAPI_INSECURE" "$VMWARE_DVS_NAME" iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_extension_drivers vmware_dvs_dns } function neutron_plugin_setup_interface_driver { local conf_file=$1 iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver } function neutron_plugin_check_adv_test_requirements { is_service_enabled q-dhcp && return 0 } # Restore xtrace $DVS_XTRACE ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/devstack/lib/vmware_nsx0000644000175000017500000002106600000000000021672 0ustar00coreycorey00000000000000#!/bin/bash # Copyright 2015 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# Neutron VMware NSX plugin # ------------------------- # Settings previously defined in devstack:lib/neutron-legacy NEUTRON_CONF_DIR=/etc/neutron export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"} Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini Q_META_DATA_IP=${Q_META_DATA_IP:-$SERVICE_HOST} # Save trace setting NSX_XTRACE=$(set +o | grep xtrace) set +o xtrace source $TOP_DIR/lib/neutron_plugins/ovs_base function setup_integration_bridge { _neutron_ovs_base_setup_bridge $OVS_BRIDGE # Set manager to NSX controller (1st of list) if [[ "$NSX_CONTROLLERS" != "" ]]; then # Get the first controller controllers=(${NSX_CONTROLLERS//,/ }) OVS_MGR_IP=${controllers[0]} else die $LINENO "Error - No controller specified. Unable to set a manager for OVS" fi sudo ovs-vsctl set-manager ssl:$OVS_MGR_IP } function is_neutron_ovs_base_plugin { # NSX uses OVS, but not the l3-agent return 0 } function neutron_plugin_create_nova_conf { # if n-cpu is enabled, then setup integration bridge if is_service_enabled n-cpu; then setup_integration_bridge fi } function neutron_plugin_install_agent_packages { # VMware NSX Plugin does not run q-agt, but it currently needs dhcp and metadata agents _neutron_ovs_base_install_agent_packages } function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/vmware Q_PLUGIN_CONF_FILENAME=nsx.ini Q_PLUGIN_SRC_CONF_PATH=vmware-nsx/etc VMWARE_NSX_DIR=vmware-nsx # Uses oslo config generator to generate sample configuration file (cd $DEST/$VMWARE_NSX_DIR && exec ./tools/generate_config_file_samples.sh) mkdir -p /$Q_PLUGIN_CONF_PATH cp $DEST/$Q_PLUGIN_SRC_CONF_PATH/nsx.ini.sample /$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME Q_PLUGIN_CLASS="vmware_nsx" } function neutron_plugin_configure_debug_command { sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE } function neutron_plugin_configure_dhcp_agent { setup_integration_bridge iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata True 
iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network True iniset $Q_DHCP_CONF_FILE DEFAULT ovs_use_veth True iniset $Q_DHCP_CONF_FILE DEFAULT ovs_integration_bridge $OVS_BRIDGE } function neutron_plugin_configure_l3_agent { # VMware NSX plugin does not run L3 agent die $LINENO "q-l3 should not be executed with VMware NSX plugin!" } function neutron_plugin_configure_plugin_agent { # VMware NSX plugin does not run L2 agent die $LINENO "q-agt must not be executed with VMware NSX plugin!" } function neutron_plugin_configure_service { if [[ "$MAX_LP_PER_BRIDGED_LS" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE nsx max_lp_per_bridged_ls $MAX_LP_PER_BRIDGED_LS fi if [[ "$MAX_LP_PER_OVERLAY_LS" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE nsx max_lp_per_overlay_ls $MAX_LP_PER_OVERLAY_LS fi if [[ "$FAILOVER_TIME" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE nsx failover_time $FAILOVER_TIME fi if [[ "$CONCURRENT_CONNECTIONS" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE nsx concurrent_connections $CONCURRENT_CONNECTIONS fi if [[ "$DEFAULT_TZ_UUID" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_tz_uuid $DEFAULT_TZ_UUID else die $LINENO "The VMware NSX plugin won't work without a default transport zone." fi if [[ "$DEFAULT_L3_GW_SVC_UUID" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_l3_gw_service_uuid $DEFAULT_L3_GW_SVC_UUID Q_L3_ENABLED=True Q_L3_ROUTER_PER_TENANT=True iniset /$Q_PLUGIN_CONF_FILE nsx metadata_mode access_network fi if [[ "$DEFAULT_L2_GW_SVC_UUID" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_l2_gw_service_uuid $DEFAULT_L2_GW_SVC_UUID fi # NSX_CONTROLLERS must be a comma separated string if [[ "$NSX_CONTROLLERS" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_controllers $NSX_CONTROLLERS else die $LINENO "The VMware NSX plugin needs at least an NSX controller." 
fi if [[ "$NSX_USER" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_user $NSX_USER fi if [[ "$NSX_PASSWORD" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_password $NSX_PASSWORD fi if [[ "$NSX_HTTP_TIMEOUT" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT http_timeout $NSX_HTTP_TIMEOUT fi if [[ "$NSX_RETRIES" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT retries $NSX_RETRIES fi if [[ "$NSX_REDIRECTS" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT redirects $NSX_REDIRECTS fi if [[ "$AGENT_MODE" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE nsx agent_mode $AGENT_MODE if [[ "$AGENT_MODE" == "agentless" ]]; then if [[ "$DEFAULT_SERVICE_CLUSTER_UUID" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_service_cluster_uuid $DEFAULT_SERVICE_CLUSTER_UUID else die $LINENO "Agentless mode requires a service cluster." fi iniset /$Q_PLUGIN_CONF_FILE nsx_metadata metadata_server_address $Q_META_DATA_IP fi fi } function neutron_plugin_setup_interface_driver { local conf_file=$1 iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver } function neutron_plugin_check_adv_test_requirements { is_service_enabled q-dhcp && return 0 } function init_vmware_nsx { if ! is_set NSX_GATEWAY_NETWORK_CIDR; then NSX_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} echo "The IP address to set on $PUBLIC_BRIDGE was not specified. 
" echo "Defaulting to "$NSX_GATEWAY_NETWORK_CIDR fi # Make sure the interface is up, but not configured sudo ip link set $NSX_GATEWAY_NETWORK_INTERFACE up # Save and then flush the IP addresses on the interface addresses=$(ip addr show dev $NSX_GATEWAY_NETWORK_INTERFACE | grep inet | awk {'print $2'}) sudo ip addr flush $NSX_GATEWAY_NETWORK_INTERFACE # Use the PUBLIC Bridge to route traffic to the NSX gateway # NOTE(armando-migliaccio): if running in a nested environment this will work # only with mac learning enabled, portsecurity and security profiles disabled # The public bridge might not exist for the NSX plugin if Q_USE_DEBUG_COMMAND is off # Try to create it anyway sudo ovs-vsctl --may-exist add-br $PUBLIC_BRIDGE sudo ovs-vsctl --may-exist add-port $PUBLIC_BRIDGE $NSX_GATEWAY_NETWORK_INTERFACE # Flush all existing addresses on public bridge sudo ip addr flush dev $PUBLIC_BRIDGE nsx_gw_net_if_mac=$(ip link show $NSX_GATEWAY_NETWORK_INTERFACE | awk '/ether/ {print $2}') sudo ip link set address $nsx_gw_net_if_mac dev $PUBLIC_BRIDGE for address in $addresses; do sudo ip addr add dev $PUBLIC_BRIDGE $address done sudo ip addr add dev $PUBLIC_BRIDGE $NSX_GATEWAY_NETWORK_CIDR sudo ip link set $PUBLIC_BRIDGE up } function stop_vmware_nsx { if ! is_set NSX_GATEWAY_NETWORK_CIDR; then NSX_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} echo "The IP address expected on $PUBLIC_BRIDGE was not specified. 
" echo "Defaulting to "$NSX_GATEWAY_NETWORK_CIDR fi sudo ip addr del $NSX_GATEWAY_NETWORK_CIDR dev $PUBLIC_BRIDGE # Save and then flush remaining addresses on the interface addresses=$(ip addr show dev $PUBLIC_BRIDGE | grep inet | awk {'print $2'}) sudo ip addr flush $PUBLIC_BRIDGE # Try to detach physical interface from PUBLIC_BRIDGE sudo ovs-vsctl del-port $NSX_GATEWAY_NETWORK_INTERFACE # Restore addresses on NSX_GATEWAY_NETWORK_INTERFACE for address in $addresses; do sudo ip addr add dev $NSX_GATEWAY_NETWORK_INTERFACE $address done } function check_vmware_nsx { neutron-check-nsx-config $NEUTRON_CONF_DIR/plugins/vmware/nsx.ini } # Restore xtrace $NSX_XTRACE ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/devstack/lib/vmware_nsx_p0000644000175000017500000001034500000000000022207 0ustar00coreycorey00000000000000#!/bin/bash # Copyright 2018 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Neutron VMware NSX Policy plugin # -------------------------------- # Save trace setting NSX_XTRACE=$(set +o | grep xtrace) set +o xtrace dir=${GITDIR['vmware-nsx']}/devstack source $dir/lib/nsx_common source $dir/lib/nsx_v3_p_common function _ovsdb_connection { managers=(${NSX_POLICY//,/ }) _ovsdb_connection_common ${managers[0]} } function setup_integration_bridge { die_if_not_set $LINENO NSX_POLICY "NSX_POLICY has not been set!" 
setup_integration_bridge_common } function neutron_plugin_configure_common { neutron_plugin_configure_common_v3 "vmware_nsxp" } function neutron_plugin_configure_service { nsxp_configure_service nsx_p iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_extension_drivers vmware_nsxp_dns } function nsxp_configure_service { install_neutron_projects _nsxp_ini_set default_overlay_tz $DEFAULT_OVERLAY_TZ_UUID _nsxp_ini_set default_vlan_tz $DEFAULT_VLAN_TZ_UUID if [[ "$DEFAULT_TIER0_ROUTER_UUID" != "" ]]; then _nsxp_ini_set default_tier0_router $DEFAULT_TIER0_ROUTER_UUID Q_L3_ENABLED=True Q_L3_ROUTER_PER_TENANT=True fi # NSX_POLICY must be a comma separated string if [[ "$NSX_POLICIES" != "" ]]; then _nsxp_ini_set nsx_api_managers $NSX_POLICIES elif [[ "$NSX_POLICY" != "" ]]; then _nsxp_ini_set nsx_api_managers $NSX_POLICY else if [[ $1 == "nsx_p" ]]; then die $LINENO "The VMware nsx-p plugin needs at least one NSX policy manager." fi fi if [[ "$NSX_L2GW_DRIVER" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_l2gw_driver $NSX_L2GW_DRIVER fi _nsxp_ini_set native_metadata_route $NATIVE_METADATA_ROUTE _nsxp_ini_set dhcp_profile $DHCP_PROFILE_UUID _nsxp_ini_set metadata_proxy $METADATA_PROXY_UUID iniset $NEUTRON_CONF DEFAULT dhcp_agent_notification False _nsxp_ini_set nsx_api_user $NSX_USER _nsxp_ini_set nsx_api_password $NSX_PASSWORD _nsxp_ini_set retries $NSX_RETRIES _nsxp_ini_set insecure $NSX_INSECURE _nsxp_ini_set ca_file $NSX_CA_FILE if [[ "$NSX_USE_CLIENT_CERT_AUTH" == "True" ]]; then _nsxp_ini_set nsx_use_client_auth "True" _nsxp_ini_set nsx_client_cert_file "$CLIENT_CERT_FILE" _nsxp_ini_set nsx_client_cert_storage "nsx-db" _nsxp_ini_set nsx_client_cert_pk_password "openstack" fi } function init_vmware_nsx_p { # Generate client certificate if [[ "$NSX_USE_CLIENT_CERT_AUTH" == "True" ]]; then nsxadmin -o generate -r certificate fi if ! is_set NSX_GATEWAY_NETWORK_INTERFACE; then echo "NSX_GATEWAY_NETWORK_INTERFACE not set not configuring routes" return fi if ! 
is_set NSX_GATEWAY_NETWORK_CIDR; then NSX_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} echo "The IP address to set on $PUBLIC_BRIDGE was not specified. " echo "Defaulting to $NSX_GATEWAY_NETWORK_CIDR" fi # Make sure the interface is up, but not configured sudo ip link set $NSX_GATEWAY_NETWORK_INTERFACE up # Save and then flush the IP addresses on the interface addresses=$(ip addr show dev $NSX_GATEWAY_NETWORK_INTERFACE | grep inet | awk {'print $2'}) sudo ip addr flush $NSX_GATEWAY_NETWORK_INTERFACE # Use the PUBLIC Bridge to route traffic to the NSX gateway get_bridge_up } function stop_vmware_nsx_p { # Clean client certificate if exists nsxadmin -o clean -r certificate if ! is_set NSX_GATEWAY_NETWORK_INTERFACE; then echo "NSX_GATEWAY_NETWORK_INTERFACE was not configured." return fi set_nsx_gateway_network_cidr } # Restore xtrace $NSX_XTRACE ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/devstack/lib/vmware_nsx_tvd0000644000175000017500000002452000000000000022545 0ustar00coreycorey00000000000000#!/bin/bash # Copyright 2015 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# Neutron VMware NSX plugin # ------------------------- # Settings previously defined in devstack:lib/neutron-legacy NEUTRON_CONF_DIR=/etc/neutron export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"} Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini # The interface which has connectivity to the NSX Gateway uplink NSX_GATEWAY_NETWORK_INTERFACE=${NSX_GATEWAY_NETWORK_INTERFACE:-} # Override default 'True' in devstack:lib/neutron_plugins/services/l3 Q_USE_PROVIDERNET_FOR_PUBLIC=False # Native support from platform NATIVE_DHCP_METADATA=${NATIVE_DHCP_METADATA:-True} NATIVE_METADATA_ROUTE=${NATIVE_METADATA_ROUTE:-169.254.169.254/31} METADATA_PROXY_SHARED_SECRET=${METADATA_PROXY_SHARED_SECRET:-} # Save trace setting NSX_XTRACE=$(set +o | grep xtrace) set +o xtrace # File to store client certificate and PK CLIENT_CERT_FILE=${DEST}/data/neutron/client.pem source $TOP_DIR/lib/neutron_plugins/ovs_base dir=${GITDIR['vmware-nsx']}/devstack source $dir/lib/nsx_common function _version { echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }'; } function _ovsdb_connection { managers=(${NSX_MANAGER//,/ }) NSX_MGR_IP=${managers[0]} NSX_VER=$(curl -1 -s -k -u "$NSX_USER:$NSX_PASSWORD" -H 'Accept: application/json' https://$NSX_MGR_IP/api/v1/node | python -c 'import sys, json; print json.load(sys.stdin)["node_version"][:5]') if [ $(_version $NSX_VER) -ge $(_version 1.1.0) ]; then echo "unix:/var/run/vmware/nsx-agent/nsxagent_ovsdb.sock" else echo "tcp:127.0.0.1:6632" fi } function setup_integration_bridge { die_if_not_set $LINENO NSX_MANAGER "NSX_MANAGER has not been set!" die_if_not_set $LINENO NSX_USER "NSX_USER has not been set!" die_if_not_set $LINENO NSX_PASSWORD "NSX_PASSWORD has not been set!" 
# Ensure that the OVS params are set for the OVS utils iniset $NEUTRON_CONF DEFAULT ovs_integration_bridge $OVS_BRIDGE iniset $NEUTRON_CONF OVS ovsdb_connection $(_ovsdb_connection) iniset $NEUTRON_CONF OVS ovsdb_interface vsctl _neutron_ovs_base_setup_bridge $OVS_BRIDGE sudo ovs-vsctl set bridge $OVS_BRIDGE external_ids:bridge-id=nsx-managed sudo ovs-vsctl set-manager $(_ovsdb_connection) } function is_neutron_ovs_base_plugin { # This allows the deployer to decide whether devstack should install OVS. # By default, we install OVS, to change this behavior add "OVS_BASE=1" to your localrc file. # Note: Any KVM compute must have OVS installed on it. return ${OVS_BASE:-0} } function neutron_plugin_create_nova_conf { if [[ "$VIRT_DRIVER" != 'vsphere' ]]; then # if n-cpu or octavia is enabled, then setup integration bridge if is_service_enabled n-cpu || is_service_enabled octavia ; then setup_integration_bridge if is_service_enabled n-cpu ; then iniset $NOVA_CONF neutron ovs_bridge $OVS_BRIDGE fi fi fi # if n-api is enabled, then setup the metadata_proxy_shared_secret if is_service_enabled n-api; then iniset $NOVA_CONF neutron service_metadata_proxy True if [[ "$NATIVE_DHCP_METADATA" == "True" ]]; then iniset $NOVA_CONF neutron metadata_proxy_shared_secret $METADATA_PROXY_SHARED_SECRET if [[ "$METADATA_PROXY_USE_HTTPS" == "True" ]]; then iniset $NOVA_CONF DEFAULT enabled_ssl_apis metadata if [[ "$METADATA_PROXY_CERT_FILE" != "" ]]; then iniset $NOVA_CONF wsgi ssl_cert_file $METADATA_PROXY_CERT_FILE fi if [[ "$METADATA_PROXY_PRIV_KEY_FILE" != "" ]]; then iniset $NOVA_CONF wsgi ssl_key_file $METADATA_PROXY_PRIV_KEY_FILE fi fi fi fi } function neutron_plugin_install_agent_packages { # VMware NSX Plugin does not run q-agt, but it currently needs dhcp and metadata agents _neutron_ovs_base_install_agent_packages install_neutron_projects } function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/vmware Q_PLUGIN_CONF_FILENAME=nsx.ini 
Q_PLUGIN_SRC_CONF_PATH=vmware-nsx/etc VMWARE_NSX_DIR=vmware-nsx # Uses oslo config generator to generate sample configuration file (cd $DEST/$VMWARE_NSX_DIR && exec ./tools/generate_config_file_samples.sh) mkdir -p /$Q_PLUGIN_CONF_PATH cp $DEST/$Q_PLUGIN_SRC_CONF_PATH/nsx.ini.sample /$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR/policy.d cp -vr $DEST/$Q_PLUGIN_SRC_CONF_PATH/policy.d/* $NEUTRON_CONF_DIR/policy.d/ Q_PLUGIN_CLASS="vmware_nsxtvd" } function neutron_plugin_configure_debug_command { sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE } function neutron_plugin_configure_dhcp_agent { setup_integration_bridge iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata True iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network True iniset $Q_DHCP_CONF_FILE DEFAULT ovs_use_veth True iniset $Q_DHCP_CONF_FILE DEFAULT ovs_integration_bridge $OVS_BRIDGE iniset $Q_DHCP_CONF_FILE OVS ovsdb_connection $(_ovsdb_connection) iniset $Q_DHCP_CONF_FILE OVS ovsdb_interface vsctl } function neutron_plugin_configure_l3_agent { # VMware NSX plugin does not run L3 agent die $LINENO "q-l3 should not be executed with VMware NSX plugin!" } function neutron_plugin_configure_plugin_agent { # VMware NSX plugin does not run L2 agent die $LINENO "q-agt must not be executed with VMware NSX plugin!" 
} function neutron_plugin_configure_service { nsxv3_configure_service nsx_tvd nsxv_configure_service dvs_configure_service "$DVS_VMWAREAPI_IP" "$DVS_VMWAREAPI_USER" "$DVS_VMWAREAPI_PASSWORD" "$DVS_VMWAREAPI_CA_FILE" "$DVS_VMWAREAPI_INSECURE" "$VMWARE_DVS_NAME" iniset /$Q_PLUGIN_CONF_FILE nsx_tvd nsx_v_extension_drivers vmware_nsxv_dns iniset /$Q_PLUGIN_CONF_FILE nsx_tvd nsx_v3_extension_drivers vmware_nsxv3_dns iniset /$Q_PLUGIN_CONF_FILE nsx_tvd dvs_extension_drivers vmware_dvs_dns iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_availability_zones $NSX_DEFAULT_AZ } function neutron_plugin_setup_interface_driver { local conf_file=$1 iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver } function neutron_plugin_check_adv_test_requirements { is_service_enabled q-dhcp && return 0 } function init_vmware_nsx_tvd { if (is_service_enabled q-svc || is_service_enabled neutron-api) && [[ "$NATIVE_DHCP_METADATA" == "True" ]]; then if ! is_set DHCP_PROFILE_UUID; then die $LINENO "DHCP profile needs to be configured!" fi if ! is_set METADATA_PROXY_UUID; then die $LINENO "Metadata proxy needs to be configured!" fi if is_service_enabled q-dhcp q-meta; then die $LINENO "Native support does not require DHCP and Metadata agents!" fi fi # Generate client certificate if [[ "$NSX_USE_CLIENT_CERT_AUTH" == "True" ]]; then nsxadmin --plugin nsxv3 -o generate -r certificate fi if ! is_set NSX_GATEWAY_NETWORK_INTERFACE; then echo "NSX_GATEWAY_NETWORK_INTERFACE not set not configuring routes" return fi if ! is_set NSX_GATEWAY_NETWORK_CIDR; then NSX_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} echo "The IP address to set on $PUBLIC_BRIDGE was not specified. 
" echo "Defaulting to $NSX_GATEWAY_NETWORK_CIDR" fi # Make sure the interface is up, but not configured sudo ip link set $NSX_GATEWAY_NETWORK_INTERFACE up # Save and then flush the IP addresses on the interface addresses=$(ip addr show dev $NSX_GATEWAY_NETWORK_INTERFACE | grep inet | awk {'print $2'}) sudo ip addr flush $NSX_GATEWAY_NETWORK_INTERFACE # Use the PUBLIC Bridge to route traffic to the NSX gateway # NOTE(armando-migliaccio): if running in a nested environment this will work # only with mac learning enabled, portsecurity and security profiles disabled # The public bridge might not exist for the NSX plugin if Q_USE_DEBUG_COMMAND is off # Try to create it anyway sudo ovs-vsctl --may-exist add-br $PUBLIC_BRIDGE sudo ovs-vsctl --may-exist add-port $PUBLIC_BRIDGE $NSX_GATEWAY_NETWORK_INTERFACE # Flush all existing addresses on public bridge sudo ip addr flush dev $PUBLIC_BRIDGE nsx_gw_net_if_mac=$(ip link show $NSX_GATEWAY_NETWORK_INTERFACE | awk '/ether/ {print $2}') sudo ip link set address $nsx_gw_net_if_mac dev $PUBLIC_BRIDGE for address in $addresses; do sudo ip addr add dev $PUBLIC_BRIDGE $address done sudo ip addr add dev $PUBLIC_BRIDGE $NSX_GATEWAY_NETWORK_CIDR sudo ip link set $PUBLIC_BRIDGE up } function stop_vmware_nsx_tvd { # Clean client certificate if exists nsxadmin --plugin nsxv3 -o clean -r certificate if ! is_set NSX_GATEWAY_NETWORK_INTERFACE; then echo "NSX_GATEWAY_NETWORK_INTERFACE was not configured." return fi if ! is_set NSX_GATEWAY_NETWORK_CIDR; then NSX_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} echo "The IP address expected on $PUBLIC_BRIDGE was not specified. 
" echo "Defaulting to "$NSX_GATEWAY_NETWORK_CIDR fi sudo ip addr del $NSX_GATEWAY_NETWORK_CIDR dev $PUBLIC_BRIDGE # Save and then flush remaining addresses on the interface addresses=$(ip addr show dev $PUBLIC_BRIDGE | grep inet | awk {'print $2'}) sudo ip addr flush $PUBLIC_BRIDGE # Try to detach physical interface from PUBLIC_BRIDGE sudo ovs-vsctl del-port $NSX_GATEWAY_NETWORK_INTERFACE # Restore addresses on NSX_GATEWAY_NETWORK_INTERFACE for address in $addresses; do sudo ip addr add dev $NSX_GATEWAY_NETWORK_INTERFACE $address done } # Restore xtrace $NSX_XTRACE ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/devstack/lib/vmware_nsx_v0000644000175000017500000000575400000000000022225 0ustar00coreycorey00000000000000#!/bin/bash # Copyright 2015 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# Neutron VMware NSXv plugin # -------------------------- # Save trace setting NSXV_XTRACE=$(set +o | grep xtrace) set +o xtrace dir=${GITDIR['vmware-nsx']}/devstack source $dir/lib/nsx_common function setup_integration_bridge { : } function is_neutron_ovs_base_plugin { # NSXv does not use OVS return 1 } function neutron_plugin_create_nova_conf { if [[ -n $NSXV_NOVA_METADATA_IPS ]]; then iniset $NOVA_CONF neutron service_metadata_proxy "True" iniset $NOVA_CONF neutron metadata_proxy_shared_secret "$NSXV_METADATA_SHARED_SECRET" fi } function neutron_plugin_install_agent_packages { # NSXv does not require this : } function neutron_plugin_configure_common { Q_PLUGIN_CONF_PATH=etc/neutron/plugins/vmware Q_PLUGIN_CONF_FILENAME=nsx.ini Q_PLUGIN_SRC_CONF_PATH=vmware-nsx/etc VMWARE_NSX_DIR=vmware-nsx # Uses oslo config generator to generate sample configuration file (cd $DEST/$VMWARE_NSX_DIR && exec ./tools/generate_config_file_samples.sh) mkdir -p /$Q_PLUGIN_CONF_PATH cp $DEST/$Q_PLUGIN_SRC_CONF_PATH/nsx.ini.sample /$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR/policy.d cp -vr $DEST/$Q_PLUGIN_SRC_CONF_PATH/policy.d/* $NEUTRON_CONF_DIR/policy.d/ Q_PLUGIN_CLASS="vmware_nsxv" } function neutron_plugin_configure_debug_command { : } function neutron_plugin_configure_dhcp_agent { # VMware NSXv plugin does not run L3 agent die $LINENO "q-dhcp should not be executed with VMware NSXv plugin!" } function neutron_plugin_configure_l3_agent { # VMware NSXv plugin does not run L3 agent die $LINENO "q-l3 should not be executed with VMware NSXv plugin!" } function neutron_plugin_configure_plugin_agent { # VMware NSXv plugin does not run L2 agent die $LINENO "q-agt must not be executed with VMware NSXv plugin!" 
} function neutron_plugin_configure_service { nsxv_configure_service iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_extension_drivers vmware_nsxv_dns if [[ "$NSXV_USE_DVS_FEATURES" != "" ]]; then dvs_configure_service "$VMWAREAPI_IP" "$VMWAREAPI_USER" "$VMWAREAPI_PASSWORD" "$VMWAREAPI_CA_FILE" "$VMWAREAPI_INSECURE" "$VMWARE_DVS_NAME" fi } function neutron_plugin_setup_interface_driver { : } function neutron_plugin_check_adv_test_requirements { return 0 } # Restore xtrace $NSXV_XTRACE ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/devstack/lib/vmware_nsx_v30000644000175000017500000000617700000000000022310 0ustar00coreycorey00000000000000#!/bin/bash # Copyright 2015 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Neutron VMware NSX plugin # ------------------------- # Save trace setting NSX_XTRACE=$(set +o | grep xtrace) set +o xtrace dir=${GITDIR['vmware-nsx']}/devstack source $dir/lib/nsx_common source $dir/lib/nsx_v3_p_common function _ovsdb_connection { managers=(${NSX_MANAGER//,/ }) _ovsdb_connection_common ${managers[0]} } function setup_integration_bridge { die_if_not_set $LINENO NSX_MANAGER "NSX_MANAGER has not been set!" 
setup_integration_bridge_common } function neutron_plugin_configure_common { neutron_plugin_configure_common_v3 "vmware_nsxv3" } function neutron_plugin_configure_service { nsxv3_configure_service nsx_v3 iniset /$Q_PLUGIN_CONF_FILE DEFAULT nsx_extension_drivers vmware_nsxv3_dns } function init_vmware_nsx_v3 { if (is_service_enabled q-svc || is_service_enabled neutron-api) && [[ "$NATIVE_DHCP_METADATA" == "True" ]]; then if ! is_set DHCP_PROFILE_UUID; then die $LINENO "DHCP profile needs to be configured!" fi if ! is_set METADATA_PROXY_UUID; then die $LINENO "Metadata proxy needs to be configured!" fi if is_service_enabled q-dhcp q-meta; then die $LINENO "Native support does not require DHCP and Metadata agents!" fi fi # Generate client certificate if [[ "$NSX_USE_CLIENT_CERT_AUTH" == "True" ]]; then nsxadmin -o generate -r certificate fi if ! is_set NSX_GATEWAY_NETWORK_INTERFACE; then echo "NSX_GATEWAY_NETWORK_INTERFACE not set not configuring routes" return fi if ! is_set NSX_GATEWAY_NETWORK_CIDR; then NSX_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/} echo "The IP address to set on $PUBLIC_BRIDGE was not specified. " echo "Defaulting to $NSX_GATEWAY_NETWORK_CIDR" fi # Make sure the interface is up, but not configured sudo ip link set $NSX_GATEWAY_NETWORK_INTERFACE up # Save and then flush the IP addresses on the interface addresses=$(ip addr show dev $NSX_GATEWAY_NETWORK_INTERFACE | grep inet | awk {'print $2'}) sudo ip addr flush $NSX_GATEWAY_NETWORK_INTERFACE # Use the PUBLIC Bridge to route traffic to the NSX gateway get_bridge_up } function stop_vmware_nsx_v3 { # Clean client certificate if exists nsxadmin -o clean -r certificate if ! is_set NSX_GATEWAY_NETWORK_INTERFACE; then echo "NSX_GATEWAY_NETWORK_INTERFACE was not configured." 
return fi set_nsx_gateway_network_cidr } # Restore xtrace $NSX_XTRACE ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/devstack/localrc_nsx_p0000644000175000017500000000111200000000000021547 0ustar00coreycorey00000000000000enable_plugin vmware-nsx https://git.openstack.org/openstack/vmware-nsx ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-sch,n-cond,n-cauth,horizon,mysql,rabbit,sysstat,quantum,q-svc,q-dhcp,n-novnc,n-xvnc DATABASE_PASSWORD=password RABBIT_PASSWORD=password SERVICE_TOKEN=password SERVICE_PASSWORD=password ADMIN_PASSWORD=password Q_PLUGIN=vmware_nsx_p NSX_PASSWORD=Admin!23Admin DEFAULT_OVERLAY_TZ_UUID= EDGE_CLUSTER_UUID= NSX_MANAGER= NSX_CONTROLLERS= DHCP_PROFILE_UUID= METADATA_PROXY_UUID= DHCP_RELAY_SERVICE= ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/devstack/localrc_nsx_v30000644000175000017500000000111300000000000021641 0ustar00coreycorey00000000000000enable_plugin vmware-nsx https://git.openstack.org/openstack/vmware-nsx ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-sch,n-cond,n-cauth,horizon,mysql,rabbit,sysstat,quantum,q-svc,q-dhcp,n-novnc,n-xvnc DATABASE_PASSWORD=password RABBIT_PASSWORD=password SERVICE_TOKEN=password SERVICE_PASSWORD=password ADMIN_PASSWORD=password Q_PLUGIN=vmware_nsx_v3 NSX_PASSWORD=Admin!23Admin DEFAULT_OVERLAY_TZ_UUID= EDGE_CLUSTER_UUID= NSX_MANAGER= NSX_CONTROLLERS= DHCP_PROFILE_UUID= METADATA_PROXY_UUID= DHCP_RELAY_SERVICE= ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1662529 vmware-nsx-15.0.1.dev143/devstack/nsx_p/0000755000175000017500000000000000000000000020132 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 
vmware-nsx-15.0.1.dev143/devstack/nsx_p/devstackgaterc0000644000175000017500000000425600000000000023056 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This file is sourced by the NSX-p CI to run selective set of tests # based on the features that are ready to be tested. # Begin list of exclusions. r="^(?!.*" r="$r(?:tempest\.api\.network\.test_extensions\.ExtensionsTestJSON.*)" r="$r|(?:tempest\.api\.network\.test_routers\.DvrRoutersTest.*)" r="$r|(?:tempest\.api\.network\.test_routers_negative\.DvrRoutersNegativeTest.*)" r="$r|(?:tempest\.api\.network\.test_allowed_address_pair\.AllowedAddressPairTestJSON\.test_update_port_with_cidr_address_pair.*)" #Can not create more than one DHCP-enabled subnet r="$r|(?:tempest\.api\.network\.test_ports\.PortsTestJSON\.test_create_update_port_with_second_ip.*)" r="$r|(?:tempest\.api\.network\.test_ports\.PortsTestJSON\.test_update_port_with_security_group_and_extra_attributes.*)" r="$r|(?:tempest\.api\.network\.test_ports\.PortsTestJSON\.test_update_port_with_two_security_groups_and_extra_attributes.*)" r="$r|(?:tempest\.api\.network\.test_extra_dhcp_options\.ExtraDHCPOptionsTestJSON\.test_.*_with_extra_dhcp_options.*)" r="$r|(?:tempest\.api\.network\.test_floating_ips\.FloatingIPTestJSON\.test_create_update_floatingip_with_port_multiple_ip_address.*)" 
r="$r|(?:tempest\.api\.network\.admin\.test_external_network_extension\.ExternalNetworksTestJSON\.test_update_external_network.*)" # Some ICMP types are not supported by the NSX backend r="$r|(?:tempest\.api\.network\.test_security_groups\.SecGroupTest\.test_create_security_group_rule_with_icmp_type_code.*)" # End list of exclusions. r="$r)" # only run tempest.api.network tests r="$r(tempest\.api\.network).*$" export DEVSTACK_GATE_TEMPEST_REGEX="$r" ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.170253 vmware-nsx-15.0.1.dev143/devstack/nsx_v/0000755000175000017500000000000000000000000020140 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/devstack/nsx_v/devstackgaterc0000644000175000017500000000261000000000000023054 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This file is sourced by the NSX-v3 CI to run selective set of tests # based on the features that are ready to be tested. # Begin list of exclusions. 
r="^(?!.*" r="$r(?:tempest\.api\.network\.test_ports\.PortsTestJSON\.test_create_update_port_with_second_ip.*)" r="$r|(?:tempest\.api\.network\.test_floating_ips\.FloatingIPTestJSON\.test_create_update_floatingip_with_port_multiple_ip_address.*)" r="$r|(?:tempest\.api\.network\.test_routers\.RoutersTest\.test_update_delete_extra_route.*)" # Skip allowed address pairs tests as ip collision with address pairs is not supported r="$r|(?:tempest\.api\.network\.test_allowed_address_pair\.AllowedAddressPairTestJSON.*)" # End list of exclusions. r="$r)" # only run tempest.api.network tests r="$r(tempest\.api\.network).*$" export DEVSTACK_GATE_TEMPEST_REGEX="$r" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/devstack/nsx_v/tvd_devstackgaterc0000644000175000017500000000361700000000000023741 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This file is sourced by the NSX-TVD-V CI to run selective set of tests # based on the features that are ready to be tested. # Note that the TVD plugin enabled a lot of extensions that the NSX-V plugin does not support # so those tests should be skipped. # Begin list of exclusions. 
r="^(?!.*" # unsupported NSX-V tests r="$r(?:tempest\.api\.network\.test_ports\.PortsTestJSON\.test_create_update_port_with_second_ip.*)" r="$r|(?:tempest\.api\.network\.test_floating_ips\.FloatingIPTestJSON\.test_create_update_floatingip_with_port_multiple_ip_address.*)" r="$r|(?:tempest\.api\.network\.test_routers\.RoutersTest\.test_update_delete_extra_route.*)" # unsupported TVD tests r="$r|(?:tempest\.api\.network\.test_networks\.NetworksTest\.test_show_network_fields.*)" r="$r|(?:tempest\.api\.network\.test_extra_dhcp_options\.ExtraDHCPOptionsTestJSON\.test_update.*)" r="$r|(?:tempest\.api\.network\.test_extensions\.ExtensionsTestJSON\..*)" r="$r|(?:tempest\.api\.network\.test_allowed_address_pair\.AllowedAddressPairTestJSON\.test_update.*)" r="$r|(?:tempest\.api\.network\.admin\.test_routers_dvr\.RoutersTestDVR\..*)" r="$r|(?:tempest\.api\.network\.admin.\test_metering_extensions.*)" # End list of exclusions. r="$r)" # only run tempest.api.network tests r="$r(tempest\.api\.network).*$" export DEVSTACK_GATE_TEMPEST_REGEX="$r" ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.170253 vmware-nsx-15.0.1.dev143/devstack/nsx_v3/0000755000175000017500000000000000000000000020223 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/devstack/nsx_v3/controller_local.conf.sample0000644000175000017500000000563600000000000025721 0ustar00coreycorey00000000000000[[post-extra|$TEMPEST_CONFIG]] [nsxv3] nsx_manager= nsx_user= nsx_password= [[post-config|$NOVA_CONF]] [DEFAULT] image_handlers=vmware_copy, vmware_download force_config_drive = False [vmware] task_poll_interval=0.5 use_linked_clone=false insecure = true datastore_regex = vdnet* [[local|localrc]] DATABASE_PASSWORD=openstack ADMIN_PASSWORD=openstack SERVICE_PASSWORD=openstack SERVICE_TOKEN=openstack RABBIT_PASSWORD=openstack # Enable Logging 
LOGFILE=/opt/stack/logs/stack.sh.log VERBOSE=True LOG_COLOR=True RECLONE=True VIRT_DRIVER=vsphere CINDER_DRIVER=vsphere CINDER_ENABLED_BACKENDS=vsphere VMWAREAPI_IP= VMWAREAPI_USER= VMWAREAPI_PASSWORD= VMWAREAPI_CLUSTER= # Use IPv4 only IP_VERSION=4 # Pre-requisite ENABLED_SERVICES=rabbit,mysql,key # Horizon (Dashboard UI) ENABLED_SERVICES+=,horizon #HORIZON_REPO=https://github.com/openstack/horizon # Nova - Compute Service ENABLED_SERVICES+=,n-api,n-crt,n-obj,n-cpu,n-cond,n-sch # Nova Network - If you don't want to use Neutron and need a simple network setup (old good stuff!) #ENABLED_SERVICES+=,n-net ## Nova Cells #ENABLED_SERVICES+=,n-cell # VNC server ENABLED_SERVICES+=,n-novnc,n-xvnc,n-cauth # Glance - Image Service ENABLED_SERVICES+=,g-api,g-reg # Tempest ENABLED_SERVICES+=,tempest # Swift - Object Storage #ENABLED_SERVICES+=,s-proxy,s-object,s-container,s-account # Neutron - Networking Service # If Neutron is not declared the old good nova-network will be used # If use agent to provider DHCP and metadata #ENABLED_SERVICES+=,q-svc,q-dhcp,q-meta,neutron # If use native DHCP support from NSX, q-dhcp & q-meta shouldn't be enabled ENABLED_SERVICES+=,q-svc,neutron ## Neutron - Load Balancing #ENABLED_SERVICES+=,q-lbaas ## Neutron - VPN as a Service #ENABLED_SERVICES+=,q-vpn ## Neutron - Firewall as a Service #ENABLED_SERVICES+=,q-fwaas # Cinder - Block Device Service #ENABLED_SERVICES+=,cinder,c-api,c-vol,c-sch,c-bak # Apache fronted for WSGI APACHE_ENABLED_SERVICES+=keystone,swift # Enable NSX-T plugin stable/liberty branch enable_plugin vmware-nsx Q_PLUGIN=vmware_nsx_v3 # Defatult vlan transport zone for provider network DEFAULT_VLAN_TZ_UUID= # Defatult overlay transport zone fro tenant network DEFAULT_OVERLAY_TZ_UUID= NSX_MANAGER= OVS_BRIDGE=nsxvswitch NSX_USER= NSX_PASSWORD= # Default tier0 uuid which is created by admin DEFAULT_TIER0_ROUTER_UUID= # Enabled native DHCP support from NSX backend DHCP_PROFILE_UUID= DHCP_RELAY_SERVICE= METADATA_PROXY_UUID= 
METADATA_PROXY_SHARED_SECRET= METADATA_PROXY_USE_HTTPS=False METADATA_PROXY_CERT_FILE= METADATA_PROXY_PRIV_KEY_FILE= NATIVE_DHCP_METADATA=True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/devstack/nsx_v3/devstackgaterc0000644000175000017500000000425700000000000023150 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This file is sourced by the NSX-v3 CI to run selective set of tests # based on the features that are ready to be tested. # Begin list of exclusions. 
r="^(?!.*" r="$r(?:tempest\.api\.network\.test_extensions\.ExtensionsTestJSON.*)" r="$r|(?:tempest\.api\.network\.test_routers\.DvrRoutersTest.*)" r="$r|(?:tempest\.api\.network\.test_routers_negative\.DvrRoutersNegativeTest.*)" r="$r|(?:tempest\.api\.network\.test_allowed_address_pair\.AllowedAddressPairTestJSON\.test_update_port_with_cidr_address_pair.*)" #Can not create more than one DHCP-enabled subnet r="$r|(?:tempest\.api\.network\.test_ports\.PortsTestJSON\.test_create_update_port_with_second_ip.*)" r="$r|(?:tempest\.api\.network\.test_ports\.PortsTestJSON\.test_update_port_with_security_group_and_extra_attributes.*)" r="$r|(?:tempest\.api\.network\.test_ports\.PortsTestJSON\.test_update_port_with_two_security_groups_and_extra_attributes.*)" r="$r|(?:tempest\.api\.network\.test_extra_dhcp_options\.ExtraDHCPOptionsTestJSON\.test_.*_with_extra_dhcp_options.*)" r="$r|(?:tempest\.api\.network\.test_floating_ips\.FloatingIPTestJSON\.test_create_update_floatingip_with_port_multiple_ip_address.*)" r="$r|(?:tempest\.api\.network\.admin\.test_external_network_extension\.ExternalNetworksTestJSON\.test_update_external_network.*)" # Some ICMP types are not supported by the NSX backend r="$r|(?:tempest\.api\.network\.test_security_groups\.SecGroupTest\.test_create_security_group_rule_with_icmp_type_code.*)" # End list of exclusions. 
r="$r)" # only run tempest.api.network tests r="$r(tempest\.api\.network).*$" export DEVSTACK_GATE_TEMPEST_REGEX="$r" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/devstack/nsx_v3/kvm_compute_local.conf.sample0000644000175000017500000000202100000000000026050 0ustar00coreycorey00000000000000[[post-config|$NOVA_CONF]] [neutron] ovs_bridge=nsxvswitch [[local|localrc]] =https://git.openstack.org/ enable_plugin vmware-nsx Q_PLUGIN=vmware_nsx_v3 ENABLED_SERVICES=n-cpu,neutron SERVICE_HOST= # OpenStack controller node IP MYSQL_HOST=$SERVICE_HOST RABBIT_HOST=$SERVICE_HOST Q_HOST=$SERVICE_HOST DATABASE_PASSWORD=openstack RABBIT_PASSWORD=openstack SERVICE_TOKEN=openstack SERVICE_PASSWORD=openstack ADMIN_PASSWORD=openstack RECLONE=no OVS_BRIDGE=nsxvswitch IPV6_ENABLED=False IP_VERSION=4 HOST_IP= # OpenStack compute node IP MULTI_HOST=1 NOVA_VNC_ENABLED=True NOVNCPROXY_URL="" VNCSERVER_LISTEN=$HOST_IP VNCSERVER_PROXYCLIENT_ADDRESS=$VNCSERVER_LISTEN disable_service zookeeper #DEBUG=False #DEFAULT_VLAN_TZ_UUID=changeme # Optional, for VLAN provider networks # Enable Logging LOGFILE=/opt/stack/logs/stack.sh.log VERBOSE=True LOG_COLOR=True NSX_MANAGER= NSX_USER= NSX_PASSWORD= ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/devstack/override-defaults0000644000175000017500000000012400000000000022347 0ustar00coreycorey00000000000000function has_neutron_plugin_security_group { # 0 means True here return 0 } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/devstack/plugin.sh0000644000175000017500000001416500000000000020644 0ustar00coreycorey00000000000000#!/bin/bash # Copyright 2015 VMware, Inc. 
# # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. GITDIR['vmware-nsxlib']=$DEST/vmware-nsxlib GITREPO['vmware-nsxlib']=${NSXLIB_REPO:-${GIT_BASE}/openstack/vmware-nsxlib.git} GITBRANCH['vmware-nsxlib']=${NSXLIB_BRANCH:-master} PYTHON='python' PIP='pip' if [[ $USE_PYTHON3 == "True" ]]; then PYTHON='python3' PIP='pip3' fi dir=${GITDIR['vmware-nsx']}/devstack if [[ "$1" == "stack" && "$2" == "install" ]]; then if use_library_from_git 'vmware-nsxlib'; then git_clone_by_name 'vmware-nsxlib' setup_dev_lib 'vmware-nsxlib' fi setup_develop ${GITDIR['vmware-nsx']} fi if [[ $Q_PLUGIN == 'vmware_nsx_v' ]]; then source $dir/lib/vmware_nsx_v if [[ "$1" == "unstack" ]]; then db_connection=$(iniget $NEUTRON_CONF database connection) $PYTHON $dir/tools/nsxv_cleanup.py --vsm-ip ${NSXV_MANAGER_URI/https:\/\/} --user $NSXV_USER --password $NSXV_PASSWORD --db-connection $db_connection elif [[ "$1" == "clean" ]]; then if is_service_enabled q-svc || is_service_enabled neutron-api; then $PYTHON $dir/tools/nsxv_cleanup.py --vsm-ip ${NSXV_MANAGER_URI/https:\/\/} --user $NSXV_USER --password $NSXV_PASSWORD fi fi elif [[ $Q_PLUGIN == 'vmware_nsx' ]]; then source $dir/lib/vmware_nsx if [[ "$1" == "stack" && "$2" == "post-config" ]]; then init_vmware_nsx elif [[ "$1" == "stack" && "$2" == "extra" ]]; then check_vmware_nsx elif [[ "$1" == "unstack" ]]; then stop_vmware_nsx fi elif [[ $Q_PLUGIN == 'vmware_nsx_v3' ]]; then source $dir/lib/vmware_nsx_v3 if [[ "$1" == "stack" && "$2" == 
"post-config" ]]; then init_vmware_nsx_v3 elif [[ "$1" == "unstack" ]]; then db_connection=$(iniget $NEUTRON_CONF database connection) stop_vmware_nsx_v3 # only clean up when q-svc (legacy support) or neutron-api is enabled if is_service_enabled q-svc || is_service_enabled neutron-api; then NSX_MANAGER=${NSX_MANAGERS:-$NSX_MANAGER} IFS=',' NSX_MANAGER=($NSX_MANAGER) unset IFS $PYTHON $dir/tools/nsxv3_cleanup.py --mgr-ip $NSX_MANAGER --user $NSX_USER --password $NSX_PASSWORD --db-connection $db_connection fi elif [[ "$1" == 'clean' ]]; then if is_service_enabled q-svc || is_service_enabled neutron-api; then $PYTHON $dir/tools/nsxv3_cleanup.py --mgr-ip $NSX_MANAGER --user $NSX_USER --password $NSX_PASSWORD fi fi elif [[ $Q_PLUGIN == 'vmware_nsx_tvd' ]]; then source $dir/lib/vmware_nsx_tvd if [[ "$1" == "stack" && "$2" == "post-config" ]]; then init_vmware_nsx_tvd elif [[ "$1" == "unstack" ]]; then db_connection=$(iniget $NEUTRON_CONF database connection) stop_vmware_nsx_tvd # only clean up when q-svc (legacy support) or neutron-api is enabled if is_service_enabled q-svc || is_service_enabled neutron-api; then NSX_MANAGER=${NSX_MANAGERS:-$NSX_MANAGER} IFS=',' NSX_MANAGER=($NSX_MANAGER) unset IFS if [[ "$NSX_MANAGER" != "" ]]; then $PYTHON $dir/tools/nsxv3_cleanup.py --mgr-ip $NSX_MANAGER --user $NSX_USER --password $NSX_PASSWORD --db-connection $db_connection fi if [[ "$NSXV_MANAGER_URI" != "" ]]; then $PYTHON $dir/tools/nsxv_cleanup.py --vsm-ip ${NSXV_MANAGER_URI/https:\/\/} --user $NSXV_USER --password $NSXV_PASSWORD --db-connection $db_connection fi fi elif [[ "$1" == 'clean' ]]; then if is_service_enabled q-svc || is_service_enabled neutron-api; then if [[ "$NSX_MANAGER" != "" ]]; then $PYTHON $dir/tools/nsxv3_cleanup.py --mgr-ip $NSX_MANAGER --user $NSX_USER --password $NSX_PASSWORD fi if [[ "$NSXV_MANAGER_URI" != "" ]]; then $PYTHON $dir/tools/nsxv_cleanup.py --vsm-ip ${NSXV_MANAGER_URI/https:\/\/} --user $NSXV_USER --password $NSXV_PASSWORD fi fi fi elif [[ 
$Q_PLUGIN == 'vmware_dvs' ]]; then source $dir/lib/vmware_dvs elif [[ $Q_PLUGIN == 'vmware_nsx_p' ]]; then source $dir/lib/vmware_nsx_p if [[ "$1" == "stack" && "$2" == "post-config" ]]; then init_vmware_nsx_p elif [[ "$1" == "unstack" ]]; then db_connection=$(iniget $NEUTRON_CONF database connection) stop_vmware_nsx_p # only clean up when q-svc (legacy support) or neutron-api is enabled if is_service_enabled q-svc || is_service_enabled neutron-api; then NSX_POLICY=${NSX_POLICIES:-$NSX_POLICY} IFS=',' NSX_POLICY=($NSX_POLICY) unset IFS $PYTHON $dir/tools/nsxp_cleanup.py --policy-ip $NSX_POLICY --user $NSX_USER --password $NSX_PASSWORD --db-connection $db_connection fi elif [[ "$1" == 'clean' ]]; then if is_service_enabled q-svc || is_service_enabled neutron-api; then $PYTHON $dir/tools/nsxp_cleanup.py --policy-ip $NSX_POLICY --user $NSX_USER --password $NSX_PASSWORD fi fi fi if [[ "$1" == "stack" && ("$2" == "install" || "$2" == "post-config") ]]; then if is_service_enabled q-fwaas-v2; then # make sure ml2 config exists for FWaaS-v2 if [ ! -f "/etc/neutron/plugins/ml2/ml2_conf.ini" ]; then if [[ ! -f "/etc/neutron" ]]; then # Create /etc/neutron with the right ownership sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR fi mkdir -p /etc/neutron/plugins/ml2 touch /etc/neutron/plugins/ml2/ml2_conf.ini fi fi fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/devstack/settings0000644000175000017500000000172700000000000020575 0ustar00coreycorey00000000000000#!/bin/bash # Copyright 2015 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. NSX_XTRACE=$(set +o | grep xtrace) set +o xtrace if [[ $Q_PLUGIN == 'vmware_nsx' ]]; then NSX_GATEWAY_NETWORK_INTERFACE=${NSX_GATEWAY_NETWORK_INTERFACE:-eth2} # Re-declare floating range as it's needed also in stop_vmware_nsx, which # is invoked by unstack.sh FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24} fi $NSX_XTRACE ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.170253 vmware-nsx-15.0.1.dev143/devstack/tools/0000755000175000017500000000000000000000000020143 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/devstack/tools/nsxp_cleanup.py0000755000175000017500000004610500000000000023225 0ustar00coreycorey00000000000000# Copyright 2018 VMware Inc # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import optparse import sqlalchemy as sa from neutron.db.models import l3 from neutron.db.models import securitygroup from neutron.db.models import segment # noqa from neutron.db import models_v2 from vmware_nsx.db import nsx_models from vmware_nsxlib import v3 from vmware_nsxlib.v3 import config from vmware_nsxlib.v3 import exceptions from vmware_nsxlib.v3 import nsx_constants from vmware_nsxlib.v3 import policy from vmware_nsxlib.v3.policy import constants as policy_constants class NeutronNsxDB(object): def __init__(self, db_connection): super(NeutronNsxDB, self).__init__() engine = sa.create_engine(db_connection) self.session = sa.orm.session.sessionmaker()(bind=engine) def query_all(self, column, model): return list(set([r[column] for r in self.session.query(model).all()])) def get_security_groups(self): return self.query_all('id', securitygroup.SecurityGroup) def get_security_groups_rules(self): return self.query_all('id', securitygroup.SecurityGroupRule) def get_routers(self): return self.query_all('id', l3.Router) def get_networks(self): return self.query_all('id', models_v2.Network) def get_ports(self): return self.query_all('id', models_v2.Port) def get_logical_dhcp_servers(self): """The policy plugin still has mapping for the dhcp servers because it uses the passthrough api """ return self.query_all('nsx_service_id', nsx_models.NeutronNsxServiceBinding) def get_logical_ports(self): return self.query_all('nsx_port_id', nsx_models.NeutronNsxPortMapping) class NSXClient(object): """Base NSX REST client""" API_VERSION = "v1" NULL_CURSOR_PREFIX = '0000' def __init__(self, host, username, password, db_connection, allow_passthrough=True): self.host = host self.username = username self.password = password self.allow_passthrough = allow_passthrough self.neutron_db = (NeutronNsxDB(db_connection) if db_connection else None) nsxlib_config = config.NsxLibConfig( username=self.username, password=self.password, nsx_api_managers=[self.host], 
allow_passthrough=allow_passthrough, # allow admin user to delete entities created # under openstack principal identity allow_overwrite_header=True) self.nsxpolicy = policy.NsxPolicyLib(nsxlib_config) if allow_passthrough: self.nsxlib = v3.NsxLib(nsxlib_config) else: self.NsxLib = None def get_os_resources(self, resources): """ Get all logical resources created by OpenStack """ os_resources = [r for r in resources if 'tags' in r for tag in r['tags'] if 'os-api-version' in tag.values()] return os_resources def get_os_nsx_groups_and_maps(self, domain_id): """ Retrieve all NSX policy groups & maps created from OpenStack (by tags) If the DB is available - use only objects in the neutron DB """ groups = self.get_os_resources(self.nsxpolicy.group.list(domain_id)) maps = self.get_os_resources(self.nsxpolicy.comm_map.list(domain_id)) if self.neutron_db: db_sgs = self.neutron_db.get_security_groups() filtered_groups = [g for g in groups if g['id'] in db_sgs] maps = [m for m in maps if m['id'] in db_sgs] # Add groups based on SG rules local/remote ips db_rules = self.neutron_db.get_security_groups_rules() filtered_groups.extend([g for g in groups if g['id'][:36] in db_rules]) groups = filtered_groups return groups, maps def cleanup_security_groups(self, domain_id): """Delete all OS created NSX Policy security group resources""" groups, maps = self.get_os_nsx_groups_and_maps(domain_id) print("Number of OS Communication maps of domain %s to be deleted: " "%s" % (domain_id, len(maps))) for m in maps: self.nsxpolicy.comm_map.delete(domain_id, m['id']) print("Number of OS Groups of domain %s to be deleted: " "%s" % (domain_id, len(groups))) for grp in groups: try: self.nsxpolicy.group.delete(domain_id, grp['id']) except exceptions.ManagerError as e: print("Failed to delete group %s: %s" % (grp['id'], e)) def get_os_nsx_tier1_routers(self): """ Retrieve all NSX policy routers created from OpenStack (by tags) If the DB is available - use only objects in the neutron DB """ routers = 
self.get_os_resources(self.nsxpolicy.tier1.list()) if routers and self.neutron_db: db_routers = self.neutron_db.get_routers() routers = [r for r in routers if r['id'] in db_routers] return routers def cleanup_tier1_nat_rules(self, tier1_uuid): rules = self.nsxpolicy.tier1_nat_rule.list(tier1_uuid) for rule in rules: try: self.nsxpolicy.tier1_nat_rule.delete(tier1_uuid, rule['id']) except exceptions.ManagerError as e: print("Failed to delete nat rule %s: %s" % (rule['id'], e)) def cleanup_tier1_routers(self): """Delete all OS created NSX Policy routers""" routers = self.get_os_nsx_tier1_routers() print("Number of OS Tier1 routers to be deleted: %s" % len(routers)) for rtr in routers: # remove all nat rules from this router before deletion self.cleanup_tier1_nat_rules(rtr['id']) try: self.nsxpolicy.tier1.delete_locale_service(rtr['id']) except exceptions.ManagerError: # Not always exists pass try: self.nsxpolicy.tier1.delete(rtr['id']) except exceptions.ManagerError as e: print("Failed to delete tier1 %s: %s" % (rtr['id'], e)) def get_os_nsx_segments(self): """ Retrieve all NSX policy segments created from OpenStack (by tags) If the DB is available - use only objects in the neutron DB """ segments = self.get_os_resources(self.nsxpolicy.segment.list()) if segments and self.neutron_db: db_networks = self.neutron_db.get_networks() segments = [s for s in segments if s['id'] in db_networks] return segments def delete_network_nsx_dhcp_port(self, network_id): # Delete dhcp port when using MP dhcp if not self.nsxlib: # no passthrough api return port_id = self.nsxlib.get_id_by_resource_and_tag( self.nsxlib.logical_port.resource_type, 'os-neutron-net-id', network_id) if port_id: self.nsxlib.logical_port.delete(port_id) def cleanup_segments_interfaces(self): segments = self.get_os_nsx_segments() routers = self.get_os_nsx_tier1_routers() print("Cleaning interfaces of %s segments and %s tier-1s" % ( len(segments), len(routers))) for s in segments: # Disassociate overlay 
interfaces from tier1 routers self.nsxpolicy.segment.remove_connectivity_and_subnets(s['id']) for rtr in routers: # Disassociate VLAN interfaces from tier1 routers interfaces = self.nsxpolicy.tier1.list_segment_interface( rtr['id']) for intf in interfaces: self.nsxpolicy.tier1.remove_segment_interface( rtr['id'], intf['id']) def cleanup_segments(self): """Delete all OS created NSX Policy segments & ports""" segments = self.get_os_nsx_segments() print("Number of OS segments to be deleted: %s" % len(segments)) for s in segments: # Delete all the ports self.cleanup_segment_ports(s['id']) # Delete the nsx mdproxy port self.delete_network_nsx_dhcp_port(s['id']) try: # Delete the segment self.nsxpolicy.segment.delete(s['id']) except exceptions.ManagerError as e: print("Failed to delete segment %s: %s" % (s['id'], e)) def get_os_nsx_segment_ports(self, segment_id): """ Retrieve all NSX policy segment ports created from OpenStack (by tags) If the DB is available - use only objects in the neutron DB """ segment_ports = self.get_os_resources( self.nsxpolicy.segment_port.list(segment_id)) if segment_ports and self.neutron_db: db_ports = self.neutron_db.get_ports() segment_ports = [s for s in segment_ports if s['id'] in db_ports] return segment_ports def cleanup_segment_ports(self, segment_id): """Delete all OS created NSX Policy segments ports per segment""" segment_ports = self.get_os_nsx_segment_ports(segment_id) for p in segment_ports: try: self.nsxpolicy.segment_port_security_profiles.delete( segment_id, p['id']) except Exception: pass try: self.nsxpolicy.segment_port_discovery_profiles.delete( segment_id, p['id']) except Exception: pass try: self.nsxpolicy.segment_port_qos_profiles.delete( segment_id, p['id']) except Exception: pass try: self.nsxpolicy.segment_port.delete(segment_id, p['id']) except exceptions.ManagerError as e: print("Failed to delete segment port %s: %s" % (p['id'], e)) def get_logical_dhcp_servers(self): """ Retrieve all logical DHCP servers on NSX 
backend The policy plugin still uses nsxlib for this because it uses the passthrough api. """ return self.nsxlib.dhcp_server.list()['results'] def get_logical_ports(self): """ Retrieve all logical ports on NSX backend """ return self.nsxlib.logical_port.list()['results'] def get_os_dhcp_logical_ports(self): """ Retrieve all DHCP logical ports created from OpenStack """ # Get all NSX openstack ports, and filter the DHCP ones lports = self.get_os_resources( self.get_logical_ports()) lports = [lp for lp in lports if lp.get('attachment') and lp['attachment'].get( 'attachment_type') == nsx_constants.ATTACHMENT_DHCP] if self.neutron_db: db_lports = self.neutron_db.get_logical_ports() lports = [lp for lp in lports if lp['id'] in db_lports] return lports def cleanup_os_dhcp_logical_ports(self): """Delete all DHCP logical ports created by OpenStack DHCP ports are the only ones the policy plugin creates directly on the NSX """ os_lports = self.get_os_dhcp_logical_ports() print("Number of OS Logical Ports to be deleted: %s" % len(os_lports)) for p in os_lports: try: self.nsxlib.logical_port.update( p['id'], None, attachment_type=None) self.nsxlib.logical_port.delete(p['id']) except Exception as e: print("ERROR: Failed to delete logical port %s, error %s" % (p['id'], e)) else: print("Successfully deleted logical port %s" % p['id']) def get_os_logical_dhcp_servers(self): """ Retrieve all logical DHCP servers created from OpenStack """ dhcp_servers = self.get_os_resources( self.get_logical_dhcp_servers()) if self.neutron_db: db_dhcp_servers = self.neutron_db.get_logical_dhcp_servers() dhcp_servers = [srv for srv in dhcp_servers if srv['id'] in db_dhcp_servers] return dhcp_servers def cleanup_nsx_logical_dhcp_servers(self): """ Cleanup all logical DHCP servers created from OpenStack plugin The policy plugin still uses nsxlib for this because it uses the passthrough api. 
""" if not self.nsxlib: # No passthrough api return # First delete the DHCP ports (from the NSX) self.cleanup_os_dhcp_logical_ports() dhcp_servers = self.get_os_logical_dhcp_servers() print("Number of OS Logical DHCP Servers to be deleted: %s" % len(dhcp_servers)) for server in dhcp_servers: try: self.nsxlib.dhcp_server.delete(server['id']) except Exception as e: print("ERROR: Failed to delete logical DHCP server %s, " "error %s" % (server['display_name'], e)) else: print("Successfully deleted logical DHCP server %s" % server['display_name']) def get_os_nsx_services(self): """ Retrieve all NSX policy services created from OpenStack SG rules (by tags) If the DB is available - use only objects in the neutron DB """ services = self.get_os_resources(self.nsxpolicy.service.list()) if services and self.neutron_db: db_rules = self.neutron_db.get_security_groups_rules() services = [s for s in services if s['id'] in db_rules] return services def cleanup_rules_services(self): """Delete all OS created NSX services""" services = self.get_os_nsx_services() print("Number of OS rule services to be deleted: %s" % len(services)) for srv in services: try: self.nsxpolicy.service.delete(srv['id']) except exceptions.ManagerError as e: print("Failed to delete rule service %s: %s" % (srv['id'], e)) def _cleanup_lb_resource(self, service, service_name): r_list = self.get_os_resources(service.list()) print("Number of %s to be deleted: %d" % (service_name, len(r_list))) for r in r_list: try: service.delete( r['id']) except Exception as e: print("ERROR: Failed to delete %s %s, error %s" % (r['resource_type'], r['id'], e)) def cleanup_lb_virtual_servers(self): self._cleanup_lb_resource(self.nsxpolicy.load_balancer.virtual_server, 'LB virtual servers') def cleanup_lb_server_pools(self): self._cleanup_lb_resource(self.nsxpolicy.load_balancer.lb_pool, 'LB pools') def cleanup_lb_profiles(self): lb_svc = self.nsxpolicy.load_balancer self._cleanup_lb_resource(lb_svc.lb_http_profile, 'LB HTTP app 
profiles') self._cleanup_lb_resource(lb_svc.lb_fast_tcp_profile, 'LB HTTPS app profiles') self._cleanup_lb_resource(lb_svc.lb_fast_udp_profile, 'LB UDP app profiles') self._cleanup_lb_resource(lb_svc.client_ssl_profile, 'LB SSL client profiles') self._cleanup_lb_resource(lb_svc.lb_cookie_persistence_profile, 'LB cookie persistence profiles') self._cleanup_lb_resource(lb_svc.lb_source_ip_persistence_profile, 'LB source IP persistence profiles') def cleanup_lb_monitors(self): lb_svc = self.nsxpolicy.load_balancer self._cleanup_lb_resource(lb_svc.lb_monitor_profile_http, 'LB HTTP monitor profiles') self._cleanup_lb_resource(lb_svc.lb_monitor_profile_https, 'LB HTTPS monitor profiles') self._cleanup_lb_resource(lb_svc.lb_monitor_profile_udp, 'LB UDP monitor profiles') self._cleanup_lb_resource(lb_svc.lb_monitor_profile_icmp, 'LB ICMP monitor profiles') self._cleanup_lb_resource(lb_svc.lb_monitor_profile_tcp, 'LB TCP monitor profiles') def cleanup_lb_services(self): self._cleanup_lb_resource(self.nsxpolicy.load_balancer.lb_service, 'LB services') def cleanup_load_balancers(self): self.cleanup_lb_virtual_servers() self.cleanup_lb_profiles() self.cleanup_lb_services() self.cleanup_lb_server_pools() self.cleanup_lb_monitors() def cleanup_all(self): """ Per domain cleanup steps: - Security groups resources Global cleanup steps: - Tier1 routers - Segments and ports - rules and services """ print("Cleaning up openstack resources") self.cleanup_security_groups(policy_constants.DEFAULT_DOMAIN) self.cleanup_segments_interfaces() self.cleanup_segments() self.cleanup_load_balancers() self.cleanup_nsx_logical_dhcp_servers() self.cleanup_tier1_routers() self.cleanup_rules_services() if __name__ == "__main__": parser = optparse.OptionParser() parser.add_option("--policy-ip", dest="policy_ip", help="NSX Policy IP " "address") parser.add_option("-u", "--username", default="admin", dest="username", help="NSX Policy username") parser.add_option("-p", "--password", default="default", 
dest="password", help="NSX Policy password") parser.add_option("--db-connection", default="", dest="db_connection", help=("When set, cleaning only backend resources that " "have db record.")) parser.add_option("--allow-passthrough", default="true", dest="allow_passthrough", help=("When True, passthrough api will be used to " "cleanup some NSX objects.")) (options, args) = parser.parse_args() # Get NSX REST client nsx_client = NSXClient(options.policy_ip, options.username, options.password, options.db_connection, options.allow_passthrough) # Clean all objects created by OpenStack nsx_client.cleanup_all() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/devstack/tools/nsxv3_cleanup.py0000755000175000017500000005754400000000000023327 0ustar00coreycorey00000000000000# Copyright 2015 VMware Inc # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import optparse import sqlalchemy as sa from vmware_nsx.db import nsx_models from vmware_nsxlib import v3 from vmware_nsxlib.v3 import config from vmware_nsxlib.v3 import nsx_constants class NeutronNsxDB(object): def __init__(self, db_connection): super(NeutronNsxDB, self).__init__() engine = sa.create_engine(db_connection) self.session = sa.orm.session.sessionmaker()(bind=engine) def query_all(self, column, model): return list(set([r[column] for r in self.session.query(model).all()])) def get_logical_ports(self): return self.query_all('nsx_port_id', nsx_models.NeutronNsxPortMapping) def get_nsgroups(self): return self.query_all('nsx_id', nsx_models.NeutronNsxSecurityGroupMapping) def get_firewall_sections(self): return self.query_all('nsx_id', nsx_models.NeutronNsxFirewallSectionMapping) def get_logical_routers(self): return self.query_all('nsx_id', nsx_models.NeutronNsxRouterMapping) def get_logical_switches(self): return self.query_all('nsx_id', nsx_models.NeutronNsxNetworkMapping) def get_logical_dhcp_servers(self): return self.query_all('nsx_service_id', nsx_models.NeutronNsxServiceBinding) def get_vpn_objects(self, column_name): return self.query_all(column_name, nsx_models.NsxVpnConnectionMapping) def get_db_objects_by_table_and_column(self, db_table, db_column): return self.query_all(db_column, db_table) class NSXClient(object): """Base NSX REST client""" API_VERSION = "v1" NULL_CURSOR_PREFIX = '0000' def __init__(self, host, username, password, db_connection): self.host = host self.username = username self.password = password self.neutron_db = (NeutronNsxDB(db_connection) if db_connection else None) nsxlib_config = config.NsxLibConfig( username=self.username, password=self.password, nsx_api_managers=[self.host], # allow admin user to delete entities created # under openstack principal identity allow_overwrite_header=True) self.nsxlib = v3.NsxLib(nsxlib_config) def get_transport_zones(self): """ Retrieve all transport zones """ return 
self.nsxlib.transport_zone.list()['results'] def get_logical_ports(self): """ Retrieve all logical ports on NSX backend """ return self.nsxlib.logical_port.list()['results'] def get_os_logical_ports(self): """ Retrieve all logical ports created from OpenStack """ lports = self.get_os_resources( self.get_logical_ports()) if self.neutron_db: db_lports = self.neutron_db.get_logical_ports() lports = [lp for lp in lports if lp['id'] in db_lports] return lports def update_logical_port_attachment(self, lports): """ In order to delete logical ports, we need to detach the VIF attachment on the ports first. """ for p in lports: try: self.nsxlib.logical_port.update( p['id'], None, attachment_type=None) except Exception as e: print("ERROR: Failed to update lport %s: %s" % p['id'], e) def _remove_port_from_exclude_list(self, p): try: self.nsxlib.firewall_section.remove_member_from_fw_exclude_list( p['id'], None) except Exception: pass def _cleanup_logical_ports(self, lports): # logical port vif detachment self.update_logical_port_attachment(lports) for p in lports: # delete this port from the exclude list (if in it) self._remove_port_from_exclude_list(p) try: self.nsxlib.logical_port.delete(p['id']) except Exception as e: print("ERROR: Failed to delete logical port %s, error %s" % (p['id'], e)) else: print("Successfully deleted logical port %s" % p['id']) def cleanup_os_logical_ports(self): """ Delete all logical ports created by OpenStack """ os_lports = self.get_os_logical_ports() print("Number of OS Logical Ports to be deleted: %s" % len(os_lports)) self._cleanup_logical_ports(os_lports) def get_os_resources(self, resources): """ Get all logical resources created by OpenStack """ os_resources = [r for r in resources if 'tags' in r for tag in r['tags'] if 'os-api-version' in tag.values()] return os_resources def get_logical_switches(self): """ Retrieve all logical switches on NSX backend """ return self.nsxlib.logical_switch.list()['results'] def 
get_os_logical_switches(self): """ Retrieve all logical switches created from OpenStack """ lswitches = self.get_os_resources( self.get_logical_switches()) if self.neutron_db: db_lswitches = self.neutron_db.get_logical_switches() lswitches = [ls for ls in lswitches if ls['id'] in db_lswitches] return lswitches def get_lswitch_ports(self, ls_id): """ Return all the logical ports that belong to this lswitch """ lports = self.get_logical_ports() return [p for p in lports if p['logical_switch_id'] == ls_id] def cleanup_os_logical_switches(self): """ Delete all logical switches created from OpenStack """ lswitches = self.get_os_logical_switches() print("Number of OS Logical Switches to be deleted: %s" % len(lswitches)) for ls in lswitches: # Check if there are still ports on switch and blow them away # An example here is a metadata proxy port (this is not stored # in the DB so we are unable to delete it when reading ports # from the DB) lports = self.get_lswitch_ports(ls['id']) if lports: print("Number of orphan OS Logical Ports to be " "deleted: %s" % len(lports)) self._cleanup_logical_ports(lports) try: self.nsxlib.logical_switch.delete(ls['id']) except Exception as e: print("ERROR: Failed to delete logical switch %s-%s, " "error %s" % (ls['display_name'], ls['id'], e)) else: print("Successfully deleted logical switch %s-%s" % (ls['display_name'], ls['id'])) def get_firewall_sections(self): """ Retrieve all firewall sections """ return self.nsxlib.firewall_section.list() def get_os_firewall_sections(self): """ Retrieve all firewall sections created from OpenStack """ fw_sections = self.get_os_resources( self.get_firewall_sections()) if self.neutron_db: db_sections = self.neutron_db.get_firewall_sections() fw_sections = [fws for fws in fw_sections if fws['id'] in db_sections] return fw_sections def cleanup_os_firewall_sections(self): """ Cleanup all firewall sections created from OpenStack """ fw_sections = self.get_os_firewall_sections() print("Number of OS Firewall 
Sections to be deleted: %s" % len(fw_sections)) for fw in fw_sections: try: self.nsxlib.firewall_section.delete(fw['id']) except Exception as e: print("Failed to delete firewall section %s: %s" % (fw['display_name'], e)) else: print("Successfully deleted firewall section %s" % fw['display_name']) def get_ns_groups(self): """ Retrieve all NSGroups on NSX backend """ backend_groups = self.nsxlib.ns_group.list() ns_groups = self.get_os_resources(backend_groups) if self.neutron_db: db_nsgroups = self.neutron_db.get_nsgroups() ns_groups = [nsg for nsg in ns_groups if nsg['id'] in db_nsgroups] return ns_groups def cleanup_os_ns_groups(self): """ Cleanup all NSGroups created from OpenStack plugin """ ns_groups = self.get_ns_groups() print("Number of OS NSGroups to be deleted: %s" % len(ns_groups)) for nsg in ns_groups: try: self.nsxlib.ns_group.delete(nsg['id']) except Exception as e: print("Failed to delete NSGroup: %s: %s" % (nsg['display_name'], e)) else: print("Successfully deleted NSGroup: %s" % nsg['display_name']) def get_switching_profiles(self): """ Retrieve all Switching Profiles on NSX backend """ return self.nsxlib.switching_profile.list()['results'] def get_os_switching_profiles(self): """ Retrieve all Switching Profiles created from OpenStack """ sw_profiles = self.get_os_resources( self.get_switching_profiles()) if self.neutron_db: sw_profiles = [] return sw_profiles def cleanup_os_switching_profiles(self): """ Cleanup all Switching Profiles created from OpenStack plugin """ sw_profiles = self.get_os_switching_profiles() print("Number of OS SwitchingProfiles to be deleted: %s" % len(sw_profiles)) for swp in sw_profiles: try: self.nsxlib.switching_profile.delete(swp['id']) except Exception as e: print("Failed to delete Switching Profile: %s: %s" % (swp['display_name'], e)) else: print("Successfully deleted Switching Profile: %s" % swp['display_name']) def get_logical_routers(self, tier=None): """ Retrieve all the logical routers based on router type. 
If tier is None, it will return all logical routers. """ return self.nsxlib.logical_router.list( router_type=tier)['results'] def get_tier1_logical_routers(self): """ Retrieve all tier1 logical routers, and return only neutron ones. """ lrouters = self.get_logical_routers(tier='TIER1') if self.neutron_db: db_routers = self.neutron_db.get_logical_routers() lrouters = [lr for lr in lrouters if lr['id'] in db_routers] return lrouters def get_tier0_logical_routers(self): """ Retrieve all tier0 logical routers. """ return self.get_logical_routers(tier='TIER0') def get_os_logical_routers(self): """ Retrieve all logical routers created from Neutron NSXv3 plugin """ lrouters = self.get_tier1_logical_routers() return self.get_os_resources(lrouters) def get_logical_router_ports(self, lrouter): """ Get all logical ports attached to lrouter """ return self.nsxlib.logical_router_port.get_by_router_id(lrouter['id']) def get_os_logical_router_ports(self, lrouter): """ Retrieve all logical router ports created from Neutron NSXv3 plugin """ lports = self.get_logical_router_ports(lrouter) return self.get_os_resources(lports) def cleanup_logical_router_ports(self, lrouter): """ Cleanup all logical ports on a logical router """ lports = self.get_os_logical_router_ports(lrouter) for lp in lports: try: self.nsxlib.logical_router_port.delete(lp['id']) except Exception as e: print("Failed to delete logical router port %s-%s, " "and response is %s" % (lp['display_name'], lp['id'], e)) else: print("Successfully deleted logical router port %s-%s" % (lp['display_name'], lp['id'])) def cleanup_os_logical_routers(self): """ Delete all logical routers created from OpenStack To delete a logical router, we need to delete all logical ports on the router first. 
""" lrouters = self.get_os_logical_routers() print("Number of OS Logical Routers to be deleted: %s" % len(lrouters)) for lr in lrouters: self.cleanup_logical_router_ports(lr) self.cleanup_logical_router_vpn_sess(lr) try: self.nsxlib.logical_router.delete(lr['id']) except Exception as e: print("ERROR: Failed to delete logical router %s-%s, " "error %s" % (lr['display_name'], lr['id'], e)) else: print("Successfully deleted logical router %s-%s" % (lr['display_name'], lr['id'])) def cleanup_os_tier0_logical_ports(self): """ Delete all TIER0 logical router ports created from OpenStack """ tier0_routers = self.get_tier0_logical_routers() print("Number of TIER0 Logical Routers whos ports will be deleted: " "%s" % len(tier0_routers)) for lr in tier0_routers: self.cleanup_logical_router_ports(lr) def get_logical_dhcp_servers(self): """ Retrieve all logical DHCP servers on NSX backend """ return self.nsxlib.dhcp_server.list()['results'] def get_os_logical_dhcp_servers(self): """ Retrieve all logical DHCP servers created from OpenStack """ dhcp_servers = self.get_os_resources( self.get_logical_dhcp_servers()) if self.neutron_db: db_dhcp_servers = self.neutron_db.get_logical_dhcp_servers() dhcp_servers = [srv for srv in dhcp_servers if srv['id'] in db_dhcp_servers] return dhcp_servers def cleanup_os_logical_dhcp_servers(self): """ Cleanup all logical DHCP servers created from OpenStack plugin """ dhcp_servers = self.get_os_logical_dhcp_servers() print("Number of OS Logical DHCP Servers to be deleted: %s" % len(dhcp_servers)) for server in dhcp_servers: try: self.nsxlib.dhcp_server.delete(server['id']) except Exception as e: print("ERROR: Failed to delete logical DHCP server %s, " "error %s" % (server['display_name'], e)) else: print("Successfully deleted logical DHCP server %s" % server['display_name']) def get_os_vpn_objects(self, nsxlib_class, db_column_name): """ Retrieve all nsx vpn sessions from nsx and OpenStack """ objects = 
self.get_os_resources(nsxlib_class.list()['results']) if self.neutron_db: db_objects = self.neutron_db.get_vpn_objects(db_column_name) objects = [obj for obj in objects if obj['id'] in db_objects] return objects def clean_vpn_objects(self, obj_name, nsxlib_class, db_column_name): objects = self.get_os_vpn_objects(nsxlib_class, db_column_name) print("Number of VPN %(name)ss to be deleted: %(num)s" % {'name': obj_name, 'num': len(objects)}) for obj in objects: try: nsxlib_class.delete(obj['id']) except Exception as e: print("ERROR: Failed to delete vpn ipsec %(name)s %(id)s, " "error %(e)s" % {'name': obj_name, 'id': obj['id'], 'e': e}) else: print("Successfully deleted vpn ipsec %(name)s %(id)s" % {'name': obj_name, 'id': obj['id']}) def cleanup_vpnaas(self): """ Cleanup vpn/ipsec nsx objects """ if not self.nsxlib.feature_supported(nsx_constants.FEATURE_IPSEC_VPN): # no vpn support return self.clean_vpn_objects('session', self.nsxlib.vpn_ipsec.session, 'session_id') self.clean_vpn_objects('peer endpoint', self.nsxlib.vpn_ipsec.peer_endpoint, 'peer_ep_id') self.clean_vpn_objects('DPD profile', self.nsxlib.vpn_ipsec.dpd_profile, 'dpd_profile_id') self.clean_vpn_objects('IKE profile', self.nsxlib.vpn_ipsec.ike_profile, 'ike_profile_id') self.clean_vpn_objects('tunnel profile', self.nsxlib.vpn_ipsec.tunnel_profile, 'ipsec_profile_id') #NOTE(asarfaty): The vpn services are not deleted since we have 1 per # Tier-0 router, and those can be used outside of openstack too. 
def cleanup_logical_router_vpn_sess(self, lr): """ Cleanup the vpn local session of the logical router """ if not self.nsxlib.feature_supported(nsx_constants.FEATURE_IPSEC_VPN): # no vpn support return # find the router neutron id in its tags neutron_id = None for tag in lr['tags']: if tag.get('scope') == 'os-neutron-router-id': neutron_id = tag.get('tag') break if not neutron_id: return tags = [{'scope': 'os-neutron-router-id', 'tag': neutron_id}] ep_list = self.nsxlib.search_by_tags( tags=tags, resource_type=self.nsxlib.vpn_ipsec.local_endpoint.resource_type) if ep_list['results']: id = ep_list['results'][0]['id'] try: self.nsxlib.vpn_ipsec.local_endpoint.delete(id) except Exception as e: print("ERROR: Failed to delete vpn ipsec local endpoint %s, " "error %s" % (id, e)) else: print("Successfully deleted vpn ipsec local endpoint %s" % id) def get_os_nsx_objects(self, nsxlib_class, db_table, db_column): """ Retrieve all nsx objects of a given type from the nsx and OpenStack DB """ objects = self.get_os_resources(nsxlib_class.list()['results']) if self.neutron_db: db_objects = self.neutron_db.get_db_objects_by_table_and_column( db_table, db_column) objects = [obj for obj in objects if obj['id'] in db_objects] return objects def clean_lb_objects(self, obj_name, nsxlib_class, objects): print("Number of LB %(name)ss to be deleted: %(num)s" % {'name': obj_name, 'num': len(objects)}) for obj in objects: try: nsxlib_class.delete(obj['id']) except Exception as e: print("ERROR: Failed to delete LB %(name)s %(id)s, " "error %(e)s" % {'name': obj_name, 'id': obj['id'], 'e': e}) else: print("Successfully deleted LB %(name)s %(id)s" % {'name': obj_name, 'id': obj['id']}) def cleanup_loadbalancer(self): """ Cleanup LBaaS/Octavia loadbalancer objects """ if not self.nsxlib.feature_supported( nsx_constants.FEATURE_LOAD_BALANCER): # no LB support return # lb services objects = self.get_os_nsx_objects(self.nsxlib.load_balancer.service, nsx_models.NsxLbaasLoadbalancer, 
'lb_service_id') self.clean_lb_objects('service', self.nsxlib.load_balancer.service, objects) # listeners objects = self.get_os_nsx_objects( self.nsxlib.load_balancer.virtual_server, nsx_models.NsxLbaasListener, 'lb_vs_id') # get a list of application profiles by their virtual servers app_profiles = [] for virtual_server in objects: lb_vs = self.nsxlib.load_balancer.virtual_server.get( virtual_server['id']) if lb_vs.get('application_profile_id'): app_profiles.append({'id': lb_vs['application_profile_id']}) self.clean_lb_objects('listener', self.nsxlib.load_balancer.virtual_server, objects) # pools objects = self.get_os_nsx_objects(self.nsxlib.load_balancer.pool, nsx_models.NsxLbaasPool, 'lb_pool_id') self.clean_lb_objects('pool', self.nsxlib.load_balancer.pool, objects) # health monitors objects = self.get_os_nsx_objects(self.nsxlib.load_balancer.monitor, nsx_models.NsxLbaasMonitor, 'lb_monitor_id') self.clean_lb_objects('monitor', self.nsxlib.load_balancer.monitor, objects) # application profiles self.clean_lb_objects('application-profile', self.nsxlib.load_balancer.application_profile, app_profiles) def cleanup_all(self): """ Cleanup steps: - Firewall sections - NSGroups - VPN objects - Loadbalancer objects - Logical router and their ports - Logical Tier 0 routers ports - Logical switch ports - Logical switches - DHCP servers - Switching profiles """ self.cleanup_os_firewall_sections() self.cleanup_os_ns_groups() self.cleanup_vpnaas() self.cleanup_loadbalancer() self.cleanup_os_logical_routers() self.cleanup_os_tier0_logical_ports() self.cleanup_os_logical_ports() self.cleanup_os_logical_switches() self.cleanup_os_logical_dhcp_servers() self.cleanup_os_switching_profiles() if __name__ == "__main__": parser = optparse.OptionParser() parser.add_option("--mgr-ip", dest="mgr_ip", help="NSX Manager IP address") parser.add_option("-u", "--username", default="admin", dest="username", help="NSX Manager username") parser.add_option("-p", "--password", default="default", 
dest="password", help="NSX Manager password") parser.add_option("--db-connection", default="", dest="db_connection", help=("When set, cleaning only backend resources that " "have db record.")) (options, args) = parser.parse_args() # Get NSX REST client nsx_client = NSXClient(options.mgr_ip, options.username, options.password, options.db_connection) # Clean all objects created by OpenStack nsx_client.cleanup_all() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/devstack/tools/nsxv_cleanup.py0000644000175000017500000004020100000000000023217 0ustar00coreycorey00000000000000#!/usr/bin/env python # Copyright 2015 VMware Inc # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Purpose: Sometimes NSXv backend are out of sync with OpenStack and all the objects created by OpenStack needs to be cleaned up. This is a util script to cleanup NSXv objects created by OpenStack List of objects to be cleared: - Edge (Service Edge, DHCP Edge, VDR Edge) - Logical Switches (Tenant Networks) - Firewall Rules (Security Group) Usage: python nsxv_cleanup.py --vsm-ip --username --password --force Note: force is optional. 
If it is specified, force delete security group You can also use it in python interactive console by import the module >>>> import nsxv_cleanup >>>> vsm = nsxv_cleanup.VSMClient('10.34.57.101', 'admin', 'default') Cleanup all logical switch >>>> vsm.cleanup_logical_switch() Cleanup all firewall section >>>> vsm.cleanup_firewall_section() Cleanup all security group >>>> vsm.cleanup_security_group() Cleanup all edges >>>> vsm.cleanup_edge() Cleanup all >>>> vsm.cleanup_all() If you have any comment or find a bug, please contact Tong Liu """ import base64 import optparse import sys from oslo_serialization import jsonutils import requests import sqlalchemy as sa from vmware_nsx.db import nsx_models from vmware_nsx.db import nsxv_models requests.packages.urllib3.disable_warnings() class NeutronNsxDB(object): def __init__(self, db_connection): super(NeutronNsxDB, self).__init__() engine = sa.create_engine(db_connection) self.session = sa.orm.session.sessionmaker()(bind=engine) def query_all(self, column, model): return list(set([r[column] for r in self.session.query(model).all()])) def query_all_firewall_sections(self): return self.query_all('ip_section_id', nsxv_models.NsxvSecurityGroupSectionMapping) def query_all_security_groups(self): return self.query_all('nsx_id', nsx_models.NeutronNsxSecurityGroupMapping) def query_all_logical_switches(self): return self.query_all('nsx_id', nsx_models.NeutronNsxNetworkMapping) def query_all_spoofguard_policies(self): return self.query_all('policy_id', nsxv_models.NsxvSpoofGuardPolicyNetworkMapping) def query_all_edges(self): return self.query_all('edge_id', nsxv_models.NsxvRouterBinding) class VSMClient(object): """Base VSM REST client """ API_VERSION = "2.0" def __init__(self, host, username, password, db_connection, force): self.force = force self.host = host self.username = username self.password = password self.version = None self.endpoint = None self.content_type = "application/json" self.accept_type = "application/json" 
self.verify = False self.secure = True self.interface = "json" self.url = None self.headers = None self.api_version = VSMClient.API_VERSION self.neutron_db = (NeutronNsxDB(db_connection) if db_connection else None) self.__set_headers() def __set_endpoint(self, endpoint): self.endpoint = endpoint def get_endpoint(self): return self.endpoint def __set_content_type(self, content_type): self.content_type = content_type def get_content_type(self): return self.content_type def __set_accept_type(self, accept_type): self.accept_type = accept_type def get_accept_type(self): return self.accept_type def __set_api_version(self, api_version): self.api_version = api_version def get_api_version(self): return self.api def __set_url(self, api=None, secure=None, host=None, endpoint=None): api = self.api_version if api is None else api secure = self.secure if secure is None else secure host = self.host if host is None else host endpoint = self.endpoint if endpoint is None else endpoint http_type = 'https' if secure else 'http' self.url = '%s://%s/api/%s%s' % (http_type, host, api, endpoint) def get_url(self): return self.url def __set_headers(self, content=None, accept=None): content_type = self.content_type if content is None else content accept_type = self.accept_type if accept is None else accept auth_cred = self.username + ":" + self.password auth = base64.b64encode(auth_cred.encode()).decode() headers = {} headers['Authorization'] = "Basic %s" % auth headers['Content-Type'] = content_type headers['Accept'] = accept_type self.headers = headers def get(self, endpoint=None, params=None): """ Basic query method for json API request """ self.__set_url(endpoint=endpoint) response = requests.get(self.url, headers=self.headers, verify=self.verify, params=params) return response def delete(self, endpoint=None, params=None): """ Basic delete API method on endpoint """ self.__set_url(endpoint=endpoint) response = requests.delete(self.url, headers=self.headers, verify=self.verify, 
params=params) return response def post(self, endpoint=None, body=None): """ Basic post API method on endpoint """ self.__set_url(endpoint=endpoint) self.__set_headers() response = requests.post(self.url, headers=self.headers, verify=self.verify, data=jsonutils.dumps(body)) return response def get_vdn_scope_id(self): """ Retrieve existing network scope id """ self.__set_api_version('2.0') self.__set_endpoint("/vdn/scopes") response = self.get() if len(response.json()['allScopes']) == 0: return else: return response.json()['allScopes'][0]['objectId'] def query_all_logical_switches(self): lswitches = [] self.__set_api_version('2.0') vdn_scope_id = self.get_vdn_scope_id() if not vdn_scope_id: return lswitches endpoint = "/vdn/scopes/%s/virtualwires" % (vdn_scope_id) self.__set_endpoint(endpoint) # Query all logical switches response = self.get() paging_info = response.json()['dataPage']['pagingInfo'] page_size = int(paging_info['pageSize']) total_count = int(paging_info['totalCount']) print("There are total %s logical switches and page size is %s" % ( total_count, page_size)) pages = ceil(total_count, page_size) print("Total pages: %s" % pages) for i in range(0, pages): start_index = page_size * i params = {'startindex': start_index} response = self.get(params=params) temp_lswitches = response.json()['dataPage']['data'] lswitches += temp_lswitches if self.neutron_db: db_lswitches = self.neutron_db.query_all_logical_switches() lswitches = [ls for ls in lswitches if ls['objectId'] in db_lswitches] return lswitches def cleanup_logical_switch(self): print("Cleaning up logical switches on NSX manager") lswitches = self.query_all_logical_switches() print("There are total %s logical switches" % len(lswitches)) for ls in lswitches: print("\nDeleting logical switch %s (%s) ..." 
% (ls['name'], ls['objectId'])) endpoint = '/vdn/virtualwires/%s' % ls['objectId'] response = self.delete(endpoint=endpoint) if response.status_code != 200: print("ERROR: response status code %s" % response.status_code) def query_all_firewall_sections(self): firewall_sections = [] self.__set_api_version('4.0') self.__set_endpoint('/firewall/globalroot-0/config') # Query all firewall sections response = self.get() # Get layer3 sections related to security group if response.status_code == 200: l3_sections = response.json()['layer3Sections']['layer3Sections'] # do not delete the default section, or sections created by the # service composer firewall_sections = [s for s in l3_sections if (s['name'] != "Default Section Layer3" and "NSX Service Composer" not in s['name'])] else: print("ERROR: wrong response status code! Exiting...") sys.exit() if self.neutron_db: db_sections = self.neutron_db.query_all_firewall_sections() firewall_sections = [fws for fws in firewall_sections if fws['id'] in db_sections] return firewall_sections def cleanup_firewall_section(self): print("\n\nCleaning up firewall sections on NSX manager") l3_sections = self.query_all_firewall_sections() print("There are total %s firewall sections" % len(l3_sections)) for l3sec in l3_sections: print("\nDeleting firewall section %s (%s) ..." % (l3sec['name'], l3sec['id'])) endpoint = '/firewall/globalroot-0/config/layer3sections/%s' % \ l3sec['id'] response = self.delete(endpoint=endpoint) if response.status_code != 204: print("ERROR: response status code %s" % response.status_code) def query_all_security_groups(self): security_groups = [] self.__set_api_version('2.0') self.__set_endpoint("/services/securitygroup/scope/globalroot-0") # Query all security groups response = self.get() if response.status_code == 200: sg_all = response.json() else: print("ERROR: wrong response status code! 
Exiting...") sys.exit() # Remove Activity Monitoring Data Collection, which is not # related to any security group created by OpenStack security_groups = [sg for sg in sg_all if sg['name'] != "Activity Monitoring Data Collection"] if self.neutron_db: db_sgs = self.neutron_db.query_all_security_groups() security_groups = [sg for sg in security_groups if sg['objectId'] in db_sgs] return security_groups def cleanup_security_group(self): print("\n\nCleaning up security groups on NSX manager") security_groups = self.query_all_security_groups() print("There are total %s security groups" % len(security_groups)) for sg in security_groups: print("\nDeleting security group %s (%s) ..." % (sg['name'], sg['objectId'])) endpoint = '/services/securitygroup/%s' % sg['objectId'] params = {'force': self.force} response = self.delete(endpoint=endpoint, params=params) if response.status_code != 200: print("ERROR: response status code %s" % response.status_code) def query_all_spoofguard_policies(self): self.__set_api_version('4.0') self.__set_endpoint("/services/spoofguard/policies/") # Query all spoofguard policies response = self.get() if response.status_code != 200: print("ERROR: Faield to get spoofguard policies") return sgp_all = response.json() policies = [sgp for sgp in sgp_all['policies'] if sgp['name'] != 'Default Policy'] if self.neutron_db: db_policies = self.neutron_db.query_all_spoofguard_policies() policies = [p for p in policies if p['policyId'] in db_policies] return policies def cleanup_spoofguard_policies(self): print("\n\nCleaning up spoofguard policies") policies = self.query_all_spoofguard_policies() print("There are total %s policies" % len(policies)) for spg in policies: print("\nDeleting spoofguard policy %s (%s) ..." 
% (spg['name'], spg['policyId'])) endpoint = '/services/spoofguard/policies/%s' % spg['policyId'] response = self.delete(endpoint=endpoint) print("Response code: %s" % response.status_code) def query_all_edges(self): edges = [] self.__set_api_version('4.0') self.__set_endpoint("/edges") # Query all edges response = self.get() paging_info = response.json()['edgePage']['pagingInfo'] page_size = int(paging_info['pageSize']) total_count = int(paging_info['totalCount']) print("There are total %s edges and page size is %s" % ( total_count, page_size)) pages = ceil(total_count, page_size) print("Total pages: %s" % pages) for i in range(0, pages): start_index = page_size * i params = {'startindex': start_index} response = self.get(params=params) temp_edges = response.json()['edgePage']['data'] edges += temp_edges if self.neutron_db: db_edges = self.neutron_db.query_all_edges() edges = [e for e in edges if e['id'] in db_edges] return edges def cleanup_edge(self): print("\n\nCleaning up edges on NSX manager") edges = self.query_all_edges() for edge in edges: print("\nDeleting edge %s (%s) ..." 
% (edge['name'], edge['id'])) endpoint = '/edges/%s' % edge['id'] response = self.delete(endpoint=endpoint) if response.status_code != 204: print("ERROR: response status code %s" % response.status_code) def cleanup_all(self): self.cleanup_firewall_section() self.cleanup_security_group() self.cleanup_spoofguard_policies() self.cleanup_edge() self.cleanup_logical_switch() def ceil(a, b): if b == 0: return 0 div = a / b mod = 0 if a % b == 0 else 1 return int(div + mod) if __name__ == "__main__": parser = optparse.OptionParser() parser.add_option("--vsm-ip", dest="vsm_ip", help="NSX Manager IP address") parser.add_option("-u", "--username", default="admin", dest="username", help="NSX Manager username") parser.add_option("-p", "--password", default="default", dest="password", help="NSX Manager password") parser.add_option("--db-connection", dest="db_connection", default="", help=("When set, cleaning only backend resources that " "have db record.")) parser.add_option("-f", "--force", dest="force", action="store_true", help="Force cleanup option") (options, args) = parser.parse_args() print("vsm-ip: %s" % options.vsm_ip) print("username: %s" % options.username) print("password: %s" % options.password) print("db-connection: %s" % options.db_connection) print("force: %s" % options.force) # Get VSM REST client vsm_client = VSMClient(options.vsm_ip, options.username, options.password, options.db_connection, options.force) # Clean all objects created by OpenStack vsm_client.cleanup_all() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/devstack/tools/nsxv_edge_resources.py0000644000175000017500000000746200000000000024602 0ustar00coreycorey00000000000000#!/usr/bin/env python # Copyright 2015 VMware Inc # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Purpose: Configure edge resource limits Usage: python nsxv_edge_resources.py --vsm-ip --username --password """ import base64 import optparse import xml.etree.ElementTree as et from oslo_serialization import jsonutils import requests import six requests.packages.urllib3.disable_warnings() class NSXClient(object): def __init__(self, host, username, password, *args, **kwargs): self._host = host self._username = username self._password = password def _get_headers(self, format): auth_cred = self._username + ":" + self._password auth = base64.b64encode(auth_cred) headers = {} headers['Authorization'] = "Basic %s" % auth headers['Content-Type'] = "application/%s" % format headers['Accept'] = "application/%s" % format return headers def _get_url(self, uri): return 'https://%s/%s' % (self._host, uri) def _get(self, format, uri): headers = self._get_headers(format) url = self._get_url(uri) response = requests.get(url, headers=headers, verify=False) return response def _put(self, format, uri, data): headers = self._get_headers(format) url = self._get_url(uri) response = requests.put(url, headers=headers, verify=False, data=data) return response def _get_tuning_configuration(self): response = self._get("json", "/api/4.0/edgePublish/tuningConfiguration") return jsonutils.loads(response.text) def configure_reservations(self): config = self._get_tuning_configuration() # NSX only receive XML format for the resource allocation update tuning = et.Element('tuningConfiguration') for opt, val in six.iteritems(config): child = et.Element(opt) if (opt == 
'edgeVCpuReservationPercentage' or opt == 'edgeMemoryReservationPercentage'): child.text = '0' elif opt == 'megaHertzPerVCpu': child.text = '1500' else: child.text = str(val) tuning.append(child) self._put("xml", "/api/4.0/edgePublish/tuningConfiguration", et.tostring(tuning)) print("Edge resource limits set") if __name__ == "__main__": parser = optparse.OptionParser() parser.add_option("--vsm-ip", dest="vsm_ip", help="NSX Manager IP address") parser.add_option("-u", "--username", default="admin", dest="username", help="NSX Manager username") parser.add_option("-p", "--password", default="default", dest="password", help="NSX Manager password") (options, args) = parser.parse_args() print("vsm-ip: %s" % options.vsm_ip) print("username: %s" % options.username) print("password: %s" % options.password) nsx_client = NSXClient(options.vsm_ip, options.username, options.password) nsx_client.configure_reservations() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/devstack/tools/nsxv_fw_autodraft_setting.py0000644000175000017500000001064200000000000026020 0ustar00coreycorey00000000000000#!/usr/bin/env python # Copyright 2016 VMware Inc # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Purpose: Configure distributed firewall autodraft setting Usage: python nsxv_fw_autodraft_setting.py --vsm-ip --username --password [--autodraft-disable] [--autodraft-enable] """ import base64 import optparse from oslo_serialization import jsonutils import requests requests.packages.urllib3.disable_warnings() GLOBAL_CONFIG_URI = 'api/4.0/firewall/config/globalconfiguration' AUTO_DRAFT_DISABLED = 'autoDraftDisabled' class NSXClient(object): def __init__(self, host, username, password, *args, **kwargs): self._host = host self._username = username self._password = password def _get_headers(self, format): auth_cred = self._username + ":" + self._password auth = base64.b64encode(auth_cred) headers = {} headers['Authorization'] = "Basic %s" % auth headers['Content-Type'] = "application/%s" % format headers['Accept'] = "application/%s" % format return headers def _get_url(self, uri): return 'https://%s/%s' % (self._host, uri) def _get(self, format, uri): headers = self._get_headers(format) url = self._get_url(uri) response = requests.get(url, headers=headers, verify=False) return response def _put(self, format, uri, data): headers = self._get_headers(format) url = self._get_url(uri) response = requests.put(url, headers=headers, verify=False, data=data) return response def disable_autodraft(self): self._set_autodraft(True) def enable_autodraft(self): self._set_autodraft(False) def _get_global_config(self): resp = self._get('json', GLOBAL_CONFIG_URI) global_conf = jsonutils.loads(resp.text) return global_conf def _set_autodraft(self, disabled): global_conf = self._get_global_config() global_conf[AUTO_DRAFT_DISABLED] = disabled self._put('json', GLOBAL_CONFIG_URI, jsonutils.dumps(global_conf)) if __name__ == "__main__": parser = optparse.OptionParser() parser.add_option("--vsm-ip", dest="vsm_ip", help="NSX Manager IP address") parser.add_option("-u", "--username", default="admin", dest="username", help="NSX Manager username") parser.add_option("-p", "--password", 
default="default", dest="password", help="NSX Manager password") parser.add_option("--disable-autodraft", action="store_true", default=False, dest="disabled", help="Disable the autodraft setting for NSX " "distributed firewal.") parser.add_option("--enable-autodraft", action="store_true", default=False, dest="enabled", help="Enable the autodraft setting for NSX " "distributed firewal.") (options, args) = parser.parse_args() print("vsm-ip: %s" % options.vsm_ip) print("username: %s" % options.username) print("password: %s" % options.password) if options.disabled and options.enabled: print("Please provide only one of the options: --disable-autodraft, " "--enable-autodraft.") nsx_client = NSXClient(options.vsm_ip, options.username, options.password) if options.disabled: print("Disabling autodraft settings:") nsx_client.disable_autodraft() print("Autodraft is now disabled.") if options.enabled: print("Enabling autodraft settings:") nsx_client.enable_autodraft() print("Autodraft is now enabled.") ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.170253 vmware-nsx-15.0.1.dev143/doc/0000755000175000017500000000000000000000000015744 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/doc/requirements.txt0000644000175000017500000000107000000000000021226 0ustar00coreycorey00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
sphinx!=1.6.6,!=1.6.7,>=1.6.2,<2.0.0;python_version=='2.7' # BSD sphinx!=1.6.6,!=1.6.7,>=1.6.2;python_version>='3.4' # BSD oslosphinx>=4.7.0 # Apache-2.0 openstackdocstheme>=1.18.1 # Apache-2.0 oslotest>=3.2.0 # Apache-2.0 reno>=2.5.0 # Apache-2.0 fixtures>=3.0.0 # Apache-2.0/BSD testresources>=2.0.0 # Apache-2.0/BSD testscenarios>=0.4 # Apache-2.0/BSD ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.170253 vmware-nsx-15.0.1.dev143/doc/source/0000755000175000017500000000000000000000000017244 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/doc/source/admin_util.rst0000644000175000017500000005465000000000000022135 0ustar00coreycorey00000000000000Admin Utility ============= NSX-V and NSX-T support the nsxadmin utility. This enables and administrator to determine and rectify inconsistencies between the Neutron DB and NSX. usage: nsxadmin -r -o NSX-V Plugin ------------ The following resources are supported: 'security-groups', 'edges', 'networks', 'firewall-sections', 'orphaned-edges', 'spoofguard-policy', 'missing-edges', 'backup-edges', 'nsx-security-groups', 'dhcp-binding' and 'metadata' Edges ~~~~~ - List backend NSX edges with their id, name and some more information:: nsxadmin -r edges -o nsx-list - List backend NSX edges with more details:: nsxadmin -r edges -o nsx-list --verbose - Neutron list:: nsxadmin -r edges -o neutron-list - Update Resource pool / Datastore on all edges in the backend. 
This utility can update resource pool and datastore ID of all edges to the nsx.ini configuration:: nsxadmin -r edges -o nsx-update-all --property appliances=True - Update Resource pool / Datastore / edge HA of an edge: This utility can be used on upgrade after the customer added ha_datastore_id to the nsx.ini configuration or after changing the resource pool / data store globally or per availability zone. This Utility can update the deployment of existing edges:: nsxadmin -r edges -o nsx-update --property edge-id= --property appliances=True - Update the size of an edge:: nsxadmin -r edges -o nsx-update --property edge-id=edge-55 --property size=compact - Update the high availability of an edge: enable/disable high availability of an edge:: nsxadmin -r edges -o nsx-update --property edge-id=edge-55 --property highavailability= - Update syslog config on edge (syslog-proto and syslog-server2 are optional):: nsxadmin -o nsx-update -r edges -p edge-id=edge-55 --property syslog-server= --property syslog-server2= --property syslog-proto= - Delete syslog config on edge:: nsxadmin -o nsx-update -r edges -p edge-id=edge-55 --property syslog-server=none - Enable logging with specified log level for specific module (routing, dns, dhcp, highavailability, loadbalancer) on edge:: nsxadmin -o nsx-update -r edges -p edge-id=edge-55 --property routing-log-level=debug - Enable logging with specified log level for all supported modules on edge:: nsxadmin -o nsx-update -r edges -p edge-id=edge-55 --property log-level=debug - Disable logging on edge:: nsxadmin -o nsx-update -r edges -p edge-id=edge-55 --property log-level=none - Update reservations of an edge:: nsxadmin -o nsx-update -r edges -p edge-id=edge-55 --property resource= --property limit= --property reservation= --property shares= - Update DRS hostgroups for an edge:: nsxadmin -o nsx-update -r edges -p edge-id=edge-55 --property hostgroup=update|all - Update DRS hostgroups for all edges:: nsxadmin -o nsx-update -r edges 
--property hostgroup=all - Clean all DRS hostgroups for all edges:: nsxadmin -o nsx-update -r edges --property hostgroup=clean Orphaned Edges ~~~~~~~~~~~~~~ - List orphaned edges (exist on NSXv backend but don't have a corresponding binding in Neutron DB):: nsxadmin -r orphaned-edges -o list - Clean orphaned edges (delete edges from NSXv backend):: nsxadmin -r orphaned-edges -o clean Orphaned Router bindings ~~~~~~~~~~~~~~~~~~~~~~~~ - List orphaned router bindings entries (exist on the router bindings DB table, but the neutron object behind them (router, network, or loadbalancer) is missing):: nsxadmin -r orphaned-bindings -o list - Clean orphaned router bindings entries (delete DB entry):: nsxadmin -r orphaned-bindings -o clean Orphaned Router VNICs ~~~~~~~~~~~~~~~~~~~~~ - List orphaned router vnic entries (exist on the edge vnics bindings DB table, but the neutron interface port behind them is missing):: nsxadmin -r orphaned-vnics -o list - Clean orphaned router vnics (delete DB entry, and NSX router interface):: nsxadmin -r orphaned-vnics -o clean Missing Edges ~~~~~~~~~~~~~ - List missing edges on NSX. 
This includes missing networks on those edges:: nsxadmin -r missing-edges -o list Backup Edges ~~~~~~~~~~~~ - List backend backup edges with their id, name and some more information:: nsxadmin -r backup-edges -o list - Delete backup edge:: nsxadmin -r backup-edges -o clean --property edge-id=edge-9 [--force] - Delete all backup edges existing in both neutron and backend when scope is neutron, else backend only:: nsxadmin -r backup-edges -o clean-all --property scope=[neutron/all] [--force] - List Edge name mismatches between DB and backend, and backup edges that are missing from the backend:: nsxadmin -r backup-edges -o list-mismatches - Fix Edge name mismatch between DB and backend by updating the name on the backend:: nsxadmin -r backup-edges -o fix-mismatch --property edge-id=edge-9 [--force] - Delete a backup edge from the DB and NSX by it's router ID:: nsxadmin -r backup-edges -o neutron-clean --property router-id=backup-26ab1a3a-d73d DHCP Bindings ~~~~~~~~~~~~~ - List missing DHCP bindings: list dhcp edges that are missing from the NSXv backend:: nsxadmin -r dhcp-binding -o list - Update DHCP bindings on an edge:: nsxadmin -r dhcp-binding -o nsx-update --property edge-id=edge-15 - Recreate DHCP edge by moving all the networks to other edges:: nsxadmin -r dhcp-binding -o nsx-recreate --property edge-id=edge-222 - Recreate DHCP edge for a specific network (when the edge does not exist):: nsxadmin -r dhcp-binding -o nsx-recreate --property net-id=5253ae45-75b4-4489-8aa1-6a9e1cfa80a6 - Redistribute networks on dhcp edges (for example when configuration of share_edges_between_tenants changes):: nsxadmin -r dhcp-binding -o nsx-redistribute Routers ~~~~~~~ - Recreate a router edge by moving the router/s to other edge/s:: nsxadmin -r routers -o nsx-recreate --property edge-id=edge-308 - Recreate a router on the NSX backend by removing it from the current edge (if any), and attaching to a new one:: nsxadmin -r routers -o nsx-recreate --property 
router-id=8cdd6d06-b457-4cbb-a0b1-41e08ccce287 - Redistribute shared routers on edges (for example when configuration of share_edges_between_tenants changes):: nsxadmin -r routers -o nsx-redistribute - Migrate NSXv metadata infrastructure for VDRs - use regular DHCP edges for VDR:: nsxadmin -r routers -o migrate-vdr-dhcp Networks ~~~~~~~~ - Ability to update or get the teaming policy for a DVS:: nsxadmin -r networks -o nsx-update --property dvs-id= --property teamingpolicy= - List backend networks and their network morefs:: nsxadmin -r networks -o list Missing Networks ~~~~~~~~~~~~~~~~ - List networks which are missing from the backend:: nsxadmin -r missing-networks -o list Orphaned Networks ~~~~~~~~~~~~~~~~~ - List networks which are missing from the neutron DB:: nsxadmin -r orphaned-networks -o list - Delete a backend network by it's moref:: nsxadmin -r orphaned-networks -o nsx-clean --property moref= Portgroups ~~~~~~~~~~ - List all NSX portgroups on the configured dvs:: nsxadmin -r nsx-portgroups -o list - Delete all NSX portgroups on the configured dvs:: nsxadmin -r nsx-portgroups -o nsx-cleanup <--force> Security Groups, Firewall and Spoofguard ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - List NSX firewall sections:: nsxadmin -r firewall-section -o list - List neutron security groups that does not have a matching NSX firewall section:: nsxadmin -r firewall-section -o list-mismatches - List NSX firewall sections that does not have a matching neutron security group:: nsxadmin -r firewall-section -o list-unused - Delete NSX firewall sections that does not have a matching neutron security group:: nsxadmin -r firewall-section -o nsx-clean - Reorder the NSX L3 firewall sections to correctly support the policy security groups:: nsxadmin -r firewall-sections -o nsx-reorder - List NSX service composer policies, that can be used in security groups:: nsxadmin -r firewall-sections -o list-policies - Update the default cluster section:: nsxadmin -r firewall-sections -o 
nsx-update - List NSX security groups:: nsxadmin -r nsx-security-groups -o list - List neutron security groups that does not have a matching NSX security group:: nsxadmin -r nsx-security-groups -o list-mismatches - List all the neutron security groups together with their NSX security groups and firewall sections:: nsxadmin -r security-groups -o list - Recreate missing NSX security groups ans firewall sections:: nsxadmin -r security-groups -o fix-mismatch - Migrate a security group from using rules to using a policy:: nsxadmin -r security-groups -o migrate-to-policy --property policy-id=policy-10 --property security-group-id=733f0741-fa2c-4b32-811c-b78e4dc8ec39 - Update logging flag of the security groups on the NSX DFW:: nsxadmin -r security-groups -o update-logging --property log-allowed-traffic=true - Spoofguard support:: nsxadmin -r spoofguard-policy -o clean --property policy-id=spoofguardpolicy-10 nsxadmin -r spoofguard-policy -o list --property reverse (entries defined on NSXv and not in Neutron) nsxadmin -r spoofguard-policy -o list-mismatches (--property network=) - List spoofguard policies with mismatching ips or mac, globally or for a specific network nsxadmin -r spoofguard-policy -o fix-mismatch --property port= - Fix the spoofguard ips of a neutron port - Orphaned rules in NSX section:: nsxadmin -r orphaned-rules -o list nsxadmin -r orphaned-rules -o nsx-clean Metadata ~~~~~~~~ - Update metadata infrastructure on all router and DHCP edges:: nsxadmin -r metadata -o nsx-update - Update metadata infrastructure on availability zone's router and DHCP edges:: nsxadmin -r metadata -o nsx-update --property az-name=az123 - Update metadata infrastructure on specific router or DHCP edge:: nsxadmin -r metadata -o nsx-update --property edge-id=edge-15 - Update shared secret on router and DHCP edges:: nsxadmin -r metadata -o nsx-update-secret - Retrieve metadata connectivity - optionally for a specific network:: nsxadmin -r metadata -o status [--property network_id=] 
V2T migration ~~~~~~~~~~~~~ - Validate the configuration of the NSX-V plugin before migrating to NSX-T:: nsxadmin -r nsx-migrate-v2t -o validate [--property transit-network=] Config ~~~~~~ - Validate the configuration in the nsx.ini and backend connectivity:: nsxadmin -r config -o validate NSX-T Plugin ------------ The following resources are supported: 'security-groups', 'routers', 'networks', 'nsx-security-groups', 'dhcp-binding', 'metadata-proxy', 'orphaned-dhcp-servers', 'firewall-sections', 'certificate', 'orphaned-networks', 'orphaned-routers', and 'ports'. Networks ~~~~~~~~ - List missing networks:: nsxadmin -r networks -o list-mismatches Orphaned Networks ~~~~~~~~~~~~~~~~~ - List networks (logical switches) which are missing from the neutron DB:: nsxadmin -r orphaned-networks -o list - Delete a backend network (logical switch) by it's nsx-id:: nsxadmin -r orphaned-networks -o nsx-clean --property nsx-id= Routers ~~~~~~~ - List missing routers:: nsxadmin -r routers -o list-mismatches - Update NAT rules on all routers to stop bypassing the FW rules. 
This is useful for NSX version 2.0 & up, before starting to use FWaaS:: nsxadmin -r routers -o nsx-update-rules - Update DHCP relay service on NSX router ports according to the current configuration:: nsxadmin -r routers -o nsx-update-dhcp-relay - Enable standby relocation on NSX routers that were created without it:: nsxadmin -r routers -o nsx-enable-standby-relocation - Replace an old tier0 (that might have been deleted) with a new one:: nsxadmin -r routers -o update-tier0 --property old-tier0= --property new-tier0= Orphaned Routers ~~~~~~~~~~~~~~~~~ - List logical routers which are missing from the neutron DB:: nsxadmin -r orphaned-routers -o list - Delete a backend logical router by it's nsx-id:: nsxadmin -r orphaned-routers -o nsx-clean --property nsx-id= Ports ~~~~~ - List missing ports, and ports that exist on backend but without the expected switch profiles or address bindings:: nsxadmin -r ports -o list-mismatches - Update the VMs ports (all or of a specific project) on the backend after migrating NSX-V -> NSX-T:: nsxadmin -r ports -o nsx-migrate-v-v3 (--property project-id=<> --property host-moref=<> --property respool-moref=<> --property net-name=<> --property datastore-moref=<>)) --plugin nsxv3 - Migrate exclude ports to use tags:: nsxadmin -r ports -o migrate-exclude-ports - Tag ports to be part of the default OS security group:: nsxadmin -r ports -o nsx-tag-default Security Groups & NSX Security Groups ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - List NSX backend security groups:: nsxadmin -r nsx-security-groups -o list - List neutron security groups:: nsxadmin -r security-groups -o list - List security groups with sections missing on the NSX backend:: nsxadmin -r nsx-security-groups -o list-mismatches - Fix mismatch security groups by recreating missing sections & NS groups on the NSX backend:: nsxadmin -r security-groups -o fix-mismatch - Update NSX security groups dynamic criteria for NSX-T CrossHairs:: nsxadmin -r nsx-security-groups -o 
migrate-to-dynamic-criteria - Update logging flag of the security groups on the NSX DFW:: nsxadmin -r security-groups -o update-logging --property log-allowed-traffic=true Firewall Sections ~~~~~~~~~~~~~~~~~ - List NSX backend firewall sections:: nsxadmin -r firewall-sections -o list - List security groups with missing sections on the NSX backend:: nsxadmin -r firewall-sections -o list-mismatches - Reuse default NSX section ans NS group from a previous installation:: nsxadmin -r firewall-sections -o reuse Orphaned Firewall Sections ~~~~~~~~~~~~~~~~~~~~~~~~~~ - List orphaned firewall sections & rules (exist on NSXv3 backend but don't have a corresponding binding in Neutron DB):: nsxadmin -r orphaned-firewall-sections -o nsx-list - Delete orphaned firewall sections & rules (exist on NSXv3 backend but don't have a corresponding binding in Neutron DB):: nsxadmin -r orphaned-firewall-sections -o nsx-clean Metadata Proxy ~~~~~~~~~~~~~~ - List version 1.0.0 metadata networks in Neutron:: nsxadmin -r metadata-proxy -o list - Resync metadata proxies for NSX-T version 1.1.0 and above (enable MD proxy, or update the uuid). This is only for migrating to native metadata support:: nsxadmin -r metadata-proxy -o nsx-update --property metadata_proxy_uuid= - update the ip of the Nova server in the metadata proxy server on the NSX:: nsxadmin -r metadata-proxy -o nsx-update-ip --property server-ip= --property availability-zone= DHCP Bindings ~~~~~~~~~~~~~ - List DHCP bindings in Neutron:: nsxadmin -r dhcp-binding -o list - Resync DHCP bindings for NSX-T version 1.1.0 and above. 
This is only for migrating to native DHCP support:: nsxadmin -r dhcp-binding -o nsx-update --property dhcp_profile_uuid= - Recreate dhcp server for a neutron network:: nsxadmin -r dhcp-binding -o nsx-recreate --property net-id= Orphaned DHCP Servers ~~~~~~~~~~~~~~~~~~~~~ - List orphaned DHCP servers (exist on NSX-T backend but don't have a corresponding binding in Neutron DB):: nsxadmin -r orphaned-dhcp-servers -o nsx-list - Clean orphaned DHCP servers (delete logical DHCP servers from NSX-T backend):: nsxadmin -r orphaned-dhcp-servers -o nsx-clean Client Certificate ~~~~~~~~~~~~~~~~~~ - Generate new client certificate (this command will delete previous certificate if exists):: nsxadmin -r certificate -o generate [--property username= --property password= --property key-size= --property sig-alg= --property valid-days= --property country= --property state= --property org= --property unit= --property host=] - Delete client certificate:: nsxadmin -r certificate -o clean - Show client certificate details:: nsxadmin -r certificate -o show - Import external certificate to NSX:: nsxadmin -r certificate -o import [--property username= --property password= --property filename=] - List certificates associated with openstack principal identity in NSX:: nsxadmin -r certificate -o nsx-list BGP GW edges ~~~~~~~~~~~~ - Create new BGP GW edge:: nsxadmin -r bgp-gw-edge -o create --property name= --property local-as= --property external-iface=: --property internal-iface=: - Delete BGP GW edge:: nsxadmin -r bgp-gw-edge -o delete --property gw-edge-id= - List BGP GW edges:: nsxadmin -r bgp-gw-edge -o list - Add a redistribution rule to a BGP GW edges:: nsxadmin -r routing-redistribution-rule -o create --property edge-ids=[,...] [--property prefix=] --property learner-protocol= --property learn-from=ospf,bgp,connected,static --property action= - Remove a redistribution rule from BGP GW edges:: nsxadmin -r routing-redistribution-rule -o delete --property gw-edge-ids=[,...] 
[--property prefix-name=] - Add a new BGP neighbour to BGP GW edges:: nsxadmin -r bgp-neighbour -o create --property gw-edge-ids=[,...] --property ip-address= --property remote-as= --property --password= - Remove BGP neighbour from BGP GW edges:: nsxadmin -r bgp-neighbour -o delete --property gw-edge-ids=[,...] --property ip-address= LBaaS ~~~~~ - List NSX LB services:: nsxadmin -r lb-services -o list - List NSX LB virtual servers:: nsxadmin -r lb-virtual-servers -o list - List NSX LB pools:: nsxadmin -r lb-pools -o list - List NSX LB monitors:: nsxadmin -r lb-monitors -o list - Update advertisement of LB vips on routers:: nsxadmin -r lb-advertisement -o nsx-update Rate Limit ~~~~~~~~~~ - Show the current NSX rate limit:: nsxadmin -r rate-limit -o show - Update the NSX rate limit:: nsxadmin -r rate-limit -o nsx-update --property value=<> Cluster ~~~~~~~ - Show the NSX cluster managers ips:: nsxadmin -r cluster -o show Config ~~~~~~ - Validate the configuration in the nsx.ini and backend connectivity:: nsxadmin -r config -o validate NSXtvd Plugin ------------- - All the NSX-V/T utilities can be used by calling:: nsxadmin --plugin nsxv/v3 -r <> -o <> - Add mapping between existing projects and old (v) plugin before starting to use the tvd plugin:: nsxadmin -r projects -o import --property plugin=nsx-v --property project=<> - Migrate a specific project from V to T:: nsxadmin -r projects -o nsx-migrate-v-v3 --property project-id= --property external-net= (--property from-file=True) NSX Policy Plugin ----------------- - List all the neutron security groups together with their NSX Policy objects and realization state:: nsxadmin -r security-groups -o list - List all the neutron networks together with their NSX Policy objects and realization state:: nsxadmin -r networks -o list - Sync admin state of networks and ports (Once upgraded to NSX 3.0 which supports policy admin state):: nsxadmin -r networks -o nsx-update-state - List all the neutron routers together with their 
NSX Policy objects and realization state:: nsxadmin -r routers -o list - Set intent realization and purge cycle interval (in minutes) on policy manager:: nsxadmin -r system -o set -p realization_interval=1 - Replace an old tier0 (that might have been deleted) with a new one:: nsxadmin -r routers -o update-tier0 --property old-tier0= --property new-tier0= - Update the firewall_match value in neutron nat rules with a new value. Should be used when firewall_match_internal_addr config changes:: nsxadmin -r routers -o update-nat-firewall-match --property firewall-match=external/internal - Migrate networks DHCP from MP to Policy (for NSX 3.0 upgrades):: nsxadmin -r dhcp-binding -o migrate-to-policy --property dhcp-config= Client Certificate ~~~~~~~~~~~~~~~~~~ - Generate new client certificate (this command will delete previous certificate if exists):: nsxadmin -r certificate -o generate [--property username= --property password= --property key-size= --property sig-alg= --property valid-days= --property country= --property state= --property org= --property unit= --property host=] - Delete client certificate:: nsxadmin -r certificate -o clean - Show client certificate details:: nsxadmin -r certificate -o show - Import external certificate to NSX:: nsxadmin -r certificate -o import [--property username= --property password= --property filename=] - List certificates associated with openstack principal identity in NSX:: nsxadmin -r certificate -o nsx-list Upgrade Steps (NSX-T Version 1.0.0 to Version 1.1.0) ---------------------------------------------------- 1. Upgrade NSX backend from version 1.0.0 to version 1.1.0 2. Create a DHCP-Profile and a Metadata-Proxy in NSX backend 3. Stop Neutron 4. Install version 1.1.0 Neutron plugin 5. Run admin tools to migrate version 1.0.0 objects to version 1.1.0 objects nsxadmin -r metadata-proxy -o nsx-update --property metadata_proxy_uuid= nsxadmin -r dhcp-binding -o nsx-update --property dhcp_profile_uuid= 6. Start Neutron 7. 
Make sure /etc/nova/nova.conf has metadata_proxy_shared_secret = 8. Restart VMs or ifdown/ifup their network interface to get new DHCP options Steps to create a TVD admin user ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Do the following steps:: source devstack/openrc admin admin openstack project create admin_v --domain=default --or-show -f value -c id openstack user create admin_v --password password --domain=default --email=alt_demo@example.com --or-show -f value -c id openstack role add admin --user --project Or run: devstack/tools/create_userrc.sh Then: openstack project plugin create --plugin nsx-v ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/doc/source/conf.py0000644000175000017500000000600300000000000020542 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys import fileinput import fnmatch sys.path.insert(0, os.path.abspath('../..')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', #'sphinx.ext.intersphinx', 'openstackdocstheme' ] # autodoc generation is a bit aggressive and a nuisance when doing heavy # text edit cycles. 
# execute "export SPHINX_DEBUG=1" in your terminal to disable # A list of glob-style patterns that should be excluded when looking for source # files. exclude_patterns = [ 'api/tests.*', # avoid of docs generation from tests 'api/oslo.vmware._*', # skip private modules ] # Prune the excluded patterns from the autoindex PATH = 'api/autoindex.rst' if os.path.isfile(PATH) and os.access(PATH, os.R_OK): for line in fileinput.input(PATH, inplace=True): found = False for pattern in exclude_patterns: if fnmatch.fnmatch(line, '*' + pattern[4:]): found = True if not found: print(line) # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = u'oslo.vmware' copyright = u'2014, OpenStack Foundation' # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] # html_theme = '_theme' # html_static_path = ['static'] # Output file base name for HTML help builder. htmlhelp_basename = '%sdoc' % project # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', '%s.tex' % project, u'%s Documentation' % project, u'OpenStack Foundation', 'manual'), ] # Example configuration for intersphinx: refer to the Python standard library. 
#intersphinx_mapping = {'http://docs.python.org/': None} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/doc/source/contributing.rst0000644000175000017500000000004300000000000022502 0ustar00coreycorey00000000000000.. include:: ../../CONTRIBUTING.rst././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/doc/source/devstack.rst0000644000175000017500000003421700000000000021611 0ustar00coreycorey00000000000000NSX DevStack Configurations =========================== Below are the options for configuring the NSX plugin with DevStack. Prior to doing this DevStack needs to be downloaded. After updating the relevant configuration file(s) run ./stack.sh NSX-V ----- Mandatory basic configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Add those parameters in ``local.conf``:: [[local|localrc]] enable_plugin vmware-nsx https://opendev.org/x/vmware-nsx Q_PLUGIN=vmware_nsx_v NSXV_MANAGER_URI= NSXV_USER= NSXV_PASSWORD= NSXV_VDN_SCOPE_ID= NSXV_DVS_ID= NSXV_DATACENTER_MOID= NSXV_DATASTORE_ID= NSXV_RESOURCE_POOL_ID= NSXV_EXTERNAL_NETWORK= NSXV_CLUSTER_MOID= QoS Driver ~~~~~~~~~~ Enable the qos in ``local.conf``:: [[local|localrc]] ENABLED_SERVICES+=,q-qos Q_SERVICE_PLUGIN_CLASSES+=,vmware_nsxv_qos NSXV_USE_DVS_FEATURES = True Optional: Update the nsx qos_peak_bw_multiplier in nsx.ini (default value is 2.0):: [NSX] qos_peak_bw_multiplier = FWaaS (V2) Driver ~~~~~~~~~~~~~~~~~ Add neutron-fwaas repo as an external repository and configure following flags in ``local.conf``:: [[local|localrc]] enable_plugin neutron-fwaas https://opendev.org/openstack/neutron-fwaas enable_service q-fwaas-v2 Q_SERVICE_PLUGIN_CLASSES+=,firewall_v2 [[post-config|$NEUTRON_FWAAS_CONF]] [fwaas] enabled = True driver = vmware_nsxv_edge_v2 [service_providers] service_provider = 
FIREWALL_V2:fwaas_db:neutron_fwaas.services.firewall.service_drivers.agents.agents.FirewallAgentDriver:default L2GW Driver ~~~~~~~~~~~ Add networking-l2gw repo as an external repository and configure following flags in ``local.conf``:: [[local|localrc]] enable_plugin networking-l2gw https://github.com/openstack/networking-l2gw ENABLED_SERVICES+=l2gw-plugin NETWORKING_L2GW_SERVICE_DRIVER=L2GW:vmware-nsx-l2gw:vmware_nsx.services.l2gateway.nsx_v.driver.NsxvL2GatewayDriver:default IPAM Driver ~~~~~~~~~~~ Update the ``local.conf`` file:: [[post-config|$NEUTRON_CONF]] [DEFAULT] ipam_driver = vmware_nsxv_ipam Flow Classifier ~~~~~~~~~~~~~~~ Update the ``local.conf`` file:: [[local|localrc]] enable_plugin networking-sfc https://opendev.org/openstack/networking-sfc master Q_SERVICE_PLUGIN_CLASSES+=,networking_sfc.services.flowclassifier.plugin.FlowClassifierPlugin [[post-config|$NEUTRON_CONF]] [flowclassifier] drivers = vmware-nsxv-sfc [nsxv] service_insertion_profile_id = In order to prevent tenants from changing the flow classifier, please add the following lines to the policy.json file:: "create_flow_classifier": "rule:admin_only", "update_flow_classifier": "rule:admin_only", "delete_flow_classifier": "rule:admin_only", "get_flow_classifier": "rule:admin_only" Neutron dynamic routing plugin (bgp) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Add neutron-dynamic-routing repo as an external repository and configure following flags in ``local.conf``:: [[local|localrc]] enable_plugin neutron-dynamic-routing https://opendev.org/openstack/neutron-dynamic-routing DR_MODE=dr_plugin BGP_PLUGIN=vmware_nsx.services.dynamic_routing.bgp_plugin.NSXvBgpPlugin [[post-config|$NEUTRON_CONF]] [DEFAULT] api_extensions_path = $DEST/neutron-dynamic-routing/neutron_dynamic_routing/extensions Neutron VPNaaS ~~~~~~~~~~~~~~ Add neutron-vpnaas repo as an external repository and configure following flags in ``local.conf``:: [[local|localrc]] enable_plugin neutron-vpnaas 
https://opendev.org/openstack/neutron-vpnaas NEUTRON_VPNAAS_SERVICE_PROVIDER=VPN:vmware:vmware_nsx.services.vpnaas.nsxv.ipsec_driver.NSXvIPsecVpnDriver:default Octavia ~~~~~~~ Add octavia and python-octaviaclient repos as external repositories and configure following flags in ``local.conf``:: [[local|localrc]] OCTAVIA_NODE=api DISABLE_AMP_IMAGE_BUILD=True LIBS_FROM_GIT=python-openstackclient,python-octaviaclient enable_plugin octavia https://opendev.org/openstack/octavia.git enable_plugin octavia-dashboard https://opendev.org/openstack/octavia-dashboard enable_service octavia enable_service o-api,o-da [[post-config|$OCTAVIA_CONF]] [DEFAULT] verbose = True debug = True [api_settings] default_provider_driver=vmwareedge enabled_provider_drivers=vmwareedge:NSX [oslo_messaging] topic=vmwarensxv_edge_lb [controller_worker] network_driver = allowed_address_pairs_driver [driver_agent] enabled_provider_agents=vmwareagent NSX-T ----- Mandatory basic configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Add those parameters in ``local.conf``:: [[local|localrc]] enable_plugin vmware-nsx https://opendev.org/x/vmware-nsx Q_PLUGIN=vmware_nsx_v3 NSX_MANAGER= NSX_USER= NSX_PASSWORD= DHCP_PROFILE_UUID= METADATA_PROXY_UUID= DEFAULT_TIER0_ROUTER_UUID= DEFAULT_OVERLAY_TZ_UUID= QoS Driver ~~~~~~~~~~ Enable the qos in ``local.conf``:: [[local|localrc]] ENABLED_SERVICES+=,q-qos Q_SERVICE_PLUGIN_CLASSES+=,neutron.services.qos.qos_plugin.QoSPlugin Optional: Update the nsx qos_peak_bw_multiplier in nsx.ini (default value is 2.0):: [NSX] qos_peak_bw_multiplier = L2GW Driver ~~~~~~~~~~~ Add networking-l2gw repo as an external repository and configure following flags in ``local.conf``:: [[local|localrc]] enable_plugin networking-l2gw https://github.com/openstack/networking-l2gw ENABLED_SERVICES+=l2gw-plugin NETWORKING_L2GW_SERVICE_DRIVER=L2GW:vmware-nsx-l2gw:vmware_nsx.services.l2gateway.nsx_v3.driver.NsxV3Driver:default DEFAULT_BRIDGE_CLUSTER_UUID= IPAM Driver ~~~~~~~~~~~ Update the ``local.conf`` 
file:: [[post-config|$NEUTRON_CONF]] [DEFAULT] ipam_driver = vmware_nsxv3_ipam Trunk Driver ~~~~~~~~~~~~ Enable trunk service and configure following flags in ``local.conf``:: [[local]|[localrc]] # Trunk plugin NSX-T driver config ENABLED_SERVICES+=,q-trunk Q_SERVICE_PLUGIN_CLASSES+=,trunk FWaaS (V2) Driver ~~~~~~~~~~~~~~~~~ Add neutron-fwaas repo as an external repository and configure following flags in ``local.conf``:: [[local|localrc]] enable_plugin neutron-fwaas https://opendev.org/openstack/neutron-fwaas enable_service q-fwaas-v2 Q_SERVICE_PLUGIN_CLASSES+=,firewall_v2 [[post-config|$NEUTRON_FWAAS_CONF]] [fwaas] enabled = True driver = vmware_nsxv3_edge_v2 [service_providers] service_provider = FIREWALL_V2:fwaas_db:neutron_fwaas.services.firewall.service_drivers.agents.agents.FirewallAgentDriver:default Neutron VPNaaS ~~~~~~~~~~~~~~ Add neutron-vpnaas repo as an external repository and configure following flags in ``local.conf``:: [[local|localrc]] enable_plugin neutron-vpnaas https://opendev.org/openstack/neutron-vpnaas NEUTRON_VPNAAS_SERVICE_PROVIDER=VPN:vmware:vmware_nsx.services.vpnaas.nsxv3.ipsec_driver.NSXv3IPsecVpnDriver:default Q_SERVICE_PLUGIN_CLASSES+=,vmware_nsx_vpnaas [[post-config|$NEUTRON_CONF]] [DEFAULT] api_extensions_path = $DEST/neutron-vpnaas/neutron_vpnaas/extensions Octavia ~~~~~~~ Add octavia and python-octaviaclient repos as external repositories and configure following flags in ``local.conf``:: [[local|localrc]] OCTAVIA_NODE=api DISABLE_AMP_IMAGE_BUILD=True LIBS_FROM_GIT=python-openstackclient,python-octaviaclient enable_plugin octavia https://opendev.org/openstack/octavia.git enable_plugin octavia-dashboard https://opendev.org/openstack/octavia-dashboard enable_service octavia enable_service o-api,o-da [[post-config|$OCTAVIA_CONF]] [DEFAULT] verbose = True debug = True [api_settings] default_provider_driver=vmwareedge enabled_provider_drivers=vmwareedge:NSX [oslo_messaging] topic=vmwarensxv_edge_lb [controller_worker] network_driver = 
allowed_address_pairs_driver [driver_agent] enabled_provider_agents=vmwareagent NSX-P ----- Mandatory basic configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Add those parameters in ``local.conf``:: [[local|localrc]] enable_plugin vmware-nsx https://opendev.org/x/vmware-nsx Q_PLUGIN=vmware_nsx_p NSX_POLICY= NSX_USER= NSX_PASSWORD= DHCP_PROFILE_UUID= METADATA_PROXY_UUID= DEFAULT_TIER0_ROUTER_UUID= DEFAULT_OVERLAY_TZ_UUID= QoS Driver ~~~~~~~~~~ Enable the qos in ``local.conf``:: [[local|localrc]] ENABLED_SERVICES+=,q-qos Q_SERVICE_PLUGIN_CLASSES+=,neutron.services.qos.qos_plugin.QoSPlugin Optional: Update the nsx qos_peak_bw_multiplier in nsx.ini (default value is 2.0):: [NSX] qos_peak_bw_multiplier = FWaaS (V2) Driver ~~~~~~~~~~~~~~~~~ Add neutron-fwaas repo as an external repository and configure following flags in ``local.conf``:: [[local|localrc]] enable_plugin neutron-fwaas https://opendev.org/openstack/neutron-fwaas enable_service q-fwaas-v2 Q_SERVICE_PLUGIN_CLASSES+=,firewall_v2 [[post-config|$NEUTRON_FWAAS_CONF]] [fwaas] enabled = True driver = vmware_nsxp_edge_v2 [service_providers] service_provider = FIREWALL_V2:fwaas_db:neutron_fwaas.services.firewall.service_drivers.agents.agents.FirewallAgentDriver:default Octavia ~~~~~~~ Add octavia and python-octaviaclient repos as external repositories and configure following flags in ``local.conf``:: [[local|localrc]] OCTAVIA_NODE=api DISABLE_AMP_IMAGE_BUILD=True LIBS_FROM_GIT=python-openstackclient,python-octaviaclient enable_plugin octavia https://opendev.org/openstack/octavia.git enable_plugin octavia-dashboard https://opendev.org/openstack/octavia-dashboard enable_service octavia enable_service o-api,o-da [[post-config|$OCTAVIA_CONF]] [DEFAULT] verbose = True debug = True [api_settings] default_provider_driver=vmwareedge enabled_provider_drivers=vmwareedge:NSX [oslo_messaging] topic=vmwarensxv_edge_lb [controller_worker] network_driver = allowed_address_pairs_driver [driver_agent] enabled_provider_agents=vmwareagent 
Trunk Driver ~~~~~~~~~~~~ Enable trunk service and configure following flags in ``local.conf``:: [[local]|[localrc]] # Trunk plugin NSX-P driver config ENABLED_SERVICES+=,q-trunk Q_SERVICE_PLUGIN_CLASSES+=,trunk Neutron VPNaaS ~~~~~~~~~~~~~~ Add neutron-vpnaas repo as an external repository and configure following flags in ``local.conf``:: [[local|localrc]] enable_plugin neutron-vpnaas https://opendev.org/openstack/neutron-vpnaas NEUTRON_VPNAAS_SERVICE_PROVIDER=VPN:vmware:vmware_nsx.services.vpnaas.nsxp.ipsec_driver.NSXpIPsecVpnDriver:default Q_SERVICE_PLUGIN_CLASSES+=,vmware_nsx_vpnaas [[post-config|$NEUTRON_CONF]] [DEFAULT] api_extensions_path = $DEST/neutron-vpnaas/neutron_vpnaas/extensions NSX-TVD ------- Mandatory basic configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Add those parameters in ``local.conf``:: [[local|localrc]] enable_plugin vmware-nsx https://opendev.org/x/vmware-nsx Q_PLUGIN=vmware_nsx_tvd FWaaS (V2) Driver ~~~~~~~~~~~~~~~~~ Add neutron-fwaas repo as an external repository and configure following flags in ``local.conf``:: [[local|localrc]] enable_plugin neutron-fwaas https://opendev.org/openstack/neutron-fwaas enable_service q-fwaas-v2 Q_SERVICE_PLUGIN_CLASSES+=,vmware_nsxtvd_fwaasv2 [DEFAULT] api_extensions_path = $DEST/neutron-fwaas/neutron_fwaas/extensions [[post-config|$NEUTRON_FWAAS_CONF]] [fwaas] enabled = True driver = vmware_nsxtvd_edge_v2 [service_providers] service_provider = FIREWALL_V2:fwaas_db:neutron_fwaas.services.firewall.service_drivers.agents.agents.FirewallAgentDriver:default L2GW Driver ~~~~~~~~~~~ Add networking-l2gw repo as an external repository and configure following flags in ``local.conf``:: [[local|localrc]] enable_plugin networking-l2gw https://github.com/openstack/networking-l2gw ENABLED_SERVICES+=l2gw-plugin NETWORKING_L2GW_SERVICE_DRIVER=L2GW:vmware-nsx-l2gw:vmware_nsx.services.l2gateway.nsx_tvd.driver.NsxTvdL2GatewayDriver:default DEFAULT_BRIDGE_CLUSTER_UUID= Q_SERVICE_PLUGIN_CLASSES+=,vmware_nsxtvd_l2gw 
[[post-config|$NEUTRON_CONF]] [DEFAULT] api_extensions_path = $DEST/networking-l2gateway/networking_l2gw/extensions QoS Driver ~~~~~~~~~~ Enable the qos in ``local.conf``:: [[local|localrc]] ENABLED_SERVICES+=,q-qos Q_SERVICE_PLUGIN_CLASSES+=,vmware_nsxtvd_qos Neutron dynamic routing plugin (bgp) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Add neutron-dynamic-routing repo as an external repository and configure following flags in ``local.conf``:: [[local|localrc]] enable_plugin neutron-dynamic-routing https://opendev.org/openstack/neutron-dynamic-routing DR_MODE=dr_plugin BGP_PLUGIN=vmware_nsx.services.dynamic_routing.bgp_plugin.NSXBgpPlugin [[post-config|$NEUTRON_CONF]] [DEFAULT] api_extensions_path = $DEST/neutron-dynamic-routing/neutron_dynamic_routing/extensions Neutron VPNaaS ~~~~~~~~~~~~~~ Add neutron-vpnaas repo as an external repository and configure following flags in ``local.conf``:: [[local|localrc]] enable_plugin neutron-vpnaas https://opendev.org/openstack/neutron-vpnaas NEUTRON_VPNAAS_SERVICE_PROVIDER=VPN:vmware:vmware_nsx.services.vpnaas.nsx_tvd.ipsec_driver.NSXIPsecVpnDriver:default Q_SERVICE_PLUGIN_CLASSES+=,vmware_nsxtvd_vpnaas [[post-config|$NEUTRON_CONF]] [DEFAULT] api_extensions_path = $DEST/neutron-vpnaas/neutron_vpnaas/extensions IPAM Driver ~~~~~~~~~~~ Update the ``local.conf`` file:: [[post-config|$NEUTRON_CONF]] [DEFAULT] ipam_driver = vmware_nsxtvd_ipam ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/doc/source/housekeeper.rst0000644000175000017500000000645500000000000022327 0ustar00coreycorey00000000000000Plugin Housekeeper ================== During the Neutron plugin's operation, system may enter an inconsistent state due to synchronization issues between different components, e.g Neutron and NSX or NSX and vCenter. Some of these inconsistencies may impact the operation of various system elements. 
The Housekeeping mechanism should: a) Detect such inconsistencies and warn about them. b) Resolve inconsistencies when possible. Some of these inconsistencies can be resolved using the Admin utility, yet it requires manual operation by the administrator while the housekeeping mechanism should be automatic. Configuration ------------- Housekeeping mechanism uses two configuration parameters: nsxv/v3.housekeeping_jobs: The housekeeper can be configured which tasks to execute and which should be skipped. nsxv/v3.housekeeping_readonly: Housekeeper may attempt to fix a broken environment when this flag is set to False, or otherwise will just warn about inconsistencies. Operation --------- The housekeeping mechanism is an extension to the Neutron plugin. Therefore it can be triggered by accessing the extension's URL with an administrator context. A naive devstack example could be:: source devstack/openrc admin demo export AUTH_TOKEN=`openstack token issue | awk '/ id /{print $4}'` curl -X GET -s -H "X-Auth-Token: $AUTH_TOKEN" -H 'Content-Type: application/json' -d '{"housekeeper": {}}' http://:9696/v2.0/housekeepers/all curl -X PUT -s -H "X-Auth-Token: $AUTH_TOKEN" -H 'Content-Type: application/json' -d '{"housekeeper": {}}' http://:9696/v2.0/housekeepers/all Where would be the Neutron controller's IP or the virtual IP of the load balancer which manages the Neutron controllers. It is important to use the virtual IP in case of a load balanced active-backup Neutron servers, as otherwise the housekeeping request may be handled by the wrong controller. The GET curl call will run all jobs in readonly mode the PUT curl call will run all jobs in readwrite mode (for that the housekeeping_readonly should be set to False) To operate the housekeeper periodically as it should, it should be scheduled via a timing mechanism such as Linux cron. Plugin Jobs ----------- NSX-V ~~~~~ error_dhcp_edge: scans for DHCP Edge appliances which are in ERROR state. 
When in non-readonly mode, the job will attempt recovery of the DHCP edges by removing stale elements from the Neutron DB and reconfigure the interfaces at the backend when required. error_backup_edge: scans from backup Edge appliances which are in ERROR state. When in non-readonly mode, the job will reset the Edge appliance configuration. NSX-v3 ~~~~~~ orphaned_logical_router: scans the NSX backend for logical routers which are missing from the neutron DB. Report it, and if in non-readonly mode delete them. orphaned_logical_swithces: scans the NSX backend for logical switches which are missing from the neutron DB. Report it, and if in non-readonly mode delete them. orphaned_dhcp_server: scans the NSX backend for DHCP servers which are missing a matching network in the neutron DB. Report it, and if in non-readonly mode delete them. orphaned_firewall_section: scans the NSX backend for firewall sections which are missing a matching security group in the neutron DB. Report it, and if in non-readonly mode delete them. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/doc/source/index.rst0000644000175000017500000000056700000000000021115 0ustar00coreycorey00000000000000Welcome to vmware-nsx's documentation! ======================================= Contents: .. toctree:: :maxdepth: 2 readme installation usage contributing admin_util devstack housekeeper Code Documentation ================== .. 
toctree:: :maxdepth: 1 Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/doc/source/installation.rst0000644000175000017500000000031000000000000022471 0ustar00coreycorey00000000000000============ Installation ============ At the command line:: $ pip install vmware-nsx Or, if you have virtualenvwrapper installed:: $ mkvirtualenv vmware-nsx $ pip install vmware-nsx ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/doc/source/readme.rst0000644000175000017500000000003500000000000021231 0ustar00coreycorey00000000000000.. include:: ../../README.rst././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/doc/source/usage.rst0000644000175000017500000000010300000000000021074 0ustar00coreycorey00000000000000======== Usage ======== To use in a project:: import vmware_nsx ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.170253 vmware-nsx-15.0.1.dev143/etc/0000755000175000017500000000000000000000000015752 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/etc/README.txt0000644000175000017500000000046700000000000017457 0ustar00coreycorey00000000000000To generate the sample vmware-nsx configuration files, run the following command from the top level of the vmware-nsx directory: tox -e genconfig If a 'tox' environment is unavailable, then you can run the following script instead to generate the configuration files: ./tools/generate_config_file_samples.sh ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.170253 
vmware-nsx-15.0.1.dev143/etc/oslo-config-generator/0000755000175000017500000000000000000000000022155 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/etc/oslo-config-generator/nsx.ini0000644000175000017500000000011400000000000023462 0ustar00coreycorey00000000000000[DEFAULT] output_file = etc/nsx.ini.sample wrap_width = 79 namespace = nsx ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.170253 vmware-nsx-15.0.1.dev143/etc/oslo-policy-generator/0000755000175000017500000000000000000000000022207 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/etc/oslo-policy-generator/policy.conf0000644000175000017500000000010600000000000024352 0ustar00coreycorey00000000000000[DEFAULT] output_file = etc/policy.yaml.sample namespace = vmware-nsx ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.170253 vmware-nsx-15.0.1.dev143/etc/policy.d/0000755000175000017500000000000000000000000017473 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/etc/policy.d/flow-classifier.json0000644000175000017500000000030600000000000023456 0ustar00coreycorey00000000000000{ "create_flow_classifier": "rule:admin_only", "update_flow_classifier": "rule:admin_only", "delete_flow_classifier": "rule:admin_only", "get_flow_classifier": "rule:admin_only", } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/etc/policy.d/routers.json0000644000175000017500000000050500000000000022071 0ustar00coreycorey00000000000000{ "create_router:distributed": "rule:admin_or_owner", 
"get_router:distributed": "rule:admin_or_owner", "update_router:distributed": "rule:admin_or_owner", "create_router:external_gateway_info:enable_snat": "rule:admin_or_owner", "update_router:external_gateway_info:enable_snat": "rule:admin_or_owner" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/lower-constraints.txt0000644000175000017500000000152300000000000021436 0ustar00coreycorey00000000000000amqp==2.1.1 bandit==1.1.0 coverage==4.0 decorator==4.4.1 eventlet==0.24.1 fixtures==3.0.0 flake8-import-order==0.12 flake8==2.6.2 hacking==1.1.0 httplib2==0.9.1 kombu==4.0.0 mock==2.0.0 netaddr==0.7.18 neutron-lib==2.0.0 octavia-lib==1.3.1 openstackdocstheme==1.18.1 oslo.concurrency==3.26.0 oslo.config==5.2.0 oslo.context==2.19.2 oslo.db==4.37.0 oslo.i18n==3.15.3 oslo.log==3.36.0 oslo.messaging==5.29.0 oslo.policy==1.30.0 oslo.serialization==2.28.1 oslo.service==1.31.0 oslo.utils==3.33.0 oslo.vmware==2.17.0 oslotest==3.2.0 osc-lib==1.14.0 pbr==4.0.0 pika-pool==0.1.3 pika==0.10.0 prettytable==0.7.2 psycopg2==2.7 PyMySQL==0.7.6 pylint==1.7.1 python-openstackclient==4.0.0 reno==2.5.0 requests==2.14.2 six==1.11.0 SQLAlchemy==1.2.0 sphinx==1.6.5 stestr==1.0.0 stevedore==1.20.0 tenacity==5.0.2 testtools==2.2.0 tooz==1.58.0 vmware-nsxlib==15.0.1 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1542525 vmware-nsx-15.0.1.dev143/releasenotes/0000755000175000017500000000000000000000000017670 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1782532 vmware-nsx-15.0.1.dev143/releasenotes/notes/0000755000175000017500000000000000000000000021020 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 
vmware-nsx-15.0.1.dev143/releasenotes/notes/.placeholder0000644000175000017500000000000000000000000023271 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/bind-floating-ips-per-az-142f0de7ebfae1c8.yaml0000644000175000017500000000061000000000000031034 0ustar00coreycorey00000000000000--- prelude: > Enable 'bind_floatingip_to_all_interfaces' to be configured per availability zone. features: - | Enable 'bind_floatingip_to_all_interfaces' to be configured per availability zone. This will enable an admin to ensure that an AZ can have flotaing IP's configured on all edge vNICS. This enables VM's on the same subnet to communicate via floating IP's. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/block-all-no-security-groups-47af550349dbc85a.yaml0000644000175000017500000000062300000000000031561 0ustar00coreycorey00000000000000--- prelude: > Enable 'use_default_block_all' to ensure that traffic to a port that has no security groups and has port security enabled will be discarded. features: - | Enable 'use_default_block_all' to ensure that traffic to a port that has no security groups and has port security enabled will be discarded. This will ensure the same behaviours as the upstream security groups. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/dns-search-domain-configuration-a134af0ef028282c.yaml0000644000175000017500000000045600000000000032253 0ustar00coreycorey00000000000000--- prelude: > Enable an admin to configure a global search domain. This is used if no search domain is configured on a subnet. features: - A new configuration variable in the nsxv section will enable the admin to configure a search domain. 
The new variable is dns_search_domain. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/dvs_dns_integration-831224f15acbc728.yaml0000644000175000017500000000034600000000000030101 0ustar00coreycorey00000000000000--- features: - | One can enable DNS integration for the upstream neutron for VMware NSX-DVS. DNS integration extension by setting: nsx_extension_drivers = vmware_dvs_dns in the default section of neutron.conf. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/ens_support-49dbc626ba1b16be.yaml0000644000175000017500000000040600000000000026632 0ustar00coreycorey00000000000000--- prelude: > Add a configuration variable indicating that ENS transport zones can be used. features: - | Add a new configuration variable ``ens_support`` to the ``nsx_v3`` section. This indicates if a tenant or admin can create ENS networks. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/fwaas_v2-9445ea0aaea91c60.yaml0000644000175000017500000000027100000000000025700 0ustar00coreycorey00000000000000--- prelude: > The NSX-v3 plugin supports FWaaS V2. features: The NSX-v3 plugin now supports FWaaS V2 allowing to set a different firewall group policy on each router port. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/nsx-dns-integration-extension-8260456051d61743.yaml0000644000175000017500000000043000000000000031461 0ustar00coreycorey00000000000000--- prelude: > The dns-integration extension is now supported in both NSXV and NSXV3 plugins. 
It can be enabled by adding 'vmware_nsxv_dns' (for NSXV) or 'vmware_nsxv3_dns' (for NSXV3) to the ``nsx_extension_drivers`` configuration variable in neutron.conf file. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/nsx-extension-drivers-b1aedabe5296d4d0.yaml0000644000175000017500000000052100000000000030624 0ustar00coreycorey00000000000000--- prelude: > We have added a new configuration variable that will enable us to enable existing extensions. The new configuration variable is ``nsx_extension_drivers``. This is in the default section. This is a list of extansion names. The code for the drivers must be in the directory vmware_nsx.extension_drivers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/nsxv-availability-zones-85db159a647762b3.yaml0000644000175000017500000000072300000000000030575 0ustar00coreycorey00000000000000--- prelude: > The NSX-v plugin supports availability zones hints on routers and networks creation in order to create them on the requested nsx datastore and resource pool. features: - The NSX-v plugin supports availability zones hints on routers and networks creation in order to create them on the requested nsx datastore and resource pool. The availability zones configuration includes the resource pool, datastore, and HA datastore. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/nsxv-bgp-support-44f857d382943e08.yaml0000644000175000017500000000022200000000000027173 0ustar00coreycorey00000000000000--- prelude: > The NSX-V plugin suppports BGP for dynamic routing. features: - | The NSX-V plugin can suppport BGP for dynamic routing. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/nsxv-edge-random-placement-9534371967edec8f.yaml0000644000175000017500000000065300000000000031216 0ustar00coreycorey00000000000000--- prelude: > Support randomly selecting which will be the primary datastore and which will be the secondary one when deplying an edge, in order to balance the load. This new option is available globally as well as per availability_zone. features: - | Support randomly selecting which will be the primary datastore and which will be the secondary one when deplying an edge, in order to balance the load. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/nsxv-exclusive-dhcp-7e5cde1cd88f8c5b.yaml0000644000175000017500000000041700000000000030277 0ustar00coreycorey00000000000000--- prelude: > Add support for exclusive DHCP edges. features: - | The NSX-v will now enable a tenant to deploy an exclusive DHCP edge. This is either via the global configuration variable ``exclusive_dhcp_edge`` or per AZ. By default this is disabled. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/nsxv-fwaas-driver-4c457dee3fc3bae2.yaml0000644000175000017500000000030700000000000027725 0ustar00coreycorey00000000000000--- prelude: > The NSX-V plugin can suppport FWaaS-V1 for setting router edges firewall rules. features: - | The NSX-V plugin can suppport FWaaS-V1 for setting router edges firewall rules. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/nsxv-fwaas-v2-driver-0b78d5e2c4034b21.yaml0000644000175000017500000000030700000000000027740 0ustar00coreycorey00000000000000--- prelude: > The NSX-V plugin can suppport FWaaS-V2 for setting router edges firewall rules. features: - | The NSX-V plugin can suppport FWaaS-V2 for setting router edges firewall rules. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/nsxv-ipam-support-6eb1ac4e0e025ddd.yaml0000644000175000017500000000056100000000000027762 0ustar00coreycorey00000000000000--- prelude: > The NSX-v plugin can use the platform IPAM for ip allocations for external networks and provider networks. features: - The NSX-v plugin can use the platform IPAM for ip allocations for external networks and provider networks. In order to use this feature, the ipam_driver in the neutron.conf file should be set to vmware_nsxv_ipam. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/nsxv-lbaas-l7-704f748300d1a399.yaml0000644000175000017500000000022300000000000026276 0ustar00coreycorey00000000000000--- prelude: > The NSX-V lbaas plugin now supports L7 rules & policies. features: - The NSX-V lbaas plugin now supports L7 rules & policies. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/nsxv-policy-3f552191f94873cd.yaml0000644000175000017500000000061600000000000026273 0ustar00coreycorey00000000000000--- prelude: > The NSX-V plugin allows admin user to create security groups consuming NSX policies, both as regular / default and provider security gruops. 
features: - The NSX-V plugin supports the concumption of NSX policies through security groups. Depending on the configuration, an admin user can create security groups without rules, that will be connected to an NSX policy. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/nsxv-router-flavors-8e4cea7f6e12d44d.yaml0000644000175000017500000000046700000000000030263 0ustar00coreycorey00000000000000--- prelude: > The NSX-v plugin supports using router flavors in routers creation. features: - The NSX-v plugin supports using router flavors in routers creation. A router flavor can include the router type, size, distributed flag and availability zones in order to easily create similar routers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/nsxv-service-insertion-32ab34a0e0f6ab4f.yaml0000644000175000017500000000073700000000000030712 0ustar00coreycorey00000000000000--- prelude: > The NSX-V plugin supports service insertion by redirecting traffic matched to the neutron flow classifiers, to the NSX-V partner security services. features: - The NSX-V plugin supports service insertion by redirecting traffic matched to the neutron flow classifiers, to the NSX-V partner security services. For each flow-classifier defined in neutron, a new traffic redirection rule will be created in the NSX partner security services tab. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/nsxv-subnets-dhcp-mtu-c7028748b516422e.yaml0000644000175000017500000000043400000000000030102 0ustar00coreycorey00000000000000--- prelude: > The new extension dhcp-mtu of subnets in the NSX-v plugin can be used to configure the DHCP client network interface MTU features: - The new extension dhcp-mtu of subnets in the NSX-v plugin can be used to configure the DHCP client network interface MTU. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/nsxv-vlan-selection-ec73aac44b3648a1.yaml0000644000175000017500000000025700000000000030114 0ustar00coreycorey00000000000000--- prelude: > The NSX-V plugin can decide on the VLAN tag for a provider network. features: - | The NSX-V plugin can decide on the VLAN tag for a provider network. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/nsxv3-add-trunk-driver-925ad1205972cbdf.yaml0000644000175000017500000000037300000000000030363 0ustar00coreycorey00000000000000--- prelude: > Support VLAN-aware-VM feature in NSXv3 plugin. features: - Trunk driver for NSXv3 plugin which allows creation of trunk ports and subports which subsequently create parent port and child ports relationship in the backend. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/nsxv3-availability-zones-8decf892df62.yaml0000644000175000017500000000065500000000000030513 0ustar00coreycorey00000000000000--- prelude: > The NSX-v3 plugin supports availability zones hints on networks creation in order to separate the native dhcp configuration. 
features: - The NSX-v3 plugin supports availability zones hints on networks creation in order to separate the native dhcp configuration. The availability zones configuration includes the metadata_proxy, dhcp_profile, native_metadata_route and dns related parameters. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/nsxv3-default-tier0-router-2983c6de10dd465a.yaml0000644000175000017500000000045100000000000031164 0ustar00coreycorey00000000000000--- prelude: > The nsx-v3 plugin can add default Tier-0 router configuration per availability zone. features: - | The nsx-v3 plugin can add default Tier-0 router configuration per availability zone. The Tier-0 rotuer will be used as a default for external networks creation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/nsxv3-dhcp-relay-32cf1ae281e1.yaml0000644000175000017500000000066200000000000026622 0ustar00coreycorey00000000000000--- prelude: > The NSX-v3 plugin supports DHCP relay service per network availability zones. features: - The NSX-v3 plugin supports DHCP relay service per network availability zones. When a router interface port is created, the relay service will be added to it. DHCP traffic on the subnet will go through the DHCP server configured in the dhcp relay service on the NSX, if it is connected to the router.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/nsxv3-init-from-tags-bcd4f3245a78e9a6.yaml0000644000175000017500000000062500000000000030131 0ustar00coreycorey00000000000000--- prelude: > NSX-V3 plugin supports a new configuration option for the transport zones, tier-0 router, dhcp profile and md-proxy in the nsx ini file using NSX Tags insead of names or IDs. 
features: - | NSX-V3 plugin supports a new configuration option for the transport zones, tier-0 router, dhcp profile and md-proxy in the nsx ini file using NSX Tags insead of names or IDs. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/nsxv3-ipam-support-137174152c65459d.yaml0000644000175000017500000000070000000000000027340 0ustar00coreycorey00000000000000--- prelude: > The NSX-v3 plugin can use the platform IPAM for ip allocations for all network types. features: - The NSX-v3 plugin can use the platform IPAM for ip allocations for all network types. In order to use this feature, the ipam_driver in the neutron.conf file should be set to vmware_nsxv3_ipam. Currently the plugin does not support allocating a specific address from the pool depending on the NSX version. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/nsxv3-lbaasv2-driver-57f37d6614eb1510.yaml0000644000175000017500000000037200000000000027664 0ustar00coreycorey00000000000000--- prelude: > NSXv3 plugin supports LBaaS v2 using NSX native load balancing. features: - | Add NSXv3 neutron lbaas v2 driver to support LBaaS v2.0. This includes both layer4 and layer7 load balancing via NSX native load balancer. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/nsxv3-lbaasv2-error-no-member-635ffc6308289aca.yaml0000644000175000017500000000034600000000000031551 0ustar00coreycorey00000000000000--- prelude: > NSXv3 plugin will mark unused loadbalancers in ERROR state. features: - | Upon upgrade to Stein, unused LBaaS-v2 loadbalancers, which have no members will be marked in ERROR state, and cannot be used. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/nsxv3-multi-managers-b645c4202a8476e9.yaml0000644000175000017500000000042400000000000027771 0ustar00coreycorey00000000000000--- prelude: > The NSX-v3 plugin supports different credentials for the NSX managers. features: The nsxv3 configuration parameters ca_file, nsx_api_user & nsx_api_password are now lists, in order to support different credentials for each of the NSX managers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/nsxv3-native-dhcp-config-2b6bdd372a2d643f.yaml0000644000175000017500000000056000000000000030720 0ustar00coreycorey00000000000000--- prelude: > Starting Newton release we added support for native DHCP and metadata provided by NSXv3 backend. features: - Since now most of the NSXv3 deployment are using native DHCP/Metadata, default this option native_dhcp_metadata to True. By default, it will use NSXv3 native DHCP and Metadata unless this has been explicitly set to False. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/nsxv3-native-dhcp-metadata-27af1de98302162f.yaml0000644000175000017500000000061200000000000031076 0ustar00coreycorey00000000000000--- prelude: > The NSX-V3 plugin supports native DHCP and metadata services provided by NSX backend. features: - The NSX-V3 plugin version 1.1.0 allows users to use native DHCP and metadata services provided by designated edge cluster in NSX backend version 1.1.0. The edge cluster can provides high availability if more than one edge nodes are configured in the cluster. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/nsxv3-switching-profiles-250aa43f5070dc37.yaml0000644000175000017500000000047700000000000030733 0ustar00coreycorey00000000000000--- prelude: > The nsx-v3 plugin can add pre-configured switching profiles to new nsx ports. The configuration can also be done per availability zone. features: - | The nsx-v3 plugin can add pre-configured switching profiles to new nsx ports. The configuration can also be done per availability zone. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/nsxv3-taas-driver-1a316cf3915fcb3d.yaml0000644000175000017500000000041700000000000027474 0ustar00coreycorey00000000000000--- prelude: > Support Tap-as-a-Service for port mirroring in NSXv3 plugin. features: - NSXv3 plugin now supports port mirroring via TaaS APIs which integrates into the backend L3SPAN APIs i.e. the mirrored packets are sent to the destination port over L3. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/nsxv3-trnasparent-vlan-fe06e1d3aa2fbcd9.yaml0000644000175000017500000000024200000000000030767 0ustar00coreycorey00000000000000--- prelude: > The NSX-V3 plugin supports transparent vlan networks. features: - | The NSX-V3 plugin supports transparent vlan networks for guest vlan. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/nsxv3-update-provider-types-aa1c20e988878ffe.yaml0000644000175000017500000000041700000000000031554 0ustar00coreycorey00000000000000--- prelude: > Adding support for Geneve and nSX-network provider networks. features: - | Deprecating the VXLAN provider network type. 
Adding Geneve provider networks (with overlay transport zone). Adding nsx-net provider networks attached to an existing nsx ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/nsxv3-vlan-selection-30c3d1dc1abe41d1.yaml0000644000175000017500000000052600000000000030236 0ustar00coreycorey00000000000000--- prelude: > The NSX-V3 plugin can decide on the VLAN tag for a provider network. features: - | The NSX-V3 plugin can decide on the VLAN tag for a provider network, according to pre-defined configuration set per transport zone UUID, noting a specific range or letting the plugin decide according to min/max constants. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/nsxv3-vpnaas-0b02762ff4b83904.yaml0000644000175000017500000000022600000000000026325 0ustar00coreycorey00000000000000--- prelude: > Support VPN-as-a-Service for VPN IPSEC in NSXv3 plugin. features: - | NSXv3 plugin now supports VPN SEC through VPNaaS plugin. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/octavia-support-2fa83d464dbc4e52.yaml0000644000175000017500000000023600000000000027341 0ustar00coreycorey00000000000000--- prelude: > Support Octavia loadbalancer support in NSXv and NSXv3 plugins. features: - | NSXv and NSXv3 plugins now support Octavia loadbalancer. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/provider-security-group-2cfc1231dcaf21ac.yaml0000644000175000017500000000115600000000000031147 0ustar00coreycorey00000000000000--- prelude: > Tenant specific blocking firewall rules to be managed via Neutron security-group API features: - Admin user can now create a security-group with the 'provider' flag to indicate whether rules take implicit 'deny' action. - Provider security-group rules takes precedence over normal security-group rules - Each tenant may have at most one security-group marked as provider - New tenant ports are associated with the provider security-group automatically, unless explicitly asked otherwise - Supported by NSX V3 - Supported by NSX VSphere, version 6.2 or newer././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/qos-support-d52b5e3abfc6c8d4.yaml0000644000175000017500000000053400000000000026656 0ustar00coreycorey00000000000000--- prelude: > Support for QoS bandwidth limit and DSCP marking. features: - The plugin can apply a QoS rule to networks and ports that mark outgoing traffic's type of service packet header field. - The plugin can apply a QoS rule to networks and ports that limits the outgoing traffic with the defined average and peak bandwidth. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/rename_uuid_config_params-b36c379f64838334.yaml0000644000175000017500000000125100000000000031103 0ustar00coreycorey00000000000000--- prelude: > The 'default_tier0_router_uuid', 'default_overlay_tz_uuid', 'default_vlan_tz_uuid', and 'default_bridge_cluster_uuid' options have been deprecated and replaced by 'default_tier0_router', 'default_overlay_tz', 'default_vlan_tz', and 'default_bridge_cluster' respectively, which can accept both name or uuid deprecations: - The 'default_tier0_router_uuid', 'default_overlay_tz_uuid', 'default_vlan_tz_uuid', and 'default_bridge_cluster_uuid' options have been deprecated and replaced by 'default_tier0_router', 'default_overlay_tz', 'default_vlan_tz', and 'default_bridge_cluster' respectively, which can accept both name or uuid ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/rename_uuid_to_name-e64699df75176d4d.yaml0000644000175000017500000000070300000000000030067 0ustar00coreycorey00000000000000--- prelude: > - In NSX|v3 plugin, the 'dhcp_profile_uuid' and 'metadata_proxy_uuid' options have been deprecated and replaced by 'dhcp_profile' and 'metadata_proxy' respectively, which can accept both name or uuid. deprecations: - In NSX|v3 plugin, the 'dhcp_profile_uuid' and 'metadata_proxy_uuid' options have been deprecated and replaced by 'dhcp_profile' and 'metadata_proxy' respectively, which can accept both name or uuid. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/notes/universal-switch-41487c280ad3c8ad.yaml0000644000175000017500000000057000000000000027427 0ustar00coreycorey00000000000000--- prelude: > The NSX-v plugin supports universal switches. 
features: The NSX-v universal transport zone can be used in order to create universal switches as VXLAN networks over all the nsx managers. For this option to be enabled, the vdn_scope_id parameter in nsx.ini should be set to the ID of the universal transport zone which is 'universalvdnscope'. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1782532 vmware-nsx-15.0.1.dev143/releasenotes/source/0000755000175000017500000000000000000000000021170 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1782532 vmware-nsx-15.0.1.dev143/releasenotes/source/_static/0000755000175000017500000000000000000000000022616 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/source/_static/.placeholder0000644000175000017500000000000000000000000025067 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1782532 vmware-nsx-15.0.1.dev143/releasenotes/source/_templates/0000755000175000017500000000000000000000000023325 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/source/_templates/.placeholder0000644000175000017500000000000000000000000025576 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/source/conf.py0000644000175000017500000002145100000000000022472 0ustar00coreycorey00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # VMware NSX Release Notes documentation build configuration file, created by # sphinx-quickstart on Tue Nov 3 17:40:50 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'oslosphinx', 'reno.sphinxext', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'VMware NSX Release Notes' copyright = u'2015, VMware, Inc.' # Release notes do not need a version number in the title, they # cover multiple releases. # The full version, including alpha/beta/rc tags. 
release = '' # The short X.Y version. version = '' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. 
# html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. 
The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'VMwareNsxReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'VMwareNsxReleaseNotes.tex', u'VMware NSX Release Notes Documentation', u'VMware NSX Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'vmwarensxreleasenotes', u'VMware NSX Release Notes Documentation', [u'VMware NSX Developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. 
List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'VMwareNsxReleaseNotes', u'VMware NSX Release Notes Documentation', u'VMware NSX Developers', 'VMwareNsxReleaseNotes', 'VMware NSX plugins code for OpenStack Neutron.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/source/index.rst0000644000175000017500000000025500000000000023033 0ustar00coreycorey00000000000000========================== VMware NSX Release Notes ========================== .. toctree:: :maxdepth: 1 unreleased queens pike ocata newton liberty ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/source/liberty.rst0000644000175000017500000000022200000000000023370 0ustar00coreycorey00000000000000============================== Liberty Series Release Notes ============================== .. release-notes:: :branch: origin/stable/liberty ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/source/newton.rst0000644000175000017500000000021400000000000023231 0ustar00coreycorey00000000000000============================ Newton Series Release Notes ============================ .. 
release-notes:: :branch: origin/stable/newton ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/source/ocata.rst0000644000175000017500000000021200000000000023004 0ustar00coreycorey00000000000000============================ Ocata Series Release Notes ============================ .. release-notes:: :branch: origin/stable/ocata ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/source/pike.rst0000644000175000017500000000022600000000000022652 0ustar00coreycorey00000000000000=================================== Pike Series Release Notes =================================== .. release-notes:: :branch: origin/stable/pike ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/source/queens.rst0000644000175000017500000000023200000000000023217 0ustar00coreycorey00000000000000=================================== Queens Series Release Notes =================================== .. release-notes:: :branch: origin/stable/queens ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/releasenotes/source/unreleased.rst0000644000175000017500000000015600000000000024053 0ustar00coreycorey00000000000000============================= Current Series Release Notes ============================= .. release-notes:: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/requirements.txt0000644000175000017500000000345000000000000020465 0ustar00coreycorey00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. 
Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. pbr>=4.0.0 # Apache-2.0 eventlet>=0.24.1 # MIT httplib2>=0.9.1 # MIT requests>=2.14.2 # Apache-2.0 netaddr>=0.7.18 # BSD tenacity>=5.0.2 # Apache-2.0 SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.2.0 # MIT six>=1.11.0 # MIT stevedore>=1.20.0 # Apache-2.0 neutron-lib>=2.0.0 # Apache-2.0 osc-lib>=1.14.0 # Apache-2.0 octavia-lib>=1.3.1 # Apache-2.0 python-openstackclient>=4.0.0 # Apache-2.0 oslo.concurrency>=3.26.0 # Apache-2.0 oslo.context>=2.19.2 # Apache-2.0 oslo.config>=5.2.0 # Apache-2.0 oslo.db>=4.37.0 # Apache-2.0 oslo.i18n>=3.15.3 # Apache-2.0 oslo.log>=3.36.0 # Apache-2.0 oslo.policy>=1.30.0 # Apache-2.0 oslo.serialization>=2.28.1 # Apache-2.0 oslo.service>=1.31.0 # Apache-2.0 oslo.utils>=3.33.0 # Apache-2.0 oslo.vmware>=2.17.0 # Apache-2.0 PrettyTable<0.8,>=0.7.2 # BSD tooz>=1.58.0 # Apache-2.0 decorator>=4.4.1 # BSD mock>=2.0.0 # BSD # These repos are installed from git in OpenStack CI if the job # configures them as required-projects: neutron>=15.0.0.0 # Apache-2.0 networking-l2gw>=15.0.0 # Apache-2.0 networking-sfc>=9.0.0.0 # Apache-2.0 neutron-fwaas>=15.0.0.0 # Apache-2.0 neutron-vpnaas>=15.0.0.0 # Apache-2.0 neutron-dynamic-routing>=15.0.0.0 # Apache-2.0 vmware-nsxlib>=15.0.1 # Apache-2.0 # NOTE: we require octavia but can't depend on it for Stein # octavia>=5.0.0.0 # Apache-2.0 # The comment below indicates this project repo is current with neutron-lib # and should receive neutron-lib consumption patches as they are released # in neutron-lib. It also implies the project will stay current with TC # and infra initiatives ensuring consumption patches can land. 
# neutron-lib-current ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1782532 vmware-nsx-15.0.1.dev143/rhosp13/0000755000175000017500000000000000000000000016476 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/rhosp13/instructions.rst0000644000175000017500000000172600000000000022002 0ustar00coreycorey00000000000000================================================== Setting up RHOSP13 director for NSX-T integration ================================================== This guide provides instruction for updating the following components on RHOSP director: - openstack-puppet - openstack-tripleo-heat-templates The tarballs nsx-rhosp-openstack-puppet.tar.gz and nsx-rhosp-openstack-tripleo-heat-templates.tar.gz contain updated versions for these components. The following instructions provide detailed information regarding upgrading software on the RHOSP director using these tarballs: 1. Download tarball in RHOSP director’s home directory 2. Verify if an upgrade is needed: $ test -e /usr/share/openstack-heat-tripleo-templates/docker/services/neutron-plugin-nsx.yaml && echo “OK” || echo “PATCH ME” 3. Copy both tarballs in /usr/share/openstack $ sudo cp ~/nsx-rhosp-*.tar.gz /usr/share/openstack 4. 
Expand the archives # cd /usr/share/openstack # tar xzf nsx-rhosp-*.tar.gz ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/rhosp13/nsx-openstack-puppet.tar.gz0000644000175000017500000037037100000000000023750 0ustar00coreycorey00000000000000f\\oG+2neDl9+ w%9vWD˻wA0h4ɉf3yqnî%_}Uݳ67+ux7ȫ<7^j*1n/3UYlW_K<5xzٟ v_(MamԸ}~'/?;~Fs 4˗`%pjx4zy;}_ϞoH,RSY|\hgZ_֟e4N¨&[HlȆnX;_/2wG{{t4{{ߋֳg`J k >|6T)O)y ,ix_mQ=߯@ J)ٛ ͍oʙm2?]Oy\ai ^.L^+)Q6ǧq@BqA|8UR׳81K6AgUЪq:SzV(kŕ`q!,+QLI(ĸaܔD'O/>Vb)FŰ;k9woG0)$:Q NX$o+!2b S!u%ȃ Ra&r m>!4DL(8mPlQC7\MXfȺ@ @`c+E_WyUR 8l^ 379{l߻j!Rq ?["DhEi n"\n ?gK`Cqj@fD8J DAj3Hw  .<'N{r;ɣ};f4 (v4RI\oN$!UY ` *[OU\A :{/ d0fb0$0ܽ-?8y0aEsncBb4N\WAXdQu&R'Q ?yb-l }@L%@)C2\ l%DH>^̼WD-`FqeASY']QP;nG;-  C5f \ UIs2ܑ1SFfZ՟$ԩu]عkX"Q$0gqddtt  [J-G`)snV}\m[U%0*e `O=Dv⠞3F|2B9X>ӂ^-% P K5I!)Qhy 8m$10iDUȗ`-@~?+  ^}'#"#hpig Dٴd#4pEl{& 0#t9̃b/Խv[ b[9 qCqkCipjl&M&bС8Ճh ^¤Q<};n$H^ԫA";gbӓ"a]}F!=I @/`ӏ7dpL@Q&!V vJpt" TER<Qﯗ'k-x%B@MG4we?aE3|0zw[r?D~}O ȟY`pq(tG  sk&CuTtZaJ`w%6H1C3<hο9tZ꩙ư؟$PNӀu5f1JbjV!wt[r_J(CZ<#̀ "5xEE CKK= |*P>9fSx[9&MubTS5PՅvm)Ƅ2eoc 8ph:y#@@P1&px&PBc8'NQ2EmbYaC|Lv5E1([ OCR~ԤXͮk)56")%XZN_^^ llxd}ǑE=Y2v=4@h҆YI=#Sdv^zAoR 6R)CߪmW^aGB{g (/ ׫0"d,*n1* n<*p9&9 љU a=H)|i^Nulrl@ǂwWs{Xrf&~\g)w (%I-hv8 gkPe-C]'>OQ ұkVzt0+mP]bكD3F`1P 1SAvwD`4`ε=IO2 Rl`I: ~Zh7W@ʢN.&kHjDq\Gi{Zk j9,PG+rmZ( B~E"dx] ,\r8W0ja֘.fc(BSmMLL!oVCFj0" ^u&=͏sG恑 qlNL>Hb JHl&m SXLF-;H~m As|^6En &U9uwڵ(ReB8%b枩=7h(^I>ʲe_Xdc)[4lffA @P-Gt׊v=G@-!vP=gSWrV=:J~jUT,2 8:h$>Vʺg£ >1N:1dCPwP)tCM2CVFw\[Wm]H7! 
pBLP)cɱb4h$`sgĈ H%̏+qR*݀ v* j.`Q}KX1s$Gt?X5=hDlnfI6aҦ'Z\9{ dM= FFKxj*9dH9LxX;SH¢6)r̀v`i@9c;&@[s\r\Ղ.H@4-\z`m᩠Qwљ^Uhc@N7:)pa%Ed]R,{p^zy^Y **FisO^+4as- i& !n*}hcR7*ڬJ 3us}B^\}_?b L/@ S6)RCj.: DZنkKO%5E5j+0 嚓vf1h wƲ͹%tHŽ<* =ƐA TR6ٴV/瓵^&__:~sB}"%$;/G>|l wxD{.o`oí|__jq Ì*8:?^R1%kbs>Ꮺ<@TbyDžph`Ğe^_ZI=飒[u%-GD̋}6rO˞!,P*wPT=x إ橦|?3㮙V\eZ:׬f{P}/N4@XJU'|dKb1&_WiYG֞E\y"WA]-tl\OѨr@W)ը?[g&x΂a"@W[/=iexWօŻ$8}sP+XSxt_9%L>X_\lښ Țs:J<$%~M%~y܋-Ex'{Ď;3{j;73vF|q4s=~jT%_xz4uA WP<  ؜ ʏ#˶}ttvN}8?P.\Qt Е ÔV ȧޭh >!sRH2yk)D|,3 %)tn~m< $v _Pe>aXKx}1HDI(P-  & s3йS1qDzS/W>\ O;cn$bBxa}Kޟ ~ {jp>SHuyAWbA" s:I|1u=W3'_!3]ei󴉺!X>>tOhH4SWߒՓV]v)}wo>gޤhy@ᇽ_?G;ߟ]ۛ_K{E5]a+_bpIl0?̥Y<{{{\\pv;S;^t=Ȁo$ [ja4R;pƿ8_9 /2Kk k96rSeo![.0s|{|&@ňW3}Z^!Y+@ѻ%p!bW?]nk>FoHBso1m3qFJ'J|ۃSNMJ3{Kcmߊmtks2yic~)8,k6!= (L`c[;o`(铊TAHH8K~ ߭xMЂ)Eي?s` rm:>@+2#w(i5tAM,cꍑpMҋC:nT+DΛ(|-_lq G Ӛjx_LIXuTG$ v]ib.+<Ձƚ27^I%f9lXWcZ+ "Emzw|>#A՜UHO$Շ?*$E wm>Fb]A+$,M4wa2'HGQ9[v1)SnlO4iI*#2}$p?lE\#XNDd; ̳Z_aVÖ'1'iT{&NCFc52cDԬ@ }/9+t(%*W1ɳƘQH疹8 9Dh<&03rnb5pV>mzƗ(8%ar}1W u% oYڂ2Cit>0QGvK>3M`,Ld.&f9Ӣn3,D;X /D,$m?X= <$ /Q\!92^#*GHfqZ0\"1gWM%y]RbĨnQ(XL9Z+ZWN/p,N"( &X')t8EʇA?Iڏ. 
X8pu<Ջs+%׃ &hʀòwd0*N4$OrULɽac3F|jv::5=o5fsX6>*hImƦ~,$#d*яɶ(D x/4{a.qWKٍpG:vi 4$J_mv6YqY鈃!_9I<8&7[- -7"!qcZyE60fhQ1wEv]PųBc"y ^4 1J1'i4E0 '9<1y)RgQ|K@8.JNS\ת=ϕ1 OoQΩDiC;QL4#K-$G䀄qzhJGw_h\(ƭE,CҜ>*rYD3Ԁ46 `ֵ'od ;',ᱽ$u-9H^Ax % 1w IEKhߊBijBe8{%Ww %"Z4^Bs,q#$#.PGZ/a'~r \XnZcS*LAD %%cPBGL{~V&nxHQ=x$:SO¼ܪCڹeAh{.`[zOuW74pET4/ݤ`<8=_99?=^:իO/_/ N^w_/ {Ԏz$8*S ғOݢ-9]UG9 O_?bŷN_^  s>-|D8;y W^?cj˷}YX+" +7NFsOc\C_4+ufOndF➕EkV:S™" (2a>); iy5A;dCe0.1p_hsKvoY\)(N[B69A n烱E>P)#nF6`%SI6 'id[O\% :]@%F;sV…RMlNxhxۆ  e# {$oQNF!>_L!"[ܼ|pQ !2"NPw%0߀ &A7|^qp$ŖO46OWȺս,,n\h*IX JN벝tdCNdˈgʃT`[w`%Z!ڗ}N_hoAo:&R%Uw JB,J 4E˰o܆([\WFD(8}1@Qf/?Nss]D=<IZ͢>Xߖ 1&SsVneƝ[l 9U Q6zvg'Sg?FwbhxTOtqγojPa;Cщ!3Ї+9wawMnXo?.KUBG8MP`v>x:!b(j;(qg[`4;6P .P,WZ͌,Jڰcd|NG|0R:4ܳڭҭQѼF@EH3n)Du&ZsM*3" XF 5Z{0O"BXIQ_ ˳R7=8E5lDq hݨ1DVx׀",{bYx3O#J̱U)7Go_W>w-w (Dmtt34=Tz#4' qGأ$$O=Ni, G*굵Ł]:Q`I0j)i}$O}ڷ1酌ѿR[1ʱMʈB3jk2,/q4Od9o_?{#Y?ckȺk9iM;kJHdJ xoF2^ 78DW/yƅ^xqx-c!NÒLE~qf}@s-j>PC[U4yP_mN+XgK!y<败WQiF\knU| MjQ&}Xr}5n+0iGF B5.HI2x NvL5=8FVU ؗptUp3uStRWSw@%cKyd¨7;cIKk#zJ?)Q}~_vm2GJ :OZ*Pme_!1+t!v߿oT~yB6U ^){o%HP/qY1!U{_!voA>{Ã]ta?_}񺪭.Q6Js߉]e<ЉEa3b E{GFe+KʋR-n]5Ջd""J'"+E9;IǝDD㗚+i\juUݪ~,"ѥtw7jm\0j&$ֺ'to*zGGuWe JiT3P(/3VbTEu/&ݸ7n*Gzs_o6wBR)O 5ק:'m2jSB^pE.X.-[-,TX+p' n Y5/B~`kR3e`\.opOv%+LQ_.:Mk $]Os+T^vӁE +7VZ'h#c -BpdѾSwmRb¯ٍ-ʀӔF#aZm_A]IJ;DԆ;jn7j3P˰j*`I+JŤ4["-5tbzasR\o"=jפF7ȸ};SAq+YNSGGs9A^ XXW3O²&vlfOd@KȰ)E@s_>OMQAU6[ϨGޕID o Ҙ˰Ry&3;8ed00*2(- nDJ@߶Sm QYeNg%ncYjt=ϿT*U.AӺ'dc'V ?n"N8WStr7eiXImt뇣T 5KW3rG]r`B>rN=]MM7#hBQprv*sN;T@t!S6@-t2m}&׍7ÿqi:$=9S3¬"&9.hDr$c8t!.ʟZ-/,*+/ ôg+-vSxt)H e,lY䚧6I<Ћa3Mr(qUduEEҋҖF[M3W k ovhJ^hH?a\0/1_r N`Ci1,edkU: NB>](?gfkoOt*%_`ߵBCg[5v@ 0%M;Q1)L6/,Y¶LNb=mEqz<ײ]{a}(BUlT7) yQq 2J* 7n䲋,l\XDR\܁7֍}] xrגw=`m+0O}FXA?!z 0(͆+ҩůiG %ݺ(J􊲈7T0E#sVЬ ޓ+n 9Xm"2c!qŐrfSY٣ D-!$vh*87@(茓w]-M&[nz]լmn Cw:dۛ-N{I0,Ku\>NpZ ,1z J A{Qdb@7vjRvZ:VjA|3*tƔ'r. 
ӌuk0?3FR{ 2 B,ʌ\rMc.OMVL_zv T^ceGd6IxO8ei;5A3 b+fΛV O,@HӾKֿ\)uGG+<_|ֿ{j̲2tӬ3fsX2_]$ik>x(2G%<Vm]yhmQQrq,A^ޢbh{O&eEʆ*͙REo1-\!Iag1&X7TdIxLV胂G"/f;{L-bv~P*杆;QNU@(%#aRQb1%@"ubm1`+"߲z,Nq/<߁g.Y+`NW0}s{Rn0{9|q`$^|$"c0 8>o^^H-kZp?;tR_=>?yq'@#?ǡl_>7G{ͣf㠂gWNۺиqǗXd3ٍ2 cQXC( 7^Ze<+OO?Hxd#<@Tl2un+ cTYSp dD *J=UlIzM.h]MEMG;Mn8l iR0j<6uzꙍ\ɳZfפOyZ>ݶJcnM֟vV뿌go$Wr@ {5Vy$-};hù*Xͺ@[RGk C 8@ՉeP 9o4_G{NDڝ"ΠJ V&0X=},a-sIk+RV}nwYjnon'( _~5:Yӫb7oYڊkkN$c^|U_uJ *P`~@XqO&()UTndp/=^JȈ8`bQV 9Xf`r 0E!d+QQ:a @6Ey 3<.J> ))|.iwHFtB'- ;ljfvhzp]믏7XߺJkXF'quU _q+cu7`Y}?aFIBk{ة'E34knw%UfnTT{ ̣̒;J[68o#hۆ{}X/#gLWЭ ޺aL`=ՅYY1UX|D)uֽzX+ O6[5͂z[ Htes,!#WZ/'?ë'Qg^|̩o5˟z={m6sj99?嫧64r'/) k˻ñY0%wNWWkb` %B~pă%AcWjKx௲,'HFjų ln],D`W)ayOaɷ5B<DuV;?o|_ww>z?Dk?0Sco h=Qq85eksf5/}?89+ S }<p2IJ=AOOeN8Kԣꝰ5yLh@8ZA kc.-!ȿu޽e.UXmGsk8^fVxhĴThc ZK ў#!)}&pYj?NV;L-ޮ#v#I IIQ4h{=)A\qqҬ?Yh+̫46nq "qA\D7 Az"O%h|mS8AcE*/ŅM챎$R!Rk^' __,'tdLhM4,q:9KP گ0@ҮΪWEMrd7+E^rUC0" Ymw l^})"9N6@7mOplPܷeOR(_%"lʅJxoc ~(A td6^XP  qra@Aȴ3覮FNw<{;" &F9Qp*cVU6ʕ|;t,71K2+c_`zc&{E$w9ƺ˧/7´{  ʃhn:Tp ~ݽ KYb1O$ϧu֣Gm gcHf@cQSc &?O٘igWjemo4# <\ 4M7g!Eol  Iz 2ZN>\8crGSA) MLpK0/H'mS'@21W=_NƜ8(fʃS|TsB[$@0c_I<*@^φ}  z9~>Oj!tae shegb6W_Kyp^ u{8\_ k)ϢQkt?4OKyJ񿹠"Pv/~5H!MAvtp%Η\r(_"N/B[ kIuHhHWpgON+3篶O_XN&Pq+:5 5MCB㷩P#W!ݹ"9+jX WzK4N`< cM0h++L[N9Ax5ދSy]h ہ~QRF}E-2oI*nJH_{C.mj'2,Wjo|ANV_z4aožetT&/[fk}7%V,-u Lg2Z*M(Rnbk(j]GS!;dH t 2G-ث4 6߳W/7"R {=fʜ'v;–κk Va[߇]v7-rf -]~.7K1wz4_Ƴ7dlRO39j?KyJX>?w; zfg)Ϣֿva+2"Vh2瓀9Ax$ 5U<%_H ?vn]Ur4O;,nBeT[uaDb]׀wJt;vo\s߃ /Y@)8rwWOϮ.PxzGOx9GQ. 
6+Sy2|S9ֿhF{οɿF ^=Ypeˮ:K}76Ч(oUP+&YQf<+pWoYnÕrr^݇ xx)zg|t+~33<+o "ֿg)Oߛsd}t{WQiϹ\e< ^?52?GKy fuN& R<ÕRů?{)}p$;ExEf}t˛AXe< ^,oRYTk%[.Y{.fo):ȱ@Vn?Jg^_pwG0Ŕyv{s+- rImsm=p)c\~Ӹ ı*e[nLi),kq$вr|*Y )\$g;O7ݫddwb {:뜓oW{] 9/Y9kk.5B9Qed)}p4((ňGQOuu HpZ@duzߡ[1bn #j;i'ģ~Ӊ*饒|I]Hp__Wv[1_Ӛ.Sg@t"lLkF7J X#D⪚>qo`&KLM)H383kk CGm`j*P_:JE6j9_|~E)R2aS8o盘H:\NuM|m ='fgTR =Ns3y1i/k^t;X7a: Q7Ɠ6Ym1Ī-2 k/ׂfÎfF;x~/Vߖ,ziˬS.~zF9_ r/)x vNM^8 KD9sg9=F(͌s)O^A[/kmUN cs""87 /`/iI*7TzrjXeXwAQ8^dI*:n~Ac~N;O֮ecɷ=e9;22H'vT-MalQw¦8_~LB)l-Wo6ԁF Ձ4٪[RW1+=J6L$jOG*vTXa0L ;7 L>@h+TtG557IW9UFCyJ]Յ)wd遴{LāP$<o,:6{d$<9;%hYV_ lI;Mp]N e_M=ګ0 v -@ž=lZ9؜$ӆQ\dl e}{[T{[ 7/~kg57аS۝!])vN2T윃gS9kn=sH ak8Wc咛"~L_ifmUQw[lwh"O*Rz̞o9B7UH4jS<0D>[PAc6VsM`eY;yeL2n|Ư>,粂s1wg?Y?^ԏhIi`&+43GTP:JX Fxt"\ t2ocﴻqM .g;[\γ N*r{9suw.GNW6 3< ;!.thG+<6-( /婈}@tE Jŝ^u σUߥ<_9y8Xep _Y 3 /Y`W9O97V~sS[+ P,^T7xvx_γϻ.Lg)O9`q}Y7 C̎Wg_0?l?,j?Gy?@tq{raQW<[;7] WR{ErGwV,j2 zp܍taO?8Vw2㾏̎V_KyBP?_S}ڂOgH*,?qfMs)B?G( /)ru,WO՟Et' #V<[qFVrO`i]]aY|/+<]? o6Ky񿵺XxE?w;8K?@+~!}\$U,) +ZV[Ȩ͎_JAiY>TabH-FVLv' &){N}gctި"Om2X E`xg?$?* #3.{ܡG1k2Q\3ut@63 ^sy=/j\+7W:|9yַ\0h_#GfrCeSrx#SVV!.%TQZ!ր%e`*`֑?t&@¦ v([$ /Q+\9NHzXL5]ڊqiς}#@%3zi/j÷o"B|vq_k{E՝rr Ui̪ɚnF*[߯MzA V<;(b-d(UPOv~9VQ|Nm`M=պ=MozJi2+9Zzr$}?EwOCSeO9섚_)jV~gv]3w~8 uCx#qHg)B^{e)OC@W=w<dv\/YY9?WKyږ!A˧Oj>~6 kٔ_\An 7[G U')NwI'v 4@)1#QOE{UWaмSE':!3m_aԾݙdo}*U~Jy7Oިl_r0qcE2a\ DnmЛFp܏adD`Z_w4HMW^ֲ#5ŘXTxr: Ln|Պ =pB*3fm,rU4 Xslpk^L=t^/nx*ۥ+ەpŽ,E A!Cb}{V.?U7- evE`[v{}A8e*28+[qy=vBn4 ^u{ϥ%q>}&B|kv刋mtdnxOpua4W_m˛J?e:lVmV+s{4V+gfvlkHm;`GZd~=Ǧ[LlC6Jn|muUqfճ$ $pY@;Le8JTS%fzzjYkYI6\j KYs_?m/ߥ<]Mp'*Rswѧ X0A9`9(IӸӏn@7K쎃i @;Bb[H+M!$.vRFX:*x[۬'GvB`Ѩh)5 dL3PZܹfJ_jTM+6Ƨy[lQjB۲n ݤ;xGѫ'AhXOz5Z8;K }z>~l=`@3嗶 >P oڗTTʖV]vUth2q .yn FZ &"-Y_KYHG!cO;=W4VJGN߁k-x,qe(&'amqWzYWp3w ?'>*sGkĐW CRe-2zޖWG/c)Qnc%-Y+X{G\_?{ҟRwYny63,May=j?I2I0Ʒ/+p(ix޸Zf6 k4C/3FQ3G\u!2olx%%0tO|m<1͸بx}+ 8Wc߸m ؾ+ vPkzmͽhԛSZE,RX| @!, `Q*ߠmh_{kӵ֕hsXWUF[Y(W~EaXU 'ܪ!W!Zc_i9ݙ/p,t&% A w.m-r?X-Y+x$`_YSėgV77wsO;3#zOƮޤ+xx X-dԏVLe_Gd A#y1NnmAP/0ؚ~ި X6i-06=7gi=pwtSxiᜒU\$)FƯ[!x-bqi9Pi_&5G@1W 
;dxA'VBi;Rͺ7QE_{HiPI֣_ &Om’aw2x!?s m1q[x7: QۜjaSG ШTi6~v[0*Vhd}eZ@7Vw+,lȸ n(`M ⭱=ƒx4rh8>E ۯe ̘ST98nMޮBm]Q)Y~~ަ1]jV!ʦW̖Q `dhwB ,2*hw+V1AtWRr)LZ NL5Oz ܙUP+V? 1C_/[\8x< t-6Z) eg=Q_~z;6fbKbT/L'>T iJ>f.T`&k.EM_bpW*rMOFhm%ŗxs?w4Z 9´W7W[3ךߊo x !’CyOlS}M*M#U-/G(qtt:7'p$w 0-5^ʳ6\*RC,>8`." oyXSߩ|<^>uW?nqpx=zz賌/acw-) Օ\?#!{+9ς[.g5G+LJVI׈w~n˱lgKYyf +gGm,J P_%صGkВ,Ex3[;^JW`GդإGrܓUF`4_~8?pt3b+ua_4IOw+$3n"Ɲ IīPg90 _.1Pq#vB;3Y_#bNh΁?$ѭ Z0NǘI\$GY=M{H4;C)J,vd-:)_It`<M(ddžb37](5`_(cB>䵂]6a?ko|Aworq'g.c+X'Б֌pVw,4c1a3^]oyL@ fϥG{+ON Y:̈MM ۛp]{ g1:Dc҃(m{82?׻(P?V)c௜جUwfm(DAދQrS)É:Uxa"A&4D[?$/ϟɳoO~x،lXKvr ۾y[sHe+?-)'2vZ8COJ-gC>C%nCgKEܥWyYiҍVD; qvscM_2Ih#&Y0b9^4fɊ|p<)?%LS"+W$W 'kS3d` + 6Wo4Hi'EyD)Bȡn ƽ:ra/x9}򑤔=(YXB_,;tP{'S+Lft?.y8fv*{82Roj8h%C@ ІjdaG pƗCf?|,JV 98nWLtve~4w|B#hﻍ5{+Ĝ j?f9Ht2{a,bҙhgyMoݯ6bkTD}!S|-R y'*dn~4\UoZ:9ЄȢX ~'c mmKbN}\8} ވ+m{7}#IwE P1Swu o.rou6qx]ݦq7ņ?n} E:Ѹ\䱸YLQ߷oz5/)?zNKyR*yYl4-"̏Gq/2@.WO W,-ntI?? z lHk*#u X&O.R3I@v'Q7a$#{r]fԚkOkT93?}N xr~W_+Rit'pb $\] <.?Ne)GYh?KyJ^`A']FڔծӀ`.Fy11k}p#!i&G2\*L OiOU5=%tDƠ߾Eԝ}=+wzf1_*RY䥱[]?sGn#`k9OV +o'+ėc?sbl#^%<ƀIXy%5V^-Fca<.?cl;Fhgw%/)E^"77yPhfoc鸟gn/Ԫ*{*rb/nEJ{|>FkOt 0@Ax\qцgq2Bh1*[ a6Y- av$jOG~RHE8bi1__oŒl;~tcg*jHh)DJW!cyb鉩BDu;NR@-h' dށ}:2aF .\+&\z-gD}zLAAN[8Ԑ #igX4}਺W!lAй W'S,W>Ne/pպ(O_!O.{9F kG{<Ỽ/;h5/~Xmwd"lR3GH^lZҲmEOjPit9nSqeI*iL<It7PZ:EaN8=I^/.é^KwE\ 9_q?xr%Fx8b`,ckA*BhlG+k15oTq[*t8sn цk!f7B|d`, 8 i&)2Ľ_o83~֏4RJ%Q"ĸHx5T:dJvjw㴛e?Lgg/)E^ڇ-8[a.Xޡpm,ḑ<; #cowou)Ouo.xc_8'Vb9g,癁E^pƎa4.`E#V4>~JgPݣ­Hzp$Pod5W9O%o,&|[~n] %Zd?+4|ws?CVTˢ׃p ՊXqXH0V\|J_sg? 
ҿd.Frn~1N,/QfCkG*^QdcE72dУqԍx-l[e@ OV`߷wBtǝ.>q]^i!w[p2bjSֿb`rGf="%մh{.5kwNi~W2B^Y g}P#/`h6+RbYee[(KE&A2\>szJ2n5VKy?.BоWǖ\̈́ҳ9z~z}޶Q,<=:.,]@ Yp @Q8y49rsYfBDf"x&=wj2&iŽwY߶ws&;|ΑQFl-;ÿoR=(+3ҿ s?RuN܏'_p;]f2Q0$P9 ؽ=W RϏh$dwQ ~gޚ& Tg?2nhDXn9W+o O]O`tirX=w{t{,)cpƽӹ'O_(!+ ==r P_NI1:ۑ( ]]>j8=hm0xulbmN`JcL@3-tƋi7_ Ka-0(܉0~F8QS8x޶{ GKjN8MۂһsGI,"$|zlW[mVPZ;r4Wq<8R" yCPGՑXBm/O{rEk) pa"7 \!PgeS6-#%{a5KTir8d K$&\l̮'`h)y!ζ~0z`׫`EsPS9*K81k%)g^)6%xwUgdtwifmcUm緭dhjv>X2DO uԒox3 iKMcdؽ8N*a[ C3Ng ᐇ)<ݜ`EAghi%pw?;X.)=D`Zߏw+3U$mZUe>ww.e_v\Jyi@NԱ9@EEl¯=KpȈV h0~e!* 0Ǐx~#j=BG pN2pqm|I4 %MVp  F]h w"߆|_d;{h/-$\rWdx~]Q?SJ#Q:IwnA{Ã>4 8NgXBl׿qgѨj%᮫?XKy= $q|y5 6lQ x9vkXd:JƭU <%c/q]!8lj<aZ'3N ėGGmw ,.>o~&Sn%Nӈ(XD1F8 7$)[݋rA8s5Z;;777z[OƗ St90[/ΟmC̔ 6.Ep7x^#(Rlir1|г%\ k'zV^`tɋק΃'/_<=}}68yxD1cGGtUqA #+ud~8ԫ%~Ajr~<'t̠xSCj1ib}2(0QažAoCdzzߐ+, A2M`8t17B~j%ZFܹh$ &.юM۟ L{${j; /0@AYfX3!\$#v${ 1)c,`+=Y e4^g<ldNQՎH*z~.W_i=)_x.:i<՗9/* !Rd2`6q hamzW#(q^H:?C{8h[c=}J(&[Ѩ: -rUvPc ?ֿ5^$OUM Mi$4inE?F^6fQs2l:-8=}dhC2WR|Fp *pu:~ujkTFIeR\]Fmtkj`uj0LйC ly.2uhSqcg畱v۶4[kVZjQBiAiڝKmD?k-REQQe {o wBk+ZPfOeCۧ7fӌj:~kx? 8 N?N_NW}Pz?+ϝ?ǘ [Bʣ8$JT =^3zn=VTtg0+ Ȃ4^ B8;wg\9+! :0p/ żjۢ fe͹?;ZHA.N_wP7O {ܥCONO(,=Őc3ry >MVnʘL~LLM0+3<w5Vq\2.=ٰŝ ܫq }BK.nA`ۧL2%qJ|XvJly%WWk4ύg)Otyo~8*e*`B.-]ĭHȍ[ ).Hxq4ƲK`F~&^P]NһmkIW ZjNN So3ZHE%a\`,m oVG).U5*#?I59ˌ隷E*Jiʕu*9sNj1Oم7NDjꃔgNs̉-2V29D5duq}쭭R\0Ivja7N z (t򷨚oOgٿ,v3/zFvP3sbG0&n>{""HdW~5Yk?+XQ8Kor5u(R W"A,KP.H,g4,!>9!z,D5QE>`fШUzSd~K -YbxmPތMk,8bܟPO0O :m-'IW2 mYaZj$@WٶDQ"3.\\FZ}HyJQL^^3 k̃bnE(4@XģB xR`MP\k-y<]3}wRB;SGBϥưgwO8d3h6.d<;|f+_GC~_&c-OJƭ F|vAw}@=P@$a/bȓV#Pj4ow ,.>o~A4 {,J*#hDT ُi(*ڝRAIg&ѭxrAR4=(z2 Sɳ϶TaRÍ*C&^#0Ubr@dwkd(.`|=|+w/x ~:ygW/>}~}N_< +2Ai#RTꚼ/H.1C+9 N~<'&d%&$'o+?Y,e"cVj$#z/J9M *JCXar){q\/ji/6fg'qbQeetR⩴$pBeUX( ~-\a)9e7 [0ѽQ|蒊FUFz=FtH ^a7|'厲5{yh0ލ}Mׇr, fs רZB~am%g&i̴* ė+-&FIW$I6kFX7!E"p9+B Wҧ2i lѴOjEM^3'4@W475a]m wp:UɖkFb nq?GWiLL jp4(KUKC9n}tLE'4TXU$@QJA+,U4ں,#oE14HML 0j8!6PxD_ePrW!/zor :||BuRi$R?eL. 
9{VEmj2 3|/uo,x{rT-c =$0H{r$Y򓕪xl LÀ9IS$$3 mBv`Xר=]8ԁ]J:5dxx/ns[Ԭ/%&4%B5bd%(7I_"Lnnҧ9o標gZ핌 GPx|d5tQc 5I8=eq̀_UQs9ct'9ŏd\o8ADž4x 'A/!Z&BR#IN0L"`y"@ax{ΣJ}鯰,Ѹk0*d? XGݶpAdAaJIM;;+fLL[O !)#Җs8t˄vo[w×+-ZJnKf9;Qwm3z\83_x:8B[~E@YFӵ<<;X}km^7wӒhg2:vȐ֘0, Ȗ\yCd.=o'=S}o'l=\ `>S$6WϷ= )'jla=TF̴2X! y.ݤb7`Si Mw_ܢׯXG]%hSٰ:L՚\?uܚ`$(fT6dKY8 ^U=G(,\&Hb0fR8Bse|$v/K<4`8uz{.LbpaH=J$G$ ʀ%k2`=A!('"`ʞg(l#8kc|0oIR}+*Rv/x=ON,"]p,xrz@#S9; qm7+ Su(\ZaUi3 EM&7N錍KL\0 _^L(KZBy  hAfy⺳avL|;5̙!.1 V;1"`3Fˬ"|:茷c kZLR$+L ^]Хz&ftkMɛox}ْsw&m9?<3k3Y?T|i.RݷGg ȍ$q$2 UA? {r 2^kϏu/nh8mSjh̉㘿PA&JG]6x<¦g8/MtJ䧸N8T/M.R.^s):{!ٝLЍ)]OC",6g=_/ qB<]qn ,!l .@̡z%|X`WSMB(PFY#!ijYT".IJ\'^{`|], SJ$L`H&QD0pa,4!fOYP9WGٮvJ 2/Og6t1SChMPN*̃D8KnSrWKI>^Bd[DkohohccQĄQF\t'`s~=dC^38|㰋XM˰0O:o;RbYIFwe|ӳ >Ex @}#x3A"SE.IgKj+IP< -;iG! (ف1 ʈHo,@2̀AWH)~* QQP11E>#8@ c2wYb;lƽ<;>JyR)ȴwwQ*bRWNUR-WYJSLr[w 7gdEfsB{{e|2 ݒbR3ʥD N^m3ժYOjskЮ`3PؒsRrUARbgϭ9P,2۵TZlX tUAVʭ(xճ''= N^|WFu (jc%JIڂLwB2zIٹ 1m]\w[S=)<6콾i϶373d">t%['L?T"gw1ny)ZJ{|OQnѻď"S lSL=΀iKyT/JI*} ,h{A.hR\㯹y*I.(Μ.kQ ypu0lnzR9 ;ns[)f@r|~|j'5rY6|#0)* vgCIʿ0َNQrDDВrٺR>p֪  FAн#(-Uȓa̶Xu?c {~1Pa")O3o?oդ):< 6HzR8 X#+R-;P^ߎK O,[eJ =2MVH.&: wYE@7A]ei4=Ķq} 4* O -nl^(;h{eǪY\+5$4 dFfoyy.>y,09@ݡ9]7"2Ff%4ڷ#m HS*wT'*v&p\-W\P!o$9B Ck;-*pP3dRsBS_5SdU9n)Mj2f/m:n ]/逍HekJIt~EX~;92+6=;Mܟe8j|dEbFOحpgg[j.G n:`H 3Qo$!PFtIL 1C[ <YIlf^m~JC0c~'xS=[Xn?m/V=$Ou#X~}y_(Rfm⬢74(Yuݖ73KSm7bTv02&7{) =ա吢 N>tbBNql[c4\/٣ dT3ʼsV[Ϟo+?TK`Bx 6mZX+3'A-RN ޜVG?lU)B4lsF0uK{-Bg 瘥(cw$M `lh(gHJa_~1uNbqe {'m:'掠 gF*R9G*Id"rz;>) ,LaU6{@Kk(i 8Dۿ$D7!^]T$ #H.. 
/+7eWF;k3V X.pn=x5,xfhEU]^j~+ asԖs9LngW۲S-#"L'{OO11@M*'b& 1풰66yVRd!Mi8&)urb 09ԉ(w[T6/K| '=<(吁L(A5ZJp_SnR.SkN1S]u{M b <{u'mAmwPq_9M=JM/ǣ;R!Ṳ.4)p{pegO F\ ĶA6/0- H;AgaR؄"܈8nX)>2N;7,)M.՞cRB:_$q$^5X߹ǤхZ?l$sOHkkeP1YٕbWޣbU%x@Քm ITb{1mVm&/$/M5G>#EZDp5% 4_U4,ndPL0p_ \[ݓz46-ti呐8INdCĵmd s4E`}d'0q@"bP?M^ײJ@@_ dF+a֖9rwGpg;x?p; +٩W[4Ё9 !soiي ĕ$*ܡq9m /޺V1wVQv1GBMD;uHQ08&H݈)Z>$OCe)t@SJ*K(Я/G<]tp퍶?_{]Ux ^T1 \[5.XMavV -匏=g qDz^P"Ս-b.{?̲wy$a.h0>t* F]Z~bGы?#:K!ĥ$ #yN@F'y4[IJ/+V"M[zE5$9mrku<TFɽR \yA4[!Ү9KmٶV2m5mז:=rj3c ~[*mz*lyM R* so>֟=AVj{5@؅%$6&Ppl@J?vw2Bos,̰,Lӫp0\X_i{sg!22(UvE;^Yɶ}sWC4^ԙJwofKv7lw#e#$F2DޜVI`rOCoKkh ÀL 5R͎3-u-Z=rlL'fiGvSŲ+p;&[5ac#LtM'""'f+3<!fiDOw+.]!t A.0NI8w+y}Wqg@ S99q- 2W-1Vb0X?!" )y: ̘L!9zeC8hfF,?6PnZ}l "-7-e~u;)YoN9>/Njtm^ݱiUaYv J.B[+$\nET[ߵ' {;"N1H88͋y$yHJh4iOA =]d6!b~a' ry?+e;Y y1˷ϯK(KݸOno Zoeeť6kN9g?3u $S:C2`RUiZ͸4iq,F/a(Zq{[w{0p*?]o x&NE'a[rlOM9$* ~ .(4.J~ ZEkKa2A05||g0 :;hfx?dz3U3Nɘ4 ٪/URZu @ZG _֑8 O9~lPZ5dzGٺvJap>`4m@Aps*&, dnm3*Ɵ>;{gOW'?{չdfAgB.BaE|V0wo^y2:o|oS;Z4H"6j0c E˻1e*0"ϑ݆a/}fٮ܋i)[S ,/ &%f@1NJTpc٥hs n>dɉW$oPG9T8=0`ax.9`lKG|aO4O+.Sl8k2( ؂aɫęn$^%³S@~K%2LQkMc7՛{궴W(pkxjG3Ut\ ?|3G<%ݣ=+dJ6JUiDhxNAꑸ[Ϊ'(u6?Vu7`(F+@60t8YvZY? ΋>=Wk2K,4CG(N<[}N**y6NX!aͬEU c^Q .PqgskQmV1/9͘]$Xwү"Tp)Dq*Q(_|65 免@%#p8V[bFW+( STJMe^0Y4guX\x)mgP@ʧ, Ʉo^XMuPMx v*,0dz]lk{}/3h @;'4]՜Ď+v=]׀5[Ԯn5kLgPța! !:T<.@8i PϨk@HǽێmB^G΃jY3`n<w^ Ng37'[ (;J^#L{f4 fڛ%Ľ73 1w # j1</nj3n:lY?xͿ \r}kH3Z c_&`X#088#NEEulћVdp9av#8dQ15k)cr^l."[,8bYm/IX[u1d{<в9 ͧNC?C%b>“[%9LW̃L}L X7)]gcݪb%@haw<(!}B=2H!L),B+k, |P2hS{=rfxypT)AYi)/+e#rv/M,QG$ ) QPF Uk +itm_ˋ'@.)enVEB0ԋ zG; *^} 2C;}nAN|X>-/T<)6{C"RR^KY+T8҇̀@,z/"1ŃKtCJ0ด|nMs^,R8,*EE+_1U2*63&3:;A6wcoyxo-T(q41kYT7e;:K($ RDՎae-"GפO<'s|3)8z߆3i@?m…e3<%7a|m4坩>_<9V:V(SXSv&?;p,2f?v2yD3R$=f) Z|f#Kgccw?]}3dօLbg1|;Vh^[J? 
Hk)x.e_؃~(ci7V__) jFJζRf3F^eze| ϶P;yڊ剪/HfMPX[a IF^3&q" J]>hL@Qzk%&vڏ":@TCk2"lq~r JmS.\Nb7kJH RncFz[;ΛSnU?R̵NmDçJ'GgXI;)KDfaXXC6XO"LF @uDuՉ׏- 3%u!)F{x&E>R=iE|t` «#5<a,;lw;ȵkeQ&MXw9 A//`ɞx5pw0AsCSǔ݅)-ݬ!-h0E?=tV-]B 5K?t(w:Tv1 r]`|FT0^h<7'wu LE`h0N9(!X~Ź w@@Q 8Wm7e ycј0uã<%:B>tb^HLrr1Ǿ^>}\z4cS2`{i1{-`RY^GO{i]U0Sieo mdﱙ%ןZʺXq|x \pp {oG<ﳩ_<~9~M ﵁<َ pm8s?IDvL{ӛq8!̋ðVNCm-1rvXʑՅJk),Eڄ J QIQ YVnB5r23ǕYsʘ)R ߜ|^փE2ZfR#U4W!9 jF:rJFN7}M-P6ض{#LwlZWz#g[2Qp.z p DBOzYWZ;GL uֶg'ڶ^ K; RZLU7X*f5N Y^/v'^Th[fZ]jx,Yʦkx^=ߪF'Ьni3Ũ5r}A1;TT%=BZXM8Mhba(cbH ɂ&|ofWaiUɭPAጉB`wږr>eG'& ~1h0%}8 Ox^= =@h_S"B4Xp^= )&KKdh{LB K'lBw|01wbAUZ;;DFh7[ ۝x<×kfhrd3b3׭ֳMs0($P ra۴Cwߝ|fD(O:Q8Q+ B,}x*]%wx (E}ϟ X_`1 o_bxT|'C\Q^؊[x ^;GCa3j0az1"~A~Lp.Ua%48~wGG{ok:"I+2.DJ:'JYDo3ӽZQ=1˽Da;pwiTD^ vkhb=~Tˁ!-Jx=x=KR`y-*1܌a" ﷣#{0rstH gdic#h @ܞDHG5sЊ/!~Xgt }鱍7_i.jWs>l In4b=VOZ@Q>,%o͹{![5 .x\|\Ei?w!luc K^7h{u~ꭌ^"y5 0c w(Ռ;7ބУONe̊-jw#f`Kk"f@&4 wtCWҷɔF,ow!$q!.`A2a:@#o;'Q 8'˝G;ݝÿ1HQLF>c:y]9E$ !n@U0d( #fXZ jQVpP"1gH@ ˄+4C]'PoI 'lFWbMD;eO' yRf4{sYV{i&"3!\@Yǘ#(pEk X2AB*0yT`=g^~bc?`""';܆>_[D*;# ٤B 2ZC70ʚHa6U~O¼;0܏.#d:*3񄻀I !q D'fyDQX0CyyfQ3x&t0A] [I0I0GS3{g3aQ)(5oBN/N\=#~Fhi,\N7066|jZ,Kaư~q΍8 ;e ߀RǸ@X&vDN< v%K㨏WF3!jB.NQ[,}9ɥEa ^~AJ1d)e KwǞǰZn""xN/Kϳ9Po%z2}}Cf=Zv8_],t^j]ܧ0ѝDu eKDGUzU<%^߾~9;&A*9dJgr Q[d1z\py!bwZ}n=/ +GUf0 (i7Vp,go2-hQ@n8cfogPL=PwB:= Fs΁xs&Y{z` avC0;q\gصtac6S=w1yxf09dg"W IL5z5qY댹Y'1c+1avŷd.gy ;_Мșׇś{vtYy 9SYcb)y U~4 X @փ3T} A vṋ`ks+(1Zu ȟ4.aVR?.WX靭]E{*g]:r/+ \,59O6Ȭ Vʋ *!<ÿz|O@Ma|^ӏ2`BSl x,KXj;+:]f!U*z9.sBol45 Xq1*2d(#6 P|g^B܎$'GVzQ##S'@5ۑKCKsCGW9x9FN?=<[ h=ذbزNyT߅k~ wcT ~. 
r 8ڎmJʹNP_2xUd`*Tl^γz t].sk x7jfqʏ{~"8xnwEҌ36~l&m4(a|&,iY\(-oL&Ҳ0jx ~tq3p4v.ڼTnTn5Jt,FJ^E;}0Wc_|\\gb"_>lG0JL1-'~1?~޳؉OQP1f-*͖ޭ`c n)}[Lg.ic+XG>BCHjL!;/_[¶+vɋݎz7J;N4;9ct Gz!DPϼ$y +*^БQ t qHu0Q,jk`HKo_xa5R4&ck05+8("!@>k/~Ԝ.܂9Ve'3A.`0)%")A /1#'6P:Ջh3ޫdW'BкԿeS#R ysJT*VYr"ꄡ01Z{: fL=A}cDI&]h$N%{Jia:S7F嵱@ByxlvCo6 9I7fz37s{Bm1&bQ\|#r7~2h%'XE"~x{]'ʭ ݦ 78g$,]DS#/Gs_ǍPf>$ $Ceqɸ6|4!%b X=(xC :"65fE E"lB&7PqJづN~$p4ن )GA&LٸR=P3ga׃'j+dVVދSNE9VpD  i|2 假8w" #NAHwC.X"8,_r1 cJ]n`d6,P47f"10ѿ(7D"sryʱUCE W<_xY수f`%cSR:q`3rW;(&Ou]}EMN>M@?69me9 =< {!pz?ԇ&WD 猠cL^r K? (Ӕ$ƌ08{,.-]S&"FI^taVDx^cD2%NՉtCGs9f"6dϘX@4' ^h`@Ĝ& 7RarCDa؞H&ڊafn^Wi:D6C`e[,,阴%C4}0̀n.2tMLc^o$-꩙f 5 F`h!Vn !k WGa-·Д:vr~®CɃfMUOx*4*K46M7;~'ֶ]L8Boh{=i7nIY׊h ")[Dfhjeun53 /B'hv9["9q̬z"B V!|@ix8Q !͸D[  &5F9ٮ1Hzv;bQ96=3=цNI)anur됦;JV8FF@I(A/XLU N)1j"ɈdN] =5'U|yF)i%WtTG!޿*5*01*6pwS1ȗm8+b(^м@UIA$`i(qOaR7l kf[VGpKyA^Q4yɌI.O͜x{]9}8"nܧjaHIJ TثYWe>-,U=z UsT0UZ-p-Z^gnUtؖmgjKUjm rfU<Ös/j0n<^(G2ȯc}At443 Ѕjȑ3Xr:D*xa[x2̡-|35Hg^1SgwtYBWmY̏a{ZLk~˪ Q,QC96F;tlsL;\y^NPڹ|ǚ-F%lбdbX2Gx s"Q9.fHser sDv8z%α2*p; 0NJ7eqC|2`t, jgYLm$92=ٺ>済q@6E܏o\f"6<>J%lq.'e:=N{%|J,_~+ ",78X/kWʬl_s-[ذ3=3ZSɢcf Ի Rm$i/w B@ˁ3ay|:#T֥ FhbWLP<IKiorsL&9J({7FvnBkqzX(%|[As?k<7{vec15a R@3} Hzs3Ca77El^>HŠb\2!b0mX6dBmJ@oSύ@Ŀu{?7?( )R=*HL+-y3xʯIΑ iDZ\5a̘;Qz l0'@ՌL/.gȊ&oYrVxRa*Yo~Gm}u1v_-Qy-asLNDC7 K@0TNF  2 ٺիj+: )LqGEb#Ji\#y灲bd%>o[gBjBڸlBxQ_{t7>}GCPh5 yL:˵֮N{)9}K|w~0W.>{um%6bҫ6pc'/a"c1[JK;dswke*j#]bIC"D  ۠?~8}~ǶaAoz2w awV$ev!s9$o6L7GosI_,(YV3?E5UfigjPK(3tՖfӯ|zXX3ðUU(3qrUi,yjZ,f<8|8wYڴqZn]d(iTd(iܺ*)L$30/ ajY.{ ?7I1q96GA$iqfeuBtIQ>[J*F^UKϱetslS@p=$h,g(ҖR)$6.9Rbɡ%%"DždCM]?SivPH]}LjuO]]L=(q8dOxU!a9ypg+;p7wo Od|FP_q$6đи&aܨ=֏;*s9lMԄυkk")&n=;%HS0[ɨ}NA^mS3cE0(ά,mSm*Fp3{QFuv7NanĢIWV'lezjsȨۢBAeX\yJX|S7*QG'zΩw?@x=V*534kRfdIuggWQNCq:d3̜y1sNMoE(s1U8'o=o3oHķy1ڐw{FNykT̨=@r=6bۢW /G(jcuw:fiDA ۻ ۲~9%}yi{mQ^MŌP}D*?EA@Uuҋz *PEH⎱ƾGqZzW&=>&w"CdJj\TTej^V6-j&QE,?OdEtl:E} ~Ta}* k w1 T .9ܘs;7F=qrJJoCi}l=0<3Z'k,̅mc ٺtͷLR&'VEJNJI*LBL%΢IU%ȌTͤ@;ۘ 3ۀ G:fK{W;) pP|a~I [43Șa0X⺴:`bA56, }& OeSDkӷb'/=0;K3ak 
#Uκ޿m$۟Yʳ"EWɉUl'[Z3d%I@ʚg@ )>;{׽j3f*\ͽL1Uȁ./'CɁyT'-n,Q׷sL.ocȻq8d;*Cuk0h9n7HAChFWSP0<`^.A,^DwqVE$"qyjڙ]A7Y4_h5'UZF3#l83G/(rZQ+J> SD`rqtڎqЉ)GJ 7%ȿVU z *2)ٷiP_HWYl6B4 IY+9peh̅GHQJ|Z"Jԡ>+?3A+S|vXwJ^3RX8m^_9*c_X!#9JGM v#ݖ5CPޯ2wɂN>oG;-\8k+zղ,U>0Y㍇E,gTSh1KB";8xR(Fo߼>?^|?pW 9 | tF7kY̵A1@5'%Ε-f}~|ݘ {8@8ހυe<:I YIa@1 2G }zď,IĪgԹQ{rHa'F>:h=&(6`^ȭf.R7h z'o(Xnx` 4L>S@ Y,0_`b<09$&{>R(1F"{qb8(OOVm87'ߝ\8ZSO#djcecm t6|2\!b~!$JOrmkk*[*!R!:BeO!(xo.%iXTR^>$m} ^_1 D  ?ysz~vwZ\(8)86$J0E^+dyB mENͲ;Y<׊!27 i< /Y@\oR yV)[-ȷo;[=;!?CEX@5QR[X~fJ XCAeJWm7.3Bu%şB'|Zgߴ+.7VC{OcLUoy~}1 )4t8/)mu0'-1պjtq>PΙ'(A;ʉNiʔUy ŕuA(-x! y e;QBHǎߖ +~1So($]T!^s.J)dnwm[ lo:Cl/Z5pZݶ=sMnC4Qiu~-pփף r& < >/p/ a$3{iʌIǸvq)Xc?J*X}m,v8C9^ے|Kp^S:*~l,Fum_־ز.By]P#^(yɆ=C1r`㉙Fڇ {' -;MkXy[`(HȘ52$@,?Og9x4+M^|ًu6^lv6qѺj#~iA Bf{8N-1X[+18  !薭KrOo~<듳7/.ֳ `}o fM&Bf(|qH= AC&0`ɼ-zqoQAc:-7ƣ6P%O_^T{:O{Z/<(g@  TC8 J(y9;6Ԫ@ef7)&jiB/]OG,7d.vÑTQq>#io gj6r;z)'XL2|B,XPm~ 5@],fgA M( Ez.Z)Rl0GŚbY(/pſ:}~~) ~)K8 Gy?!^V Ǫt 5GCD$A_SÖ* kBV {j?.[K QXpvf).=:̪ňr+?k1\ZO wʓLG ;ra$c.:>M/iy#XfJMZGeu,YVέ63\yb>!H× m{35?^uisewZl^W.^ `mNSwR2g(Y$o۰A>ZL9 & F=[3H 3bkLpDw] uۈU5d]K>>?2Q0u!*ž? ^_*؆nQ HT+6gߋ3!glmqwW/O~ёs EY3xRRB ZךBC+XXbfn2!'ؙXJ`!>B/F5bܠ+R19o2Ȧƹy~[o "hzu{)?y`MvG;1w0%B)?ѵrѣrm 6FYo. 
{_$Y5{:8:^1/o~0Qk)ʅ' 0sZjrUL'%@FJЌlBϭg0\e5˂Q*dG [)_}Y(ғJ*w4/}ݬ]/+f|0X؊Ökxjk2w>E$K{OlDh:!eeZG,:;ߑxp]A#*Ot!@_Z%cz3+nH^i, e SS/5|_D2uL6+ݔ;?{L][ [N;WwRB?D j;sx< # )SAT"x50n% kՔb{|g l FA%h޵"ݜ a` )%D(|dHœ:4ҡ1 aپ~yJ {Y 蕜Ehhπcɚ/FòNq[7YYc-2v#4+075 >*GW8w2r32 5ԡɆ6.04s#ag|R  A.>^oGS!dX}v'rSs1qN3ˣ(56Z)~3*#[U~[Q!Z0A 8!V}E@BWa#x2&T<1uyrFkIVN`q7^ w r *Υ3y/g=OFgL07lx^F e - WB=O mP淈i{ǣ10G  &)dB^\N6DOםcrp2{XԂPܹGlQpN8fPt݋n%yK"Nd-eVfR*c& 57C!h^F:9;^=X AΒiSMX_$}WJ\{YMk{6s[7xQeomY KZ&/!Y5<17;Y[+R\*!h#31sJ*Ife[mQ^3W%vdcc~PMZRԊ6o}~򋒫RtY!UZh^WC=U9=Jxk}j*3׻+5oF꥝F,3ƫ%(}t<MX 钚yhJȚ LUmޘ[rҧY휁cԸ1 %d;[}5._eF m< "rp:~S4͒M?a8xߤz+W@YF1`[+'„@hEcL/H2~=vDϙ/alS5SMSZR Dk]٩.FX׀q 1R>]80g/*aD% ;E*/5%unB9r%DVS|c,iFm/ @NNC/nx.+a_^{MZ)#Œ"$t-rٶ9 @d6O{2PVnV߰lɸ1kI׷1BGK[]8lv?-v!;C2֙ ժ;E̼`x `0$}Qfh*-fen~Y0~D _8 ]J.?LFI!B['\f~tL{|ͽFF-ֈ|Ql EN2I^&5ۂS5 " D3—F NʠDC@ZB({-u jN|'AqzpR ԑdi씟bsAcS47!Dlf>~)9Y亟504]T]N PG%uUȯAeDdrȅA ֌&9|}0S&a(x"dT㼔L3DvfW~C[z>QB)`5 )4M„҉?J!_nx E͐(] 6b,JC$V`8#{4c0 )r~4F`w}xxlXU~)w%Ʉ4|@zo}\l߼7o.eK,B5R'}$,{B2iǦ0KH՞<&`(5eq0…D?fҵD#i% bs5y#}+puE/ i1>?%:6C~ (Uc3!8N֣kI 3k#@%/KO"{ s!sm],kg=/IHXg of~$9pȃгI4Lj$g-s.-e:K-0R)4 Uu&z"!md(KH,ɍhMx!V^ 6oWSFʆ.CS|,jyPÀ'l 95'qoEﵙNBYarS2ܠ= 蓯z Lzɢ)oYzai& ^S vf1v1ǣBnz̓tz`< 41KXboT",PJJ"bTAo uqEo ɑ2K.6=]9+"V3AYqj%)2G9im'JD 4/PjiLe GqtAI['7m,a=|HhF ݲ*KjOg6o eVxjXg ~hg8؂j+%g.s3obOTtQJOWW;ݏj9xa-kWTD?2_0E]aV<~E[&bT_/Uo 5i䅥npnb!6>!3"פj8h{Mpd=5y b,53antx:} ?Qh."|fPRڋa{nuE[ z6x+gԯ`A.Y]u%n6HA(o X}9LVF~Y>`Jiaԯ_\(nmadnMAּ\h&l !!f C0t,ThpE{BC#(S5E9M˖iwk-"a&L{m =$T|Y)ʒ&;,@ϲe-E5- 8b̴*B,b>ϯeפ:_zҪZ]#6VDȞD]z n S}ĎkpZBZ5+V Xa*n׈`2FGs`MfkZtjfপk^iBfh]΁dNZ+鎪`l*F`Ehcz^cph-jlz $æ[}y?N6_eGXv6GfҐu ⎾J|=@fX-_9c ޏa{6+<#<9吜~^]ÐLHLqD@%Ӱ)P5P젮)Jl+1lbq"FѰ̨9[<(6 XUjaGE!t/{?s[7)gktl[X+6{ #^U/F):}#>o==PٟFR~=8ytaz\Ħ${-8!8owFĒUj$Ĩ\b.tT䞁vx= gs@Lrc^3uJCKvr:l\5;Sm:_$>:l;5QAi~x?J>V.mt^^Hi0sT@1뒶w̥ܥRʓ/j&Ҽ0(ٻ%;+CHf"9%B!8mylLQ7xUiH"g? 
qx <_/'Va*;_PjY]g6g:Y&L-XHm<>j#ъ7EQ|}!= t f LDKxho{=S>ڛ3"JGO~g;G&$A̓IIBۨ[<@Ixqs ];yDFE:4ۯT mV0 'B81 VÕBQ; 6u[TYݥ܃4`϶.l,1z*$=s?]fpO?;7Taڴ~ai.jIh4xE}8RBK $?J$6xRk#DJwd&">~0J^U>/L=B+1d%,6%8DX#\yPt1KSGlHQIT Ȃ ˕0tl:U!Z[)q7hRՒˌ0RdJrŌ_'A25'@Ie+ygqE,~F6ZV$+5) A.gzi_xًw죴˧ͧF2pj iHV>zc ;W{O6DF,>NO*䡨gFo~)j;B&J+B8]+jXIz<2xзꟇV C꜄ɍro&5@Yn(ދ@dd,@U,볙@խT'D. Xą"!lJR4E+RcCHߑk"ːZD¡Dm~קݾ] :swzE:nzwQ_%x鍽wԜGn9 L2|z^ 4/\spg)-!. :Oh(Dv<3B"QE86u8Y9W uy2@X?q! a@!FQx6ȷzwHРdwMYE<;X9.Wk> ȟ8ǣj34Z_WKj׍֙}a?=YϭAWgv -{˓ir; 7tv6WR._}^?Ε!ô`q"fBHhd8|4X}KFZR׌+~+rIF0 ,DQlH]&,nvj1a';"pC_b=_߿d*d<]-7PԈSdYX9e>yas5cygUY9nVԬ_M*,KLE*#ps {K3M4E!doaY/^k: 6Oq.Y{{_[ty1-Z$/ǗK;5aыRεe}p9즔{kc-NэaV/T]290S[*ZK -of #8D @[Sio]|HCNdOJK*׾ǀDƁx65.}ɁX;M Ro> #{ mJղd俯Z-Ѥae2// ʝeFrftz]wQ6/%R ʟŗz.R颱Fx!ODt&fIQ봽}GTUlwEul̔Nu#wt o_ԺXs9ˀCgUo:0-.j! kD ʼ6n9Իfk#`ioTؤtvE4ඔ^Vo~i;h"+L FW3SYt8g Ut`f@0kix]3Q;`m DU\~2|ة[K,a|*R8X[sHA!AgqU]ܔJ,۽ne-eYeVVNjaPJsGint9=?MO_fFkp1MAձ5pdz5:;5ԤUhM CԎ3-ԏz3Ԏ)Vf0cJ?Q Lff zyj^)mFIkDsdFh$т%]}Dcg i& s*|gK&M{;KEOX޳{GJV.9 )-jJe98W Ƞ@זx k|-z߭vu^mOqݱ3*%Jp~FnU{Z)SOQh}qjVaN՟PP%X# @ve dJ Cg+Z,THx- M,l]7È-<$%[R&tH_3D>k ҁ$2քVdh+09p!ʙ{B2x12:} 4!Bu\jJtOiZ}s9*X27AyUjj)O?V2:6@X؁/w^K%A2Z_\BHqA\#kB>!LPE%O - YC = ke@eW_#3 fǰ(rn{&ղ?oVlBz gϕ]Hk}SFM;Ku{w'ecb$2/n '֌;qw  $ ,J&הzZMOJ2ӫ܀d,]iH0%Q\{ վj`,h ;"* $6 vS|GV!; )S)[nNؾbX_))(ȪP,h&ck߁H?h!\g/&Ne&M?e0NE>~lTl#)wg7S [Ga1[@}x &nqi(/0_$@z ղ}16ljTGJMj!s[&H#bQ7Ğ[fO/ sw"`w^=Ӛ?ȇ8- !a%S)3 =(sg~L.ɡ\Llb& nQ&5 Pxp dkjb 56.Jx6uPE1B1\g1.*nnz']WP) NAP{ݦzRyO]շ+=y<[^ (tǶB6 kcn:.;'7eg{ Oz֋i0 qƧS%B3r`cO pgviC=i6\@8Qg9!vןcASA[$߰N p8A|yx=!CY,ܢhatuH ng/._و2&!l@.Nq\E>EFa2 #-m$Ab=o{w'Oo~<듳7/.ֳoN_['gzzԉ$K1g.jO条dƏA II0 .7ƪx;#옫©qSշ5]]Jg;Us>1I;ชa#Or`82ikS7 !'l [pu!D.%tuϻ)2l)Cqc ׇ$.CPp |_a#:윪OrdXnĕxNC!y4c Hד`@°qz?աH?Gb,}N5`s{~ j,p.Mŭ3hM(4Q~UWXxO+;i>nlH5'*S;DCr@ٍ2NpbAg/*D6J?<ћh"]Q_n 3^#-TP1LȠ#[~2H$ƈ) ;} :1DzBƼHMܭEF+e ] Iȏ;w|D=]HmJT>*r<n0uXDBXlxzvSwR _QrX|B,_]74oXQD}+(0yw\q{kF xz.74NBO4" -[K*o^p>(6d?0aP6>h1c-æ֥;'٫K WSODzII*)&\](ı9rIK>'I'F` I 
KJeebEF@IZDWU'Y/8ۋwx{XAw|C|3[>㐰*ٕAW&"Ti1;YlʧQ>V~ΫP2R-=_PUeէK/G&F"%.9%V}zA%FA@ 5i.G REWLd+\SA;"HKF !߳n~^U+4\!#C?<9ut.Ҟ\x@0@>eˆg_͚̍vO̲j!)S>(O}z3' F]kE)zd񛦄QJ0q50=)uSϗ$mjVTxهH-|6 +h4p*o `Q/e4%S@L\< : [#,"#o<9n0ҶnGe2kPziՅ+{?w~^}TsI٢kͪ(ݬ\tVbX-}f5k P=OK@:}Qi6ΨVy&ha5h"#b>QӛNR u đShs6QL%gfa^|e:Ėٶң!YN%Ec5mO/NŞl]rmeQOC)f7Vmslcr^NU(sa 1Qj yr:\s%“l /ƶZ6A4*d3(k%fYw7fe~Cl)){J)}v˰f頋,tmt…k(Xj[v`/ֳjrQDNt[Н0O EөS} (L,a-!cᩕ,fƟASĽ0J/,õJdt'^vgr>8E!97ٳb;9to7nˡc$y.-'0nf7 e&Дpg@fUe&XY>BXK'Cp`sz͓Ckҹ1b[`60%]O]hF4l鐢-_\THE~gW0uᙡ>l@ i,#&3qO>`Wo狓3a燂O T.Sȃ@ggL0xL)ŵ85hqiU&b.?!1tg MKYmLtneu t@j{Ū|l!n?7Ǐ_c6lUkf>)mjaXp\& k.iP,=4;4X.ہw1r}ֶ⃲OiƍrgVpy0|!~crXkjbkʊP}S?v&4W?S  S~·RAGwea'!Jӯ\1ȧ{&:;TS8?)j*8g/2Zmʻ MȘSENj 9;^ [ꓗAr$78]a eKz׌ <ɸ єt //Mg\0{ @DeίdEM  >CO W~"!Jav3Џ5n6l_}>$^}c khcN?JRr_*NJG1;b{sk-O\-vADJYA |S>e~A!sMb>1^ (nY/`w wdrV*咗°`l+}z^mϦ(g!CmjdVj2:W~5f+b~5Xf߶[Ov]I?Ae Q&!Vsp{vx]fLF>@S/D 8[yLr<|2cqirGغMn C]];vFQ}KJq`]- ψ9 v!=\pClL;OWޠ7=gjLF |eƽhdF4 "_\ сoVepSg^OKLR6:/Anqvj0{Mc訶ڈBݐͭ(7 (hu5XL|$h=Z[n’ :k6܏")NM"D"<(=0t4{jg̚cF$*-BLgrza/;@.{J+wk!t!3k'aqLZ^ˢԪ>"m0\ (8{B2$/<_FLM7mF`Y74SEitĚ.WlP(&Ȫ;d˝Fsy?uT/la7?ܧ_mCNmz]쿛^eXYg, Ⱥ"_\hyM߾=}.LD;!m} J gZE`K{#KUdFR7O !#q$Ǜ`:Wlة~ \CwŽIj SI/fㆁ5a waKMabTrQsHq W4CZ1vl2 7R%PhI HY"Bn:{JZVWQ-ыt S+$EY&m(4޵;%px 7/}!!(Zjx$C7x# NQ(B)d`Bƅ,<"#L"ʦ x-x*0n\Wb(ڂ}E3CHD ;k7vhtKuԙp7E6o8mꃵB^R2+>xQ0H4ZMTDRK^c`C4fY9倭K+mbzTҠgf5.bf"\ӔtOʨDS{+-ʘ/%/SS2>t˞׿oǽu-8Y;r z-Fo߂z5uӖKT2)oa#Na4 ozڤ=\<@ Ӑ>|蓶44ĵe<\T3[Oٌ{CIxx[$xw?J!wT5\E mwO49b͊ݒJ6:%vI5$m搄Tx?K9 KN'o_9̜2"4(Hf@ȍ {U?AkNy5CkZ^C\"/oW<պaUBqR[T|%78rNzg4Ȗ#͍?)- ҿz>\&c ed-F;Yg?UTH@Ȉ^ {츼"nCQJ%]^3x/F2?BHY,!7DWOW+T 7Zu^K_^PR VDց~&;ƹ aYZ/H!w,X~^ӝYǼeMN0w#}8 :Z?e_j1|+޿e,n h"^T!m`Jl z?|] wT|.3<0 \xqЩx{N,RT*,L{,w[phܤM`AZȞ A x5_-=0\6.65" | pP^EW[]5Ф>s+zz\ j}!/ښUR$o#N+1w/^j`}5OTZIאw{10TҤY}7g/,4B ii`FN! 
TNV fSl65{0CYBqQ4v@[CDRY0*) S\1C^,AT/jaLp{t™Ͱ  n͝3o0A5beG s'jX*d;xny&K'#Iawc 07(aJ1bh-06 bN7 5h&ELar"V[6%"6-yIX)}#a=rb1 &CC8Ldy0pIRțJZ|~ a}ybdžJB+ N g;G'4͚"?Fa8 Ve1b@3Hq}C0݄{ݶUHc9bIrr`Q<78#FMϕZٲDH\'5䒹ڪdog?5o4)CQ,ڗM;#4oSmxxnDV_|Jn׵{62[?9l@ 4*NW(U{VJQ$HF.RM̷-ZC{N- :=jYoqvWfs;ѥOn߂vE!M?;"%1Fnp~CIFhjyelfÔNBp)-Co_ *2Т(늽Ta.!?d }2 S@ @ G6axݧT;ǃyȞ&<v I8~{P}YQrܪ) 6;E0nƚ&#βڸ(ȏ?q{+B_ܫ od*9k#G muydq/|*U=ǩV{>H9 1 nw@̂<9E~q+k^Q`UTZ 8A"cNL _ 8}usast#WWAVGS"hQy~ {ڞDO,˟(a&De)poox[Oͽ&J+%1(A"d$_KJX8C/0Y $#T*JD` ,{K':dGPY%B +o (eP˝`N!j\߰<>`/ZJpF ciee0b\J=c f 4W@gfǐX}EG:?:.gԔ{Zhv@O$,\4}1>dbl ./_yy*7| ܋(XJd݀&ʉe[SЛ1>@\?J F} Ɣm#qVƣn0_ӋeO8ev7*^5(CR\VRk(4Ht'aPZ tuZ@b fY~ /> nA&s894lja<k\2yuXvankmwAozҲgTuL]/c;;jQZ٩rJz02дN:RQGTr)S=΋P=غk>`FAKMiALa0L$r¥>'#}Oz(a23H͎O$' XxRUrZkTSqZZ0Z\!~+Ͽ˽?$HP;@8{w8|̗s=fEw\\8:#םX~;X6r^me.V Xo=ê./X:l^7'fbG9u3;!J8l7 =fi݅_j{w,;YgyAF!XIǷK75_n֏~LJ| ?ZXD ނލwx_Auw q}|pR}4Ш; "vt~~UwyMBcߵ#Zd{KQ-I^V}!Z*1TU׿ˇcφXҞQxFb6NG&ngfx x[,KA^ |Eƣ`vПjwika˯._!"cxAJbX{K~>>nR3 8_Wwi9hH/\ }T˦1Ӹy>0Ƿ'xѪ,.qtk'S|T_4t# _j]Id>u\d1t y|%x(J8ӭ.Hz_==}}y QD[mmƞTQݷli@:=;klWvRQxGobi@\ˣʠ-?=Vc 9.(w+,q2EsEA :618 'aT-6 xYnf;{֒o:g{t,ퟋ諙VjAQvCUF>\]j`#L1s(9(=qEE;[ xѸJ1,$krȻT N%ۓТ#> KG&L:ϖL2pkC9X~k/F1O:;"4v"Po)t$B˼%qF.,%+1 @"7Z`O:y}vf^(חOo4h|uQ oi3]]bdǓĕܢ+|>JAmRB: p:CTQ/TVTLC6nPà $[@>#4kᩂR,.\*bl=~x VΌ6.L57f/>XnPy쓼ݍfʋb|0ӹG#nޢnSYY%U XBjQfeLM<Ȧe;\2ffI "Ht7U0TpBpfDJ%^zpDB8j0< E R(+wiU 4UPDÔ*IV $f<$\\R9@ n^ԌM B[3qв0(lf5V{R"gBޏh]l"?r߳ԯ̎Z8\M*ۥ6tՊf)j+ɇNa~%lsA)lB!"0YFde+P&-I<$U,]?ۢU`lPgsV,>t/f[nVg7{J񟟁XN?0P!bapvNT wщjU2Dd\H0)| ޫ!NzVǎ*“Y>{<)3kvL{UQt1'@N|Pv̯!<y۽Xi!ۘC!0~$Q =>^s@ARe$DD3~&=x?X$ K`u ngmVU;)w_X/[tV}̷X/ilT P9hѢ :Y&4H|~p^LDa3ahSߙHUєP]EGv0 j$"cфbc/sl|p"_.MӋ3]<{+HYxQ4kw5&r&q:Toeۻ cH4zVفJUxQުIr= gA|諸k@ݝ0uEY:HZsԇaF Ģjۏm O._ 7Cngilf.d#gv||isϻ%ҟg}0Z'D_* WYhwwVS5[e3"ʚ(W M;Oi5;/ :E衵.OEъS%ΙcZq:aV8#WT!38ꪧQ.{ٕ:k97YZfN^p]桶2LcyF0k1J=ȍeߴ"pCge:@zK2|F/X$GYou(U5p%6PzNk! 
@Ni)99իwZӋ\;2Ofsrl- hjTf =e C UBnaxUcKl_C)\j_R, eKV:# &Sx{TTR8) mLYZfYrh\eΪzIw7eh@n"g Do9ݭ?(FiU)YiQ:L!286sFcN灚EӘEoT͢ˆuO30ղ ȯoE Lf`ja"*>^Q᳏8U~eE:#\ #Ba[S]64ʹAŋey8<-i Rjn\N]NTV~eOH~-*.? yV1^,{\~U(_Y/rAxcQ9D_w?n4}?=~b݆ k@` r5̓D 80 n0IL7+%N?A\x=!,hatu8g/._EWd'F"$687ֹ|J| 9p@REUVK $J^;N/N.O/Nx 'goN_\Z篭ggOߜNnSԜ ``GGre8e]- qpTD ݤ(? rDܤWEk>SYXp8C e/eyR WQn+c]0HhakBf#e6d/Hr`` Ƚhf+!6쏃7 a*!q1CY+`c,+Q %\dT3āC2 %sG ҽ›],̹U@5Ujt%ʊ2v.W{ ^_#*`$| :!xb%TT3dp >Qjx6ǗRFV,k|_PMՊR}{\ct,S>у8})Z%[.=zyī+|G8ax=bQaN K}3lu)t>~6k| {1/QffEQL+K]Yf BQ,Wa,RqPû߂q$`ҷAHw_kv1wETc}qyŸ4+ N_IOY62Ut 5 хg/vW-f$ˍ۳α)Um ȨmV%.,*<lRF t~;d:2IMLe#*bݑ騠6Q#\тY+"cxUC(4=ڸjg]>y͋חrfo:^L-?Fl?SX˪,3Dc?f0ۘE M'Jsd¸#-beKqc'ˇ|.+DͳkߋZx%3'Pʹe04оȟgnk_R|?e8=kU9kg:Ҏ;ǬW9Xrb pū 2DC _%<=T2y/S1Z7՟W+RV=D'L\H껵uX:mW]XY5@CHqsHHL8Gʆ$1,>(TL\!I p&왘ޖܡ7z1O7eQ%ώʨ_PYH+\i8 8"*sfְ0L_L&h騡81P)'Xw 54 $n?0',ofe`lyTfnfOfa;FPL̷ XXPf+ XxZlGJ><[(6  ul cT!5hp\0a".~9psIkNL h҈5H) >;s7y h &!Td1KRq]룃6_ęׅ=ʊlm}n//Qa $E-ݕiJE ٦]sw&??]R 謮0\H;Dy%K,U\,hiN(j[c炘(GFp6]#[;.؜I;7g`DȺ!}1~%Z7_cB i} %u)lLrFBز5p +Ξf5WٌaAYsރl^KqD+PH~(3!0 ]3|8K!hc'}쑏Wէ|Ā%J->#mM<W: 8x2'F}P6H2V~& 6NW^)en24v(rn9->,5g a~,%FJ:g8iӫЈ!NEJQL1V#^{+o)͔RI@IrJgrBm6LsD]M=׆/AbZ?A,=E)09{tKDB蘒z5qtVV}4_.Y3ך]"$ #c9bˠ7ۨ ۴>`|ɵĢ%J<He Zf֐h34-3ljE"dy2,Tuwq82!/#d)Hn^_!З<caj X2 㛭@kE8/]#̙Ga^x K2o,f,<?@'ga @ҏydVaz_ޏlvG Zt3r:44ӱ!W dd8 5]lɌt{ |$g ʓ[TTxD=R1IܔpU{y~'g^~g zPǍ2ǩ 54x+1t>1JM:gn?ݐK; 6N/2HJeWH奯A75Lak\Y;Jrn.MSsƥ4̸iI]^'tF\7˛NF4 d0rE(Lψ,Yd(2_ fe-B+am1ZKb~qg|c nU1[M 3i,dSYPS6Ѻ꺪@r5ekx6-jIK+ѲauƼQe+̵z{*CTWTYUsZQV7SlQclҸ@ҫ66˥5*x^2qƅugdKBR޸LQcMQ32 ͇ތ"YRB,{/Z4V8ojLa$e?>?;_S!.n gՅlfe; |+0`;|<_z^;xҁgm@,_[h2k͟Zl*ˠ(gʰAE%$EB:?*_+ ,.c h<2 sv0>x_`f]q\O١ƋdX8>4.\zcrU,'RV[.Rګl[fS[=0I\1۩\յ8%mqHz uG&}m>g"pq+KJB+wF #$ZYKykUu&0gtr8jWm]@w#*Wd[Z .v|kvx(~|a Y~cUgFujMhu2z HvyЄMJlrwB3,`Iaozj:Birc \=}m_u$MݢG⤭RdkvY]=<\E㾿^uΐ#7Cyg2`C1jIs?ݽU f!;*@6a7;*.6?}ځJulvUI |g-~%c}lf|_Yc}_zSlQlWԎW&R.]IQ0nzK=R(/q}Awt =4vX1zt!1DG5 aBV-'& 1K.&pF:?խgYhx~bgrF|/˹)kM5R~dgm :+Lq&%Qfv6_K v`$y֋;e-JdklG 'x㭌վ%d|Ō)}uxk7 I.k+H OJ7nlM^1m&qq8A/4jb+]ύT|=^|ݟ//t~'zX ,V 
ĕp:,zچgm1v]@'e)A.~Q0e'a S@,*ly#X(>DW-(zast1Kmr\?p Y۬C" / ?S2_0/r[׽o|1ſ +VeC`|ϖ{ ȱyPUZм࣎4bA(yPf ¬QZܠA\/ ?h͂|:OE@9 JČwgi踱Z=NbegY&`KWAo 6 ?ZMdHuh=lݭgL_ jJ'HU<4ч9Ϥ܆W7)vS(_hjX[a 36zf=;YOK <+'E5P+Fa+30_E*(o0q{!5VSP~^,HBHѱ"!-,3ii0#FO̔}},&)t6Z"ǘ 뇟-]OoD/7pZFQ'Pͪz5S4Q>U 60T4>9,e (lLH~H/ʛbɲaԲ;Z*f:1֭y;*JNa~\3 =s%P']Q4ؔE 9@w R^) ߸nO*r礡Ah@ rWbުe:mԈӕڧݩT DkbUf7ݲ./c ;nFmUgr_Xa}7aCSg?ZL9 &Rߏ 0>41hwOƻ]OUX C!qKNK7ZG~O$@y8Oet@d.Y|HbCgϜatpNnDj)_ KiI902䊤n/^{Zhß9Fg *bRb" 62`5ڜS<RRƗ` e-k(<<_pܖoZ,xgӛY 5%x6 ]kЯ qj(K'܍Zȓ&l-AP,rkS L52@nh5rʈ>7-fk<1Nc&5۫1`da/JP+]i(Z #$*k0AЮfAa!j5Hb?ݱ;ͬ׮?(FO8+̸sYL?UJ곅'HAv@2Β"/0/ena5ߏ$?̈M1KEٱS9 c\wS#]  !Bn};iH먀{U\Ԧ7ύ:P/ͳYͣg 'N.H:3TRLHcG jHMf)=8ܵF[Np+F+<ǁlnϚُuR1pgG]id SC~~9xQs!3NfF; /{bUrL;+Z?6/VmGRkǏZs ra32uD8. w$tx4!Ț`p=|LaF`;SzArВ./}$K#ڽ<ز[>#S7* *8ZVce&f^Tʺ8N)"%Sa\fXSMhiAdP؈Th( hl9a 6DcK 4(B S6#FBc=Xm6UA}Ft "2 m-8|\sm6ST<|emonZYr3yP0N_syFd0J4jGl? fՀr<O$*nMgkVll@,Z)H?~H'ID8 WG~vXKW>HmQ2>cx% @ Nn,/#?].}[3wnJքX5&}}dC8aQf5cὸLNW,JW~ aP;fvh)uz?눚VDc5LON*ˀT_*L,~FrOZbV@KNPxqډ}`6m~~h 4Kԝ$J*je0R^4^!3@+[XYE#Ɨīs`?R&(5[WOpV/@(`!֝ZSN<%kz^%X_ES0EHi뵍O JPa S!9i\ !=*g[;<;PY8vZS\Ƙp4ɣM{a|d0@[g;==a< !yev` iA 3iZ! ~ A6s9WB[FA*>#ۚ}0#;? z2M<ήr"7-@˺=_j80ʽ@^ޏNreoGY8ZFxMk!R͝+_=b&EAƇ͑V *] [׋/af)Xx1:-J:7V|ЊG?75+?oB9l;ݬvQ6No74/OGdoYhZ^X/?x'myCVmÙC.P pHڀ^sy[g:SRv%fnvV򿝔zh; @YNG 2-H9C$颔vn?A%U -g"Vg~C -Džf]V<$䡷NiQ6[xXe_wsVU]<3+ ?< "|đ,-h04qS8"/a7>  ?,oWe[\dG=6QnگbKP]#.o&[z§;`|0iX^٧[D,"j},"/V!ݖ  lҙñLs_ Z^;^mW[ve`SO_l;!cBVB 0¥*ӐG v[}׶@VI =Pey0ڋEx#?J_]my<14YԽu%w\{LҰ~ o@0Qa?L9m@/&ș ׷1瑹G.sS[꣫y݋⪻QowtO~E%2l|!>f"ҔQHQHuvL rD OnX!\10|ȔVolDA))-21 SƩ!U31!PpDZjLA7pm#IVz'l',@Xoik.0X PMEܟ/<Eٖs $2>:)}:=] rnNߴ+RDcי;`"r EgzRPp$YO-:=ӪʃCPQvNhZ^Bbﵪ?;)9ߤ 7t9D^;p&TEE7uCnǿp^07\%YPñ s\I&-?Du鶎:Ѹr_7*nY&=O4L8L x1dIe[$U_\y%le[o2R8we_N3*7%Og5gD~V5*&]U2{t+M^~8 L&Waw~e5](\!5˶?Ds0E șŐ֗CU]vW7\`WDGQ]egT n?!vKu@#"e?'~}t[GQoiu:4?vש3/w~W7O׻Nxq#`4s߶+Nʊ@K? +EKH ! 
|Տ5q  #3r# Ʒx>L 'A🗳|[YV\'{V  ?JfIg'WG=18ƹ&z artdR Dcy韼);ɾ㙻j(زl'38Z^o ?m >DF #osi˕)s N0CK;A1'Sz}r7ms~^>K0t̛,[N3ZkR_/KP]?DG=S$ "!sm xJ}'܂GۈmAIxU <_ﳕ;CkO]ǽ:r;czv^ߪNQUL47^BO%{΁!cF5%6܆%{3D@D`% 0pܚC LI1 ꀑBI"?{]͗ ok5^(ݬUwSřtժK]!rJK%HXUEտs\Ƽc?g&W6fkɦQ(|͝ ؽH/ɸVQc<4Ǿ-9_RhO[׏N; $Q $nO Z9uA%9 8v#6hƦ8 Y ƷI0{ ײĕG7C}`2>e+\7A^!bNԝ^Du^G Pr[NկNJ?J V ahopqF~%yaFA_گd h-ɯ֯pxO^( `ꇋD+)5fω7d}_EKr66Dl>8"!zDxp: a8ꋹ$M2A(=x:F&" > = H ~0m=2'[[$A0lE-{ 6 Z2V$l 7p28.#>oN.]w8uL&0'̟د5pJ;1 nK(y`&[*cm?vkgtnrZVb$lL8vP16;^X_ry-q9sB_ͣ9:+!-2oW4_pNЅG θO0L@ (cQ+ xd[0U .K[JXR*Q=+VBo2Ɛ=B3[4{Qv;Ӯ7ܹk|9ϤAC|N%2 '&@\hwA l qO/'-cju%\U\vbˮ.*pQ ȍ|at!Y8">(6ML(d߼ OAqF</#-ˬ0z0;;EqW'Wa'Q3h6VsPbv+b5J_s~pcP޺ a?-L8uZk'd9X ?w$ăа0H&Ć+C:2iQ{V$M`}!ݏb2UUC1Ier?:}{dZMwu큣e-/w'WM/2 s/?ie ),pu9Vy-"9Ixu 3tqw\IhROh D BGoۗo@GGf"Tt+Vg6L"giڃqT?xV'שNQ{VvQB_l1a,}-;Ix< @|,*g)wn0\3/w]po7+No.#3]؜0Lra|%h Ye[7Ʒ3wS?\$uh#|<〱 8,ɂg Lɭ5© .1fķl1Cƙ%C/`{?ĎսeTw{㣎;n GΨowiW?;)Enqvzv_G v[X 1c )@0cARÙODȿX2({mRePXCH69Sincv㎉+[oP]!hlHOFkW =[vQ|9QG𷅿aȈpmydZkvGV% |pe7_fg @郎ebk NwWT﷪lŕxyϸu^ Ѩ?RV'gnUUx*{+(fcͮ@>O}=R.ѭRIB2;^-' ID( dZ >6 N!}o c%#U ̄MR3(tz)f:DI#?'Ln)X:)B:Qr"d֤-ľ9>Ƒs[~, Tu^OjFo0qRvNK,|@=`-f쩉io֜V3JKnLS6˄1z`^;lk vN4l3{4%{_I ~g(\ Yo~gp;݊{`e_&ߕNJ^Q$W4u.26ߌ^Bƣ ֻ3ǹ́n0$`pvq0QK'1VZS 4I)璛k&Xi8+ +P>[/R8 k2cni{G7^%z8e_mfy7l j_]ftf\ҡ &Ul{ r?!=CbRYzqNw4*. Wo7+37b ~ѹ39fکa2VUg~+rG?W=l+p.o?vI1)W6x=F# y4  u}E%T} $^u؃[w4-RnwQ_׊C·wZΨu=t 8h[{'?lk£7ɎLqR!3_/s};;9^f?M])RBٟ;I(N EaM' I0 kDDM%䆐Hvrzvn]w/6ٽ^ Ѐ[Dao # +P-Oe~ϤQ Um8 nSns<n{*V3ÓPL׋4Ha89CP[&JN;7}F@d>x $eq06G$ "[!&M CS sdʹ ɮrUyd@" SM~ Z?)ʶAzͼǮ?([ 9Wg]߉xcP[oz~guFQze_WwR}MC. z4N<5VwQCoRqeWgQ\nqu*nL̽;^Qt9*~w48m?EGrn6[Rnf4o)v„Iym΃eT`8_X+cDA Z?:SP~郎;1f7- Np| ?S5 Ǝ6dfu*MI-MBYE? 
Ē{aO݊Ekd1Q=V5tbW_)ٽ5OEPhkA$3qrhv9$ˁ:\L<D8+{UPzSsۭIYA ?[.IubBM(<D>d $b`J鷫+|as{9uh>rV?Q])o;)F='EQ`)BQ * mM}zSˋF>TP?[ٚS:#X u8b1r;+̂ as?NX-s̮**ʃ<ߩ];p205r Ξo|wpQbl^?T?;)fyjC I\$9x̓~)AĂcD8ݽu'cO`6iJ qdC4Xt5'CڝNFqP#Fh8eq6~`B ^kC44Yakp@&̙%F`"%;S\٧4?#RHښ^ ]@{ܳG^5~>Z+s6(^uSۭ]ʽAwy_([`0ié33<-o{GZva]ogWUx>Qbr+$0<%JstTؼ{J._`pݎ7nzAw2_!^Xq1 @pK @OTFpDsӳ3uwf["@kyC񑉭pOX H16xvmuzQŒM˃U_ܙKa bx9`d:GwYx]`AhH@c`w4>dG xfW TrZ!@8r^ + kv. RB?h@ b+QWz/<_~Wjb!Ug/o-i{U[vޭ?Qa0Se;f5bK+K9Ŝ]}ߙAly!-H <0:Mz! ȧx2Ѩ8}{?>6dYް#}8|?2@K(*Jl[ĉG=A 9Υŀajdj9C45r @}q`p# ][Yݯa<2;ct)yndGK!~yiտr?rPu~{4w^g0nVNQng&2pa>196ϮS  ?@C I+%P19LZS{C:«Q}a<ʝs {uM7]onyv/0:w&@/(zޔҁ\M>԰~b~2p[Mf <6chȎ!gz$9НFȀ<ѽ0 #.&Au(2DyH"X:62?V pމxUWWf7Z#: pfW:f{;.yc;t9UYY``_ʀO 1qCH:j_e 0@2WQCfFt"44HzN,Buya}v` T"_ It[w[]vwcW׌^,ܓvzT,/y ']ǵݎ?|*%eU3Oֳÿ@aQ; Cvq&ʌa&h\-䦸٘rO ,!d_r԰!sM1K$>x7AL3FdE8%I[HRUI‡%(}@jDQ. e0g&l)ldYْhmz&Tu [ Cm`Q~2Z@,̙ I( alooGָiwn'uw$ς< HF2+b4h.g4 Ẅa^moa\oo#;6W)v߮?vRrUϯ /Ss*Qe~ G;9ϖ]줘?Xn)o@Zn8.fhE¥ZC'$1=S@@ XCMҳL@ZQ8$hBlʡG+WGsTWOyh؋2 l*]vmXd? .rBCR^݀38$pA n8?8Ǩ8@H@8P ѓ[sfjԄ +w8T,ɣ͵zg<,/mF78{*1/qOCASHWc<\h0^oVe+/“?LX ^m{ne@Uye_\^m7 p^qr`5 An;Г ?"qD™` l 5 șSAj0E4':EuNjZވou+]o\V(&/ !Xˉ.27fL͊4C+ ~6Z5~;|<8+Y_WM1`j_=``ak4?! B`",mB(ğsC@Ǝѱ?&_Ec],x~}7!-DFD#LCye&[?ݳ 糕;nXχzv?n~>/[YWvSV"w˃p߮.,H$wZyGݭC.2o7s?^M_ئuk eb} wx| 5$ PT}'nul;~x!]or?g7k׏qi A(]C 8V{6c^;"$(L>ek%o~h;VglBW]oenI ܔ ]? 
;B$|φR:5bKTNJGk0"(nb[,`#x@x+ O6 n0yۍbl1NFEhv6j#I'mv6}Km8 e]Wk {y|2dMWIxn8 ụ̀f|]?=%b OLC8Z`$&2F9 ,]?Pn'Fu!t>2c7`~ q_qZZ?b!,P t&m卢fASh'FĀ#RܨU:j`D~r>qZ~7=og[M)y^@y ,4 }(<h0'kv ,ȉ`}~] Esh^ ֳ:#zu팜tA>;o[*NJ^6½FA_JK@`4vX7P`0 `rw& I#'4 -<ݤlso|=oZn-ȳzyfuwRrC|hێ/DAswOz T"r ;v ?ؽӴYa8SƠs=u;=V2ۭVnw)_Y-7WckWK`b( ׾ur`=viC=i6\XS4\=: 80E2dʜC+6t& c6uI=aHPocX2(&6p%MYo'if^wm u hJ JnsR^2SEl4 H^1+9Q?Hj>mhr W\ LUrw\>_?+vEڣHމ&ҹN+Kߢ,l^HX//B?/'Xu` g$QӕX}?t8jQg sPCV a+9Au掽̙KZ"s@ '7nF (QyOba*ڼH/xXb,}TOqV`*CObwOSy #y9{ѐfh| M夭R2B 3f>md]øM$6t*y2ʑjaP@A`K@ } 3$0~6- Z]Di, # e0 b Q7*4oo_H"P34^}I1cU[" AlJ@M(oTM~O{졛.5v?llf-[(MC ;`X>;x.4gRg7 Dw >K7Vǒq!0HgHgMSaBuhT%B i"$9 @1p Y \jܣ M:!gБBsN(Z5& 2aVw:4U0*>֩CqjXVnr񊲜l\+K!N1Ag5<(Jc{ b-n'yӴ;IJZЙhkWooIJ_9RD&g(naTBL*Oh׸I% F&w2($%vq32ypT*`MIk' 9.7EF0Ԇ=_.pdB&QGKG%aк  J9DT^h)(=X'x:*~'lp ץB꤈5HAT@)=wIGXRYbՑɎM#RDyˎfYV:spZ\2{ O ؄ OՖNkK/O,t$M6caj.@Մ/\ ,#ɢL.iqn!2r;? n0`9d7,hb5^Sd lc1Dp0 'Lnp4e/|fYw]qQY2v{begeƉlLϑ]M*, J8JfW+}ؘ)R)uS7P27Kw,/Npp&d !WRqFWo.R. ` 4V'(GvkRc5gIpqSO҄XFҭ2Zb%!倩v'k,_{=8D.qݓ->i>tUINLG. $_Ӊt A-z䦏J-S8]ՖD:>fߘ`ڏ"tFEE8XĚT+oڼcrʷ}+y冁9 GmɲFE&z ?&bM!JUgVPjqFƬ_VC@$8" (A9@cRs}nQiڀ#Px@?d~CGoxbA[Ī~=T4zו mY;cJ`<ڧ3rpvG'֋.N=3fUҾ:o 䑣=.$ŀnjid:_zK6$DDqyQ $#}V׉3R4i:P[;-E)>% +c`6˃o4DL?b? J5PӠ 3?0o0rp9qM (v +'w9E4= LkBAe -دY)GT=P;H\!,g q8 ͊Z8"Fp${y2~U_R*^3A[ 0xɦ4kthot<?6ٕi;HA7X 4bϣsd9 [ Qwp.%ו䟵쫓߻x-$Z|_^ \MH)$n:0?k"}]Ͼ庒krt_5Y_MZuww]IME_[{r]$e$0J˜nMP?߿r-כQ>sJܼQܪܪoVިoUެoWުS+;)JR9x[9]qoWkP*{?T?TޯP탊C~Tq?@P~\iMtqn>C?RV?Uf>8[L?iQJL߂hӲƽ.|EOM!a!2"0-- 1ɗ_NC-#SD @K) !Eȗ韊Tx;MD+}{jf+nv'WnG֭KYu\/(ф}\kQw]Hbиn;W_ZoBqO2is ъ!30 s y~%o+,<9Nϩ0эzs 6jsxzsd۾H1]4:{|<~Dc*٠Q/v³YerƜq7Ƥó_67[0ƺx 7ķ[m6llo}C3kD: 9F~o TmʙJW81 ܮqoB_/w^z-%?=?iu%Ǘc7rT<HZF-òfU6@~J(WN#χpľ-%CIYqf 9A (љ='W rE)RRz[>;dqGiġ40='`έﮩ|,4&h3SVq%L *pt4xGRj3?;<U#+)#՘Bx^ 2(h9>|5B0rB~GcD LN2M'Ofqzgdfb8}6ָ|,E?X2rۆdGldmXCOr J05 賏j($$ ,?x]Lp]Xb7P߷ڹxv;h,Og1'c灰4SMOP )y6ǩpI-3f5̀uGV@2/RRK># Z! 
IeX٢PFɦ Ʈ|-ި/I= ۊġRˬW(^s<?^=%/]HX*r'#)o&7ՀFf=ɼxAHrU`"Pv~-h7jFXXh,.6+n+Q׾۵/UMqݳ՟_/N<49b02_TcD<+V:Zpl+#xrfzW2@b]3(ښ 5d>/Ί|'Rϸej ~A5n앴:,uz%5"`qv@$uGi`Nо:hR/ yKH ,">i݁ a&x&~>*LKa6CK9M),\`#J 4r9IESͰ ߢMW1xHH5MrC{: 'LAVUZ>ÐrQY*=[n kS;ԀtNNle]\~'Wzq Ik>%v%0ͭ^r\'2jbf K"NdI-{uCe?(_ Q2(Xex!!4&}|`VghN/~QO}aipq&Z|_V{13ˊFǩ!֍ ~WCP^7u~~L-ޜa)d8m OY^8D U  Zj n|dmsg  sEC4!&C|i"A_Q?=aX/9␎ ׋1 wfАc-Vg# a}/,D9z,1}WDcb8qߤ#};HA0:F}'h "Enk) 3hE\ uŠS4G;ۙlT}5pepns):xk:T:0K+vKCcۢ䝶e{!V=ۨ(]Ir1sc'^a:X| b2`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/rhosp13/nsx-openstack-tripleo-heat-templates.tar.gz0000644000175000017500000177761200000000000027036 0ustar00coreycorey00000000000000Tf\=is8_|HC>R*ŊUDzr{{wkK E2([3)dYNDc xxޅ'$Z2|kƨ$.Ly>SyIr~">$MݺZ0a-*.m> J9}~=~>=>=~C_/.(hBoh@6T_EПG2r{>%'N!QI|ێB{4( :_#1@KF G#esI2+ `B>S&1>Џ2fxj4s& ԟ8lB#WvȻw ]"`2xSHI='%"6 ]. 8!  $'P{Fr]TG?4prxdW|O݌bº[o@쁧K‚9 Bx~9b>B,jcw2 dP@MQ fe~8dr8: 2ZiTITt]d: ;ϠbNZuU JΌ^FT;LZb)uG(Dk#29RHFjl4ՎP BYƀD4&GR b#\ʈ;U}}pUR*GҩFOYaIK40&zCߏ5?>>?7q}U8wE7-C< lgI_qk<8"[qXz_X4ܥL h5Nư_DNvySlbg+Xjg k[᧵St]3ǚ`ي?=iy vbn wAS(c4xdu 6(z %~$?Res(C7)0V, un,m0*T)4gMy $-5do5Xog>/l\Q"XЕ@8LuEb`KȾE̯e⛄L6D:Cm߁NM[5I:th* wf$εPpzhQ<d{]ЋE8C,fRr҆dU1D@,U1狡'󷆹V Q=UT=Q:M"Hi?rQP ^?gψ^dCL63 'Td37gǖ> ?{/*Gz Vr]B>'fNc3 T?XԬ/ԬRZkBPej iT ydkuT-,^P譐Q?] +.7Wgs.G-l1>6KSN^Iol򒦹,B4+ EV'C̲YYgmXiu)=iIQXfXtj; FBc*T/NgM:JC5Q˻VnJ[7rog+aYXАHr|c Ss\f4DKL=b}i(]۰s< A"FçۇԮK Rh%JִWڝwhY֘NHO*6}J!R4w;SW/ǹ۶|* ges>{"kU(,rUixBf\ؾ|]TU&dpCQY"aEcHΙXY)iy1BT+Hȋ AekkŅ;ٳ:.]>U sLVQ a9S~Ea5EfZuR{[/hQjiȝJzn~ 2m=}:Os)"˞q%WQګҺNƢ_WY'uS3+ !O5  Q]۞V([dJ%{S*BV/~Hį|*uC2.\rρkjҴSڈ̌wvrR1s:Bꛡ e )?&!s]a7$DL ^!尿m'T%}m+\nʭ߱.ٓx! 9} )?v/5P-1_!͉'9ӕ?>^oH;;Ol"g4ۏ_g豫i>5'cIΨLM!\(ǻgUMS(wn : M]|,ܥϯ(\Ra͖ ObҐW#q;VCWYNY邆XfzTu޶924 T,5N氢ba"fbP!?EMnER¦Ÿ"Q@]x@>ymW|R{4|w[[Lܥ#b-?3Isڐ:H1#G\W}ⳛ^^QdǮt~]pB@)tTlWKK F ~{4zm?T7&uo!1,Uw=S5dܥi.֚GUO -(l[RCs5[lxˮIю)9l<,θMDQ2ޙ7g. mFhLW&BԓaPMƾ?vN삙 IeLߖa6^wwG,턑X܆)[Al˾n 'sSC ? 
GsHlSs;x+ts4U#Xޤ:N4H ͊*W\eqW"ʉKrJZeNfGasƿ4tz2 7X2ܥX{iI| nK 6dNsoS7x$vE/^p[1CIi$t#X[+#l2a<^tl˰%]2su:O4lEV94^@e9uGh_,s&* U04 p p4vz$DVYIdg5Tf^nJ5@D*Z 7dNrPʑ>v@o?&ˬrl2OLd7{@b<* 9{(P. ;C)]|ǫqq%}gi3]BSҬ~i[ӈoʩ)WP7/?*-=w y'gٛ㓋7|Dw돇ؒk>>h'^\_O+ޗD}U8e. }8`ExzI658&oX*Dч[1F`fO: Hלqaܨ{$Fdj5v'"~$wXV*2/ n?G˜Nk&%$ GGS.gG@!='0/dAn6ӟ/ O]w腕ږZ1>;=/ jiAibi *kA$ZTnO#tSVHͤ_Bv!QzyXdGJ™R0WWKw%b`.#ĉ#.G?=oxu:c*#]bhHB=fȶՎVP,f 3!qulwVJ h\x2y2IN7 A)D9GѧV#}[3,|avxoW@xB]zqٽ yf&V{tn[o{ZoiA׻Ȣf6AdR2@t蒥UbR`:T@+$;@]M}СNͰ :ēA"wˎR(PfG( G9erT+h_)5<4陝ߎ;SeO\V 5gF鬪=.SBnI)DK^m0ǿt~Bl9cKch-_XA@?9>`/ߟ?ٙ)_FrD?F-]c/lF( G"*Pe4>sc7Ôt!7z!]Or#.G&n{Qo'dJnt.[}?ltc^r'쐷F u%z1恿 $4nVwc@?nۃRc|a9LYFAO`gaUs T%@{w]k}b}%Zo=F^@jNapkW5[KX^6p|#LRq]XKŀ7TPj\黌:TR~VO {"IW* "a7Ǐ]5.3;F);[f..A.$8=eIF;n^!D`IWs Gj ?E* g=ާ'aJV{LE-Ȩ:LX/G8|:Qԇ.z.e[lisZ+7d8-sedd?98;yuj账<)HA6'nRTU}ކ~ OBd\)ĎvFmH]c'?b~?/L0K봢VQO,VU?ůN-i߶+R [oӃ$&`gR7H+eG $\3@YaxAC9ovه6 _8 !#jkmGͣi| /x#0UxG*+cKA)JkPŵߜgϳ ٨Bmdzo)f60ȧ3>?@3i|s_tw| SAݬM09oj:vʢչ0ʾ!.[T,r p?V #OX!j,9ƴ_6<et!]9ܟ󽁃i$nm(wʁa6~}{ٜ]9w蟌ټ>`(n7hC=D +V: xnwlaeznݚNCy-{ 6GSG|Iƽ9LjmUOoF AqBq\ }/.gљ7d"J73IP]ۍjF?y3LVw`PA u?6d16yqևɦ0&}n}g>y:2!ɘMyV#.D>P:@,_f5Ǐz 1& tX?j_RWCd/%zj[H8[ 8. 
H* fU o|T.|WN̎ɵ<sBd )wũYu2 aT1C'}( bL+G \ "]-4=loQr36ER-.+ "K<=F޵ UU3זoꙿF^q s:_\On<8z&P'ch6POj'.D< 2Gم/SleQPR Җ 9J}1ì~O'^i_9%|1r~i-yF&+` [0k+PF%gӊl:y^'WO#iHV4j`d.YiqP)3srlvw"<__X_ 3~a˙4ޕ3V>|4?uQ|k杬~r]uv;e_zbR&/ȕu=%=_/JnJ[m._G_>A?w%?[o}=ɳls4fFcsװ{mFv~$~+iY^!oOns#yV4!<7y3%M[ȳL$>r'ҧ k†!ħSдa4MVp:dx!3p+YRWQ0*y-@SV 1L !}U\Sj 7 ߂@atCZ4<F2>C1(`@fgҵJBS6sA͉\+0,U{d>#B2]kk{gs Ոi5B?iK?͠R50JWnRe3%"К-.?Y-~l:kN򜫯iK>_{BWÅ:fVK s̥2|/Ɩϛ.r9۪6?3j=Wټ7 SoB\__Cmmt5:哂@o5%8̟ۭWA~n1(Cǫi]LJ5ȚT\ 5k*R0B"JO񆽉>{c Yo;RX0r4"#3J( Rͮ{˚ƒfJ|o]9e9G|M#˃ zr CK) 0+ZP\8sZ>) p}8~oߵ.]%v+ E+Vr,fVT#ms үY6,Fߊ7V8{}b ދ8U)'vΓB"U=4`dF\dDX'ב^њ fU->%IҙflWVP0y4y%FJW3Z1G.{qֺlMTi6ٚ5*O8DQ?K5<:ؚ= Oм?MXf:yN~l,Z/60ĆW4MVGmE#, ^ 0:Jo>'h>Şt^{ CA*k֧7ZfgfMVogX?u۟k6FhxVVWS=laLp[)0WeḒ}flpr\bߢ{v$CvK[yXR֞ =T80>P3{̣F[;V_鍡t]v9P*B8{e:U/CZ)G/KhF?Od:EG=:e7摳L:ǘ7pJO/z͸CړuM fڿ&Ťj+lԒEU{-%~Q[)*hTt9\) =COqIHj&E:tx'69܇btYts\X>%~L{jf q6<oT7Ӟ9Fk MZ}6xnI&PUhP=ӌmx ~)L^{u0Cʵ8 Y[,ܺBp1Tl"n%T{y}igpVDatx*2sagvFıJ|k08|AzkQD)o4f)DE sŐ?V9j>M9M!YgYh0SSqhB ={_7Ǟ&Z xMSbN(uIBdu5:;Uݪk"ҦLA$FRED|+y|`f%ئFSd󀮴;OZbsdbÄ4(k#R2Ⱥ+OC_:g,ǰ/~^O4Fa}[ oa-쿅_d1zFو"ӓ<bW_+'Q>_O3֧_ u>ŞMK߭J ^ϓ t9QoN,TAf 2郊=1ݻĩUx=7Y|mϣPVgy߇X oa-쿅[ o_ ۲ZfKw*XȤCda/./c"NNqD_˸݉y<7d>O1]~8)bqS?OqS?Oq{nW Ob;o AbK;&*?#w]HTx4% |î5evM^5*hA%ՠ~Z41Ӛ D#JV[ fےkux;‡r2fmjxGixG4 Pw pKmxG668Sbv01ۆLݶnZnȄڱ-cI٩+١q14촴촃t7;[7;Zovv|i—jګFԭFQ(kԷmkַڹFk%Ÿ "M/XZ.Z\6@h48m #0 @$0D"P9aKWά Cֆkk)2R9O EҎj+"ҤBYe~nq~TQz 9E _XޗBb*Q@'ijF֎Jb(/Q $ DFv+$1K?;e t<#< rR9q =wS !Vt>"}D@^eͶFΎFA qQpP)(_>WVEU{Mmh=iJIݰwmw xzXڻm.I*in/ ԻጲK5Ꚑ1Ꚑ1BƨBƨBƨK]u]u]PFC2Fc1!c48^ Ǘ"y)DEa1 ]j"0c1 MF1 ]*b&ELEз#uc""h"hqKb?}  c\OØm?n[6ah6mM0@FSF['v>au}heFD]0Qlt RIlpIl؈IbCĆ.dIlD$IbC%FD$6BIlHIlD$%KbCĆ. ]ɒ؈HbCFDIlD$Hb#ĆJb]N,^ HD*rSW"K9$혺DXS:퐚u}Ԭw-趔Sͺ=j6QAj6 MT7l57M \3 H9DuXSMi;oU(U94J*ش4 ]4 >&M}LmKfS3fi6u1lbq3"ylbf+ض4$Vcq%Y*UCX/??5\Gst$.$*yck렁"9b㌙F?ڵ*(]rA$?Sgxf{NJ=ͬ}e䔜*c넟NՙPi蠳К;q(揰ڛM֟XZZ8œ)s($`epdkӞ*ǎo^A$2?#͒0sz#sz &Baзв[k]۞&%yDL>G=qt`Q]\/Pvdf8)%ˊMXH^@j#r8yq Z |l&v{ǜGWg@ĂtѧycU>)7K|sBMa4a|/+8PדP=A%n3L2stWΜ&z$(;:=9uK;F=;* E+Vr,fVT؀;}nA>\&k;QoP)[Z׶wE3x/6\. 
Q?(_>6إPP.@ځQI7XGw2nRagFK-z~ZQ|Oi1G37Fw^ nfm[봔{c`姰{]muZRF}mYaJ+xXke:[&}:WF_0(b't^) 4$H@GaSP]f&v< D [q 2X2l-2pNnB  ]CXCxq݊l?y /E]-)?Cof4O$ؙ5J+TnqARK{?.&BWgԢ ^j.J )[[AOV4gJW|\itdσA$+~I;u8^mKc.Zxg40ԢGoX'jUDus)YRWxD2hC^V#b4@s~*]*4N:fmW[d җ|?;D>bѦWӫ5q`p}@WWWF{-}9p;SC{/5'~- [̻3/J`]{0tҹw+ƽ[-0P w5 Y! p71nͫ=[!j( r؉ ȝ@DrɎ2 `l)WL@CЄRY7h֛4A#Ьo;֙&Ha3MjgzЙ&Hc!՛ wJ3MfuDp(՛-#m# ~#1jGJGi4Qi)՛&_]6A f f4Lm#ˆ~ڍQ=5\.st$.$*yLck렁"9b㌙F?ڵ*(]rA$?gxj{NJa#=ͬ}e䜜G*c넟O0'/Ag5;wPau7?2,q:'1S;P8H>צ=TG⃃ie=~F'ϛ%a F M̡ào!e׺[=MTK84$ujzqw+3Pvdf8)%ˊMX%C@j#z:@ Zbl&v{ǜGW g@Ătѧ ycU>)7;|sBMa4a|/+8PדP=A%n3L*2stWΜ&z(;:=9vK;n:;* E+Vr,fVT؀;g~nA>+YE7;U|'ֵt@kfb ދt jBwJ.ϭEy)v8yh vu ֑bYDQ+W/? SlXj81G3oHR>5H|2uۏk䫳ek͵֬Py׮}z6+E9*yZ̵[ ^[!Jz:!t+mmm'ЖVۉeIlsGm3V-u[-oׄNXk\HP(Jm.com7~4);]҄vOauTҞ2{Pƞ%Y&6. ܯE͘fG;(ZAj2W3uLR~Hjg6@q'h^(O3T"58:΢ FlxEÏ۪>OѤxfeay;/P*tB ~ %?u 5ˏXs 4ofc̈́>jX}/?QONWG>LW=L[a0%<=tkJT+ф5Cr Uu4C%-PfA'44j}+jQ +=ˁJe_Yd%V)YN23VT7:IxgdW+-EdH'҇}V( q^NgSo6bߛE'y OaypS Oa)?_gŎxgƾhcQo[ ?S[,b_rG6Z5w%{r_J<~i5U5@f 2[ 7M˜5S62F1ZFa{X _a+W -c>|辄3Q ( PSP1>Ø(I|]>C"t,\vOA!7 [g_jڝ4_ ό-o(>S [,_ arb_\<ʳܮxXكcKk#jF;F$pV/(=ҬQ )j^KZY䝗R JGF [~nG<*bsQזj%,WMeT X^P#Puk[Jg BXok:ğJq`=xN=D"4 K h]؊ ZXIg2 -s;=~씎m NN=kRya Dh#͍P.L$ |6e6y ּ*uV[aدuK4jD 2ZmuKn:@.?lS;J;a4`lmp޷7@m:I:arjId d_>;@}BF(Ё7uX:ޤg NPS>>r2Bo.a:gˉ?_ %<@"S(݂ rɧj+<⁗>Qϭex2F@?wJ1ۆ&ddZn=n.eFv[vG5|{[|{_5w;N3|E6|aEUwv)ܺ~PoԍHuzԩߨou74|:(q`wh ^l4H4pF(=Ձm}mԏ0\[uG[Zoa*j5aoA ~k7iii'Iڹ&~ؖ&ǫih~[ ȏ* ju!拉!jF9}#EN 5@n#N--Ufk[퀟Qmk[F6pTA^_#F9>H C%杖yG1üÉ1蘃`0j/ɝl s *!uhܹTH2{;:$i U/Z!|Q |h h R ow I@FNK%;[zѻw$F7$O;cU;cԉ/zK6\{zGQR;b;bw @ithbW!!Fhb4y3ڨWkKahbڨFtT Ch>*a(Ҭ˥hQ gjp6H̪orPiVzV$A/AԪbIXthZb hiZK-Lm]71ںrbChsd lkhw8]ze  :p5U÷Jd2Cщ0o3oGg^jalܻsV{4HlEw+½[Rw.U c[rvDlsBl2e& m]lre;&W5NDDND(re)Wv"CÇfGff]f]f=:*ͺ:*zGYGYGYG٨KlQi62X$SLi68^ M4Li6l2iLiLi&SF S&S&SFT4 U4 )ͦ.SM]4Li6Cl2eJз"з##2 eJ˔fdWܶx:7T;f>_VR[*^GidI]EO|[8JL/4b[`d /.y 2?e~ڛ·Ņf5( <{ϖ53YΝZ2wLť^[ h?_(?-wZؚoL:ϯi$޻΋īf`</OaI5@C.*q.yg\H#Y؟-.p-m?:F{ir79 s{< ’#sWK۵nq y3mdV*9%^+t#W&l~G3])K@ ۀD)k+9Ss+vөpUb~&Xup|6||n%3>KI^hO.NiOA& 2fg+{BGz\|N3=ͨŜӺZŠgZ 
Н'}1W;'kaaౡ3-l{{rh?4xڞ  թ 9^<+q;vk9e z.i ..|aAV^&^ڞ[C(ȸE$jRfRTr{vRF&rάձ\TuR:0iQUy3`OՋ/Z {禎4it<U2|fs 5kfPDz!Us`UF'V_=߮,$#"2[ N2J,+z4fM;v|?p 1SǙW>@kFktZV}k|Y5vXg4 *1R0t9@x gWNY{ŜY" a*z{B23 v&K. >F*\+ Fv&E9M=`^Q7}/S_EhϝP&8PՊ+<%s!u7|BUΒx*(}Q~F\{]vᰡ5p-68C Ifݻ# !|XI=%= ,θ?AM(&#zTfb @&(P *ڃ;ր<@cWSćlbOýe1bK.Uтl2hץ]c/>Lct-#ꂎ&"(;< Dkhv @tYM̂VZ8(ZaלZ=. :EJKQTqہ ]u -eg Mm837gNW=AO;ް'7hb&׺F^:2:0v(B lqzsǍrPc#jj2*GilfSڭ~l[֖in5OXZaK ɡ|aoZn[~[n?ww ANȪNFY?.s>=lzTA`zo7~?֑jxVi r'dKE%0=T0ыWzggGH4 /޼<9_75ŇSqb`g||΄DU"Ȥ[X"oUh! R!bꔤTW,-9>,& \{w͜#27ٚ0T!|gw8&ԆTũZg;Y}zظٛJ 5)Zfxi|gzH5$lÈk4b]0yĔL< rdӁ BQL`X7 H IPW3!]̚{߯nNʙ&CXQ|\Çkn Cw["XE4- K|%n@)V'Z;[tcąwTa4?៭vgr}xt/8_w3՗p`,9XOȷ8bLƓs)}Nawݻ a NţJ*NGwq]i/L+:jS<ܥte` "m~ۛ {l?< ߇!8.Nœ㤼ܴT<<C2Alcs  r!? Sοj.c?RtXy[V3n/>͑ a.琸9"ELH_ mKMZ vt&LQ.'T\r"E\=v®&ĝm-L܏k^.>c^x)<,P-s[ռ$YB|{& Vn`7L_˄2~G\.pQ1v2fG RT<qLQ ʔFMlf{A/dŏ݃drמ TU/ ѫ{hQIqA5.6[.0Œ5>"يO|s(ӏ[ZPQ="-Q8D&C.6r<`-6`IÙX-0I/fL5ycG͵s2M-a(@W1;2 'BȔfS130oNy~k9CAyw\w+ևekGQ<<"V`*O|#_nؾ "D|b9nPOnR.iPA{T;pڋ y'yy+7`3ޚ`]<%%~uvVG0,y`t"e# J~@n[UU"2ᓛ E>*&<@^:a#ĪU%B,|ᣬ.cV 7Wy"VHnS<߈fhTw]/ Ax"HՅ,kmV|J{:?_@ʿ_s1Eϫݯ Jnw2:m>;`v5ˍ{3]ٳ[?za [[E'yp{r{h !)\4wO'LG+iu˄ 6=m%X1g9F&69 OMrG:Tੂ/@bfz}=$oSf<@Mxs  X͘ JWe{{k%=`]/ qb FLZdLh~`ā)ﭻ*ŝ`?@`EL:3&goV)rڶ uh* |hcK|ۋ'!oS9􊱣OVxp`AT\y?A@:KUFMف=t4{iNr:>9ZlDnFgjT5m[D4OD#FM*9=ǂIVwJB֦R'"&, JC1aY7B|V1A~N#}r} b{ՓzPŒfK}& taJ:9bmsEt)o_9}NkSgeZ /OS+9ܳg jH1%N ͇H΃>t>4kk*-c - #Q^9!QTtqbWHKALKcKHrqVZ 'V#^l΋4; eX ?knԘǫz(SnPU%Jt#& UIW )u56o7^~1(h6REihs(*ï|l7w긫,WkBK`H!E;RAٕ)E`p;ѥE";y\pAgwM%NPHĘ=@|PER(#JYS}Vk\Yr4xUy-,.Wjue3p0g3=E+ #A|`2Cj+2#Y6oikL2l0{$pfsnFXLA9IQQ4DQ9} .lhpK;bΞ(LعFp;ra)?4> x>ߓ9{f>ų_eLXn6#fQxj}{Zި[S,/tw\r<5%Zx`ڜV=5=VLy^"k>/yp4rZxCށhE M^ho +GqmXSF&1irV'P;:7!b,ؐ55N~sPI4SB[*AԾpGMb:4@[j΍ W{{F:f2,bmL9tPUG*C[VbkON k""ܯ,\`jdRrҕ]*YH[Y[$i>i5fJoPTU8[tMJE&ʓƣTR =҇mR̯NԀ=r(Zh0Mo bMi쟱spp|0XpũvoYM]KҥU~g;ڿ V!ž":—0aә\0lQ`qGXcW\kluG!8H}m 0~ExŽC쌤 1"J{lN\aT29'VZ^40}Ó&',SGj]q'95na냌.K'+ M3a3o9;u|^{ xzֻw˓_"oO\ĊVPחĴދWG?lSoRoFO6;m/aʐa(]Μ vZM@B4;^> )9(χ {f s0N;;} o +dH5ݷ|jITׯ.J `A 
ĞaE!:KR?pij!?ncZsA)LK\$e8+,kP%wb]U5E/Yv hVް.(2w[ǻ5w~-%(01\kzW pSJk"]eAL)L)ߕ9 [ 00x!%\BbQ1%\,k\Z}Ew |E|=5ʍYךbԾVunJzPLA)#+q( )nEIn\(ᾗTVRl;9{`g Agt!(!K-ĭwCӅ(Î ;-{XWyCaA/W1[\OyHK[f=œwVekU| |S<Ɩ0"VSxcqYbȂӮKfzÅŒ(]q0T&R/F Ymؤ ?1=@}zq !-j-OzyX0rek5鍁SKsƢN絼AtnL62jE #E?]pLVݠ2f֮GQI{^b8Tt#Bb=y6 fC@]z8P\tJA t5 ek?2 j=bYTf>)=Y{JR4_؃-cbgN-{XWǝ0EvTΌagZfm]Lw/b@b]uGG4N}LRމ=dOkgaEw,2YNr/_9ישpx îMts&اYat^`g[jmlm R*#ֺo?ӄc1) O;-Ō.n.ChH^9)1iuGD|Uk->]5صMՌ %X:D'*%rItdO)W~0=[ҢCJz9\Q`Ψ6h-s' \dx,%b%j]f1LgcEg6eĊO+|Q/t]F"(@nQ$'[|bTT*2hmԯ;C:h|^FE[QG 4!Hp Ґ!Y430@18DLaoE 2(8w C30t 6L_;Iɰx1^{r6,Q?EV%0ѿ^_泡)HZYkH3A[j(p BDoW|M8gp6w&B)S?tcߑsOl%0($3)w?~{tҢ=t)B3 { { BXo1aD5Eʹ]KH^!qCm0Xcj,UR込zHgp],#**J--352wc&jsu9,/KlPʐjjefDV/J=j S2tM󞏢]p]shRBT'HOܡ e &NX &md(*K#cLIrB37a >J,V, ]KuZe}.3w(ڪZ6چ!)HDfe3vJ;uPڄ]߭96{@(1hz1ͮѦ@sAkYv{CmRXT;%ikQe^:اO.,+@ȶ\Seĕru,ӞqD/6Ҍh8M;>O%]G(#)@#]C% bfxL#|h|*>z>jt5gx"1 /sϭy#0QE431ބQ?zsRށ8q@nSy`ƒqwcs>I^D8Uw"(RM2\ND%*eB=O5#(@< dX"jbiVMF ֮޼]޾s>PJ1u6i._(ߞMf[Fx irzmNeq'_֯V7pNf7&u5P*](g/z?_tY|88|Gxl'Ozg'7 g8MF>I%OpCpa"44'jL,', hϴ*4b=XM3+"nr B±GIf0eZ/tTM!GE;r qz)RC` 7`%v(/}g6A 1A()+×.JI/_Gxоe0z9G/dsFG䫉kUp1wbzQ1ݒ,`MhH (LUũT|̱>"҅LV!]Zew3{LGBAx;cigLX$3x; _F@"! 
X5e5c0Uqo_ nr2xش1יBoU o6Gt2I&)7`Zc:f„b8@[\0[jA@=qg+!H?6zg˜ڐ\Q@rWQ!iY8xsdZVң0޾#C,%(jD^b5oRsƣciDq&&!yeߥJ"UU&̯G@j{`=~]֨3FSvJ4RZ>A !_89*U ÒQ-C3zc3lp5] 2+6ߞLL̋} Ac>7mm2Q&8ERL@, ^RQdh4j QVKEQypX<ܺrfb$!~)VE?2u@_!b/cVX +ȗOQ>b03h由Wү,Gww DT{VwJ:aeF&ɦdAj 2܇.wwD/$pX%tP r"l6:/Q9MKyg>V~&urrI ՘k JIT?b 5"1A` U.F߾ۨ^t}: _P{ r ՌX"*"9( y69GLbOx^xtYȜ|cDY5n1U{us(8B R*}4b ?tNT_a㌞o86 XӢF&^߮U׀ׄB[ښvk}1#_|!Br$0P/[O*^I ͬվ76x:E5,L 粬|fz\P%=AD?;ܻ8T ,i ޚ-I17Q)PO\~`"U˂^)N\0\<)\Ƃӓr`!=`6Zňh %I5h(Z-܏ p,Tp ంyMS.^ &̋҆m2o>QtBq`Jc.W5BEhO:á^"-S6k EnmhCl.֦N/J&PDbBUHn:%}}m]Lg f'L%i,uv:DA}r"GA@4eݡL4;m6%\T{fGI>Ɨc:4qj8Ɇ4JJHzòUbBqa*/ۄ)ˊ4P\_O1D¼sZbAXЎ((DY*( A+/i׷E$P_viY899˄%ELZa 崛͌LļHk+K h"'alKHJh oLfįˡ?8!D tHÇx"b:Dې#VDgDxbq} -~ԝ\Tנ4؉oL]Ż7s&b1 @h㹷)WJFԟTcX~^qOӧrzl25\/K{+K˪W}ሞѭ28r'w7?E%;R tM3q!e|EZ:TVnlPu1V$~lK6tJpC/pXx[ 7)WQ;1 <&2*̳n__q.w=s]j(F2Q  '"vxym tx8 XTc2JbȄWӕ9N>=CU;;ǓWqa6m6 ;C'ŒFo{A76)IdUsޮ^OpW`K1PBP)!s*JwI0#sBj%M =~bEwkEݗ!,'X5uYZ8OMBqvBU 7^>ș846C9pbfEoU*?CТH|rd)ü-7 {ۙ{kpatS땈 kA|%HP*AA`F,-K2jB Dܦqs-~}-dakU*M8ZY7"f#,C>jG_IF*Di99C|i2j:'bU1͝ThAƜ8K4C>w`F&YU`G4JZe2I''g{óó=LcwaR)+h?d6ԲT;-4>iC3ԯ›,%# 9hAK?8 zL4ڔs!>9G'ѸY-x>S'`Sh^ާ'APLI9YqPSI<.`A((:R0Za4XQTjV"evx1Y/#`ST=ҙ&*:hVLZLQK#m[._V>?QF3IsSw3qj &hp#O`UC4OD佖r 5Y,Zʁ`73ˬ̂O5O<*3F+5\ oϳwHVm7f#{ߺR.exdmE&̃)8j)Y9qvcyӲOJ+/jb_|>{xK[lg1EVs@ o<\Y$(##k,K(1"<@G $\0曰UL%H su^S"T2c.4䴆bgZ0&RnCVcC>b'Xm򍇣M`DQT:2ǏѨG[@Ne$.!+GɱaS}D6 [㇒0xxblE5)$W yadC&/Tg0tQW=t  s1CVEF"Hc%JM_إ+䣌brtB2]$R'9>2zX;'=`ygV潰%QdCrq䴵`.I)|1F=$c$G. ).x=J!2qaC0bv1e8αDb|ȇ斚b yxoyZߞ7Jn_orjVűu@ d(x;xA8fJ ǣ w") L‹Jm.p_f_v1>CiZ$] 7ThO@M3c1 RVq!WF$TDLZWС,}@VB*cM) Yu-=fʍe~/k׸L0N0Pc3M,aG^W)<u|hIlOKOd,"ʀ%rTJC/]/]]K2}'h: Lϱs E`q8ps_bj H]T.{ \\%r鹃$GPΧ]2,B:G|D[%)ܺҳ2˷"l>*!X5dR t6%u̸RʵsѨR_lFiQޒsW4#Uq\![#n).Q"Ax|i`ٜf}]Q@mv綋i78s/ȓ.c&I{)@8[7 PDSs?LO]ԣӸ?CͱL_[G="NW݉׆kTZHDds)Qv> zCwfWURAjn-]H)<@?5&fү{yH@K*>IL촵Ex.!>j(el >.oFe~|5+'N)ěM\'F Z7xuke/aeHYYnTY%۔U&Ŝ)vi*Ϋ8'b ".P( >U QJTν_$Rhҙ z,^ڸGt0O(_XY7)涫0@% F6q c0X\4'l0}k.k+1x%YvoK"OC>|LmA# $]S%"Ȥ r?&@[fzyHJV+8or W<` R_b2@?O=F?8eT+~3 C}TD(8dTr҅O4}q/tt<$#_Ј.v̽ )f  u/SsTJOmG :! 
'#PѢ($~NLb(y!G=GRcDlt+.Ŭ\|ϗ 3M5D0OB2UlRMNTQiv".,3L|x@=c?[ws\_"\M E-EMtr-|ad( ٫5Yښ[4zFʟr (Jȩ/5fqH*N"k,$S$b|()[7ӟ{ ;rdur]+EB?goFߖ_d&@50yk:&l`^N XFxveR.))&_}w>5̭dj)b/GZQYhej뿼wĝZ>`0Zh2Px9S?Ecg@B #q~罝r(8p:pǟS_j 8skZI+z9K X P[!'H2V0qň'%K!GJ20!㐛@4$4,QW)=7Uu #>9?_+|խݣ [vz:niuZhok _\/=A;x}XuXwU/xfNi"W½T}qҏ'` 6UOP䔗; ZJ<&bPwpsZf@&cA.pU'LJ`XG(-Gʏw' PY {{"+v)erع1)%lL2b?їSGG[0&5 W7=Ƭ,%-ΧSy!Tў҅C^oS.="cFXRJ'|c]LP PT2FT:)|)a1&t70PJ2iBZXRh6Y"r KҽMrSňTF6(6cFP23mn)>̜h 1WCt7CwaA҈>H9rV,1)돡4 B2_E!8xhi Ĭ(woI|.fK*"҂8vYcǨ6:Ն6둲>`o.[3Z|pߐG3x+jc`FkCEETY{p=tD=-XJpEK _}!HRqDksS0(zTGJq%5T8 #-֧1i-%Ѓ%%T/޼౶Dvqbw<+ X~ H[?+OV)1NJ7@Rm׳0O#0tq}v遶PlC= q"[Etwނ#yK87Y>uȿx Zh?(y/eWQU 拻!mI>(ipD^K[׏l`h-nR ,ƿmKJr҉\#pdd 6BmFJ(4x"QTM٠.A/ha7i QEL{4{`sqp=*M8{׀+` '7B[Z#9h@gAyȠJv!CJr*)Br7)q\{NH(P*\d.I怒m` RqXs:YHٸ 'TCV/@JbX h'<>+#3=_ЕǷld2=؈=.9|~-@'H P{!C~֙sK>1 [ '!\]1ѝOUw]hzGǽӳy2^ 7= {ޢ!J(|yШ$!9)5 Āa*J GƇ%UeP<} zQ)IUWΝ@J#O^j"mv2̙R ^JيQ]:(Ȣ"jg0*mU낹Nw%ǘCqvun`C*sGjdTP#(ڑ;xZbTމ-Ҝ+2iH&iA);] ,I.3t 3?~?rRA 4[Fq$LL %0k\;fs[E~qكL'˽7'[n2=8.F2UTR C#7 eDwT1;}YFvc3 4go?r|t'8Y>}A'%XR~W8y3ٍzDFi# ?K4fYfTwZg?Tl2Jx1IEfCa[!A6glͼI\72Y-Z0 0&}jxJUv{LdzFXxF(M??PJk+7<%\Iq(pMUW%qDJ֞1]7 F5_޵Qq60΂C R2=TL k:<,D+pzvz_GGz+ɲM[lD9NqyvqN*jފ\Htzʺ1w# Or13Ork( JM,OʵȜU )h}:2KN:5L~:[6Q7}/O{R4,E 94D0ŅxLx1`;6%yyΠϖ53YΝZ"V~1ìaҺ.䅙'z)弴]g}'|P]fkn"䰺1lNAesV~YHXWTE52r63ǖ.aiQ>sYRkR *Klz|*UK.+H]rwٽ ߣ0g,C4=Ds>wmf(3թb ֓Cy ӽ*<*%Bb rB,-rz-:[\vd xos+Z߾b8i"'}s< 4U+Ŝ{1_\ƽ*fӮ͹'Y/c-P<7[_) 'xүQ R):ûy$]M.޺FN{,F#X,xKFQ3gs0o2P"k'Lu#n QbyBgBYqyhVLڲ̞Z/2oO[iVSaKy2Obr"0fAɫWóóx7o.*t"ݳjRJe04'sL <6 (x$|G]Γbǂ7`z]ŴSlxj`E{u"dSM/Ȇ)3C&Dp8Y<9k(V?|uxy'{_1_9nE4ZT͊[3FE0k>^ÍbCE=6Sx ZޞR$BdLOt4<} ҊI8H,B@5(WbbTؚӵ+~_۷C\ bRFi$c K^ؗѸwQL5V6Z礈0|"^}d xW ;ULCjpO/50%rN-AwmV<ݓ/f(/,t%CuVKeI!Z1_P;q<ƃa8FWZdŪU C|AP~lĚ 8{ILfP;+Jd8rUA~#J[}㧅&УT,ղJ| >O4ѱ~Dֱ'fjKd6%lLN7-6:N?0{}/֒_w*7PfȯN݊ >œTz@`g&& 8.,c{jmv slɛO'?Ft+kkb?ޜi%K(ς"aT ffӷx'Wׇ*l\ԚɈDv 'e/wmL o)6M_GO\NN/NT o~A ] | ͺqgnB'vǟVڅ~SO'gó׈}o5MF"7JNu_Rvۥ|SvCH c֞GXg)ʀy^j^Uc3'*{m2+Qu|W'3oݵԍ_Q8W1F` xʥO= yzf"6 ۡ4_ qErk{"2MdD2+9 7sy;U@ŨB2Nʞ& @k0ro2 "ܢ ` pŖ)o5 ,UNS){KEҟ` 
[{Y2fP`1+Yg &XZ"WU]lYӰTxXCh*$1;n +OǖYdH;%LF11- CRL͔ۙYYuMB8x6)eZHj J?QDM7<\ #X!Ft#Zrp76GWΛUj0(9%0'LQ'GDl(q4,>`Մ+MXsM  `0 BD|slh: 3[d^U m<:PgtclMD@^z񗑥 %0 c2[.&[ Hدʊ!Zii ;k>N^"5\pe&+-\Xzz!KhA|jN{LX,蜾A](re# KFa+3:ѻiT9! c^xykb`vp(V22o߳r%,|c J`Fa3Dq(,$+@_Lۗqp=O 5Ҩtv@kE- P[ U "\0|SJ{K 3k,q)BH⥖׆R%?k!cДEō_X}43iQ"{3 j?k0MתV]|'%3J%\RN^:< 6W|RJ͋xuޏaKa1x߇ oE?E /b,/]ٱ>{r_Y7xr(6 Ї{s\|NL04 ctd ^׌[T} cpsx(&te\A,p2D%8+Bs1Y`4 &)˵i`8>ȭ[zUZ F>BM8)jo^GaigfAKr*!plB\Aw )\]̽1ǹVӓ`Խܻ8|}ӶQ# #pa C-(`֕w~o3e?r@O'Npr&#^ 1x4!$Tzx\nв~guM)O+;<##ӝ ?ڶQ#'tuGZH೤u7;k B8VPP{þg,էhAw=޻mC xXdlz \{^6.NSel </?qb-=;)zUS[ wcϧdȋ}Zˢڡouس"$AUk}~ ;=*3WYtLc\A5;r5qn8\G$g ~,!bܻ0ob,=3\b8հx9 }F AC#ȯ?>џ8x1hfiߠ \ ]'CB;Pfm@:dVf ʀFw^Emӕ+~8֋ωeH+-a-c&Ȟ`AE-2Y$a[Цۿ[d#ow$ m c*'}%yeٔ2 Q>߈C9PwtcyR5քv©ה r$R3DL$4rNi @aw|wn&ǑuzswG*沒NX`T߮9^E8/ cڻ* 줠,ifV,1?fʢ[x4k͇N^Vjv)a?ḋCRՇysp}k~Uyj5Kfme 9l?X3, [F$O?;;x}HqxdFg+٬7;Sև5;x|76hQPGIPev'﷋ w[j$ƼW{ux|~Xe7ӱq .Q#o^o] ns@CtƵs_#v&{|;?:d]t悃el𜝜#՞d{~>:>dM (DFZC h4":8i5YĦkVF38Xd&1w az@ >}cM1,X1x*@{#b0"t*ڴ`^R&Lm16]$>z+PMh3nbOT޻wxrхH`rNjh(2r6 JfH;}]=vx/o67w(= Ǿ-R\lFhӎ4?!E/ш s*w@%H~BH)p9dc($֝*u 7?7KsY5NB0eCTG> F,'R".!|+JExeMp¯I$[$l@6xgGء|hC.hYJB*Wa ҕA qq'@T%}s//2u&ee~n.S(sbJĊ\\CX0&ˎ;=_6(+`-g3xdh᧠%5 2?:-W«c,ғ^ӳ?ys|QYu0&.n֠)u{VRl>i^H6!٣8cGW` 73q_6hb\>J>˓Wo^JIW AsPVqK1MH ܯ\[ХlH31C|$mÌ2y`f]d=j{>{lli)'i6Bj@!^EM9&`d۝hAL=gѭ 4^ X5@:u 5/z:\EFD\ʳo΍>Q nU6@cHe7ꩧ-J[@ްUIHiEI-$Hz@ ]!wH^TK/Qo:z3T`} E-{:TۓH:lh|EHXm Lyc3bZXå pa9Vޕ.T]Y\iEOnUt2MLϭwd!lX??Z&Z0k &ADe.GUrTވP+obxQ3VN4Eů gJ"r摤>|U^#-=( /K*Q坪&,kl_͜ɫi4O! u#;]ѸiZ,}b-B(:]q?#l&0LivS >̱ƣ1K^Rs.=>ףd (t5o"zW>``"P5X,F744݈(Hq ֭do}[6st=uH}}pNAkkJ.c;@PhU$D{iÚ1?ͩzAٽ-^ `4qʗ*1'b|cCk}mm-)_!b? ̑+ _L`cvٕycб\(s'lY#85?R OF=-DΞwԫJynJBrWku# 8EiWd x躊Aď d?>G&O`d OGX r5zufWyۃD:_nooqƵSA+둋l܁'u!@2wbxxS @wJ(ұÁۍͻ O߰Uxa lѤW|A'nw.t$Cn#sO(d|̩ $?'{_`(|wwXI}bGB N'E_cȟJ:QAs?Iܫ)u{aQ5!ߗ% oX[ /^/Y&M= XM`JНEc|(B[kqgۑ5U=h#s*S D, !a0Y1K6 T/gʃq*88 -ajfg jlJgS_98ܭTNilGë}_/yŤ"cFau/'̵.? 
E&凓2pr ɇmJ[ ak@S$ x*yoP3 LD2kYϪc??>_OAr+i `x?\]OpR.>ة0?Q ^ף{\srD(kѓq(.C10L!qɨ[ߒ; *^Xgwh6>ObO+FGܲ=I+6Lu&ԣﲶq>oNPLhKO<:Oʕ ~&f&ֶ7XYXlQE Ҵdcr%E60DI]XGt`hfHH'8LD9ۍtA u-q&E$5pY7<|aIzg/ + 2K \$NpǨ3vηN0F(=# u ЕWxC=Й~ {60+TFz[6_W`bjӟmulPn),v3kOo=wr9?1j ܐG]phNpu*YԢf9ϕxwg8>8\3貽yqCn*N><}%ip{3Reݗ[*wjT$؜&{eU2ă*^0ZF4>}.^!H'1ḓ!]8sv‡%:TS Ljɹ<1ޕ9{%q4nT#R 4\ ,&)ϾC=ҳ(ֈv〜 JOxU,z*RxAN4NI!:zI<QMvk57঄0/}x(;G>{='z}LLCIMNnqt?q"З7Q#F&qӜ_[MT+@Eeht&H1svxIqii!7̗kbl햋֣ƹM7-};'|!\vAӛ=[WpB75PYN4؅]4]{o (|ƹ8_El8"2Yșḏ^UFIt${goN{)S>;8 ,pш^!K~rST7J0q W?+B ٔG uCK(uZi7/V)e6lmo}Cs'[e6`/^˒UMRE.ߥ;4K?&"UH)zk6 p>U6ʝ7\\.ΎN_H6i'K(H|sE+"(}Gkڈ!gN;ܸoNԆ7,Q3zO&[#+kA ڣڈ0h#4Ënkz=Sn]& Bv KŹ&$؅76?_ڳded62l5;vI?8o?ED1c`O$ ahFs'!O懱e_P4fk؍W͒EF PIo04(DDhpG3RtT!z/iܽDs"6_&0I$$FD1{hRIHjJs̟&RG"Ɣ<7:3b^E^-u͠cpU+l**6ԌNU`O]W,ʜB)k۩E?P])fM42B:)q n]ooTe= s#4MrjWȝ3&'LGr& ` IY/W edgkkk/)Ax_>*I*nIڮՓG'1d[|ǵztD-ngQƱ ߮<պ&M*rDynU.}ZVvEXw7r^I߷A䡖:Gwٚ"֒ILМ:$"5ȩ'YA 8=Ad8R\e<NO#!3> $V)-p_&8j_ rJ(R(%-tT;(-袨K+N-0 h.t H:fKƥ, ήbI)4$ @K^0Obo*4x?غNW&x>f>Zs`NQD}Թ(- ǡ JxPХ=9 4k5+J5>#R7 m70nqUM‰=c>h6rbcTؔnSңӟ&@W_),^F\Tc?^@:b 9‚2TnЋo¥x[N(ʓCF'`Fh[a4ExncqFl{jd-sd8$eaitŠg*>y;3#n~Kv墐EJIEK~eT m'{+b::jt4cH'`$  -<7 =hTƌc^RV_6v.ż #lkw(JXx`=!ȗ^`35nTIzB ˵^?iPDpkO`;~ON<Ĥ~Jۀ,- B"B_xXt;syI Cr#)$C]n(O<cgWo̊q1  㱸5פnEy;7f8vI]2; yq  )]:E#vKj]?‹ J8߫f>~Cu"rOC! =NoOM'C.yH^@ZipJD 'KBTb]J$D@{wP!s^}Sj:r5 n61Aw)^{**?-`u*HBqQ >^F&ݺw0T_\OEizDx?@Q!R:ĥEt$m'zuAb"/a(+k%oj(wJqA2ء邴r0^ kULB742cP@K6i`S}/vE {!O`#"? 
e-+-ڢP```}[-E^rus2ݰg+NIBU J2P_񀄈J @$GӰ8叻"3#aI-jB\ԢxD "b`6,k<,{$\\^ /(nP4''eqC.<9{q9{": =6v>6^)p fi[kZ{b鸗SAh)wCI|:JjZ W$L΀9F 3qg4Dk>xXSE@zw&Z*0ôO?H76+nE7A_178C7b jEibjgfoST[0Z0"-47H7nyhAgEp`μiM5#M6؏D<f+ f+dY:1fG:ߔm$ (Y{#ڊ8lɱ}[A;m%wW߶Jxu*aȘs6$ eKN=ڄ[G9 SkXy)[#W7"RPEuۤx }Agk83IUqa ަn%q-O n &EҐ3D Zq56PNxH+p)Lf{^Ěv #>DQ/ b`2 -Q;oJۢlHq+IďK߭EB&fzC[ u>*P(jeixi"܋SIHZ{1DBZ;GG[bD-A_@.jPwd ͼ4=gǫQU35i+T D+=|W'y㓐Iʍ "$O\BI6J%-R)R^}o"H=]"E/D/:KP k0P:<(&'@$/OXC4Xł ۩|:hƀ93uX꙰N XB1fd< "6@$k΀ Q1A4 +&Nm!K>]i Us)dvJ #M~Q$hӁE@ ^dA pNEC&(ՁRZ|Afئ1DjGu_t81ir̹?"1VnDۭzIDshq=P{c` q$[pm{:,> Si jL1 j<.ŨA7 n~W?@~֍TI  uD'Lj]sWkJPޅimM1 0Zaw$M~*cބ!g@$4 0<eD#OD>uM Lta|'SArl\ Q0+&d:&ՇILOٗ1Iʿr(fJnWWwʒ]>`)W=L]d/$u qT j g9&m$ik0Zt&J_nMeM;ٱ9u oA(`\G}ZOhz1/>t!Sv{!9{%CFgG"O;?8:\|Osa ij'F/7n+lZ}JiVc8S)B`ꫬK&HZ*1AKR^sp2QzN.K93sa0+֋r3D/{!X-D8,HX>[_ ]k0w=7CD)-(Dg- #aH U {B.3) 6cD.M|2ŵM)ML"83o39ǣ`²R|=s$uݤoysR݅M6\R~>yjO=8ۻ8vONꗽ2"eHWǓw"s2*=j& ug?r5zX>>D1 ;b>EwکkMPn!?@%UݢD{gΨi4w ǿdN'oE4=K .:sLw6svW2Y0?irO%t"&O b<^ԻCbGMewQ#;҆Rܑ~Jŝx%"EڔU Gf9Fet2 ?|~&ɬbu?D?!Pv~Lg83;ǹ|Fg9kv'yVuloJ{aY_qW-wHєkxu7YiO ?.~Lh^BA~çMؽ> =t\ R[E)rG$(1alj>EgHޑHwn-zJAU]1{7h Ë|#|soHH )L[`aخ쩽I-&t-0tM5Tyx:wۇrdz7ZoKGs.M936-de oY( ĝ6:*4x3',;dƒ)P#d^VUBՈ;@5׾-b(1r>w:u1=D~WHQLPdG֜\oI\GI2=$Rk)6Ȕ.&V/7յZIZb,yڶ$AbNcu2aT̉(A%d xCY[% v=9y݋p_"Ŝ(nEQRZ;Qchܹ1ҲRf,) rlֻUgpfZf}L_ulX*ʳ/{)vȱ.XIt?LUO\.S{[ࣖg)P0xQFbceBD^/4bNduU۰y@y{(?~?bfjt(GeԷԍVU=ɳGlߙ澵PܲƂڗp+;8?Z^Zd)RW`_vY}LxBbY:^/FF!"Ǎw44cdxpA 2C-J8/!BGa#G|x uV!H#J"-+)}I рRE@@+O'M-/R~aKpe֛R)gl]x U]]Ti;sL]$/"I?YhǕ% HVv;lOܾ%[dG xPWR} H| +)@7aA!~M魥hʊ𞑗7R6?c䥧DPe;ER+E|!qRBMHlN}B <$BH _qA{^62wь^ntΛ%}p D +ُEd-p|S$>I~w69//>kXVeOiN- .}Z˜3w-&x#<ogc G -.8mѵ5@$!s\ zy~tMcX^doo0K I*U09p\L@-!A[L8ޚ?*[$4$$6&*>Ý)Y9bʣz84->Sy>˓\3I)]?X)h l/\J%A=ϥ@$vI.{2T"[ ]'LMHMh ޾Y+IBN%z0of!hxZA,Sƻ>.tet\FǕ3D3kR%1o iKL<~} "F+UKt#u ڀ! 
lshcN>ьJ2~ϧñF@bș𱒤[&J aL `ְ7Is9&v5'eON/]::K>$(MV \{.W׃;O lȳS.oJɼ5Dj9JWA8#@q0VueVF*SL!*}$I+ՓA1C()h݆&4M0zc&̔*v ʼn6bBv0Ĥ4#Q[B!RB#\@5MZ{8 ѻ g2%>`@/Տ8j B99v'hӵfcHtϔ)=>[e7QzEbwڷ?{o6 'J2;ݪ8*ݶS5}y5DȒJS/;ENW, pَL \OUO.OOF8Yh6 q3(+upK]csy"`>Eur崱"P=# UK =o䂂"$xZ CéIa,b9ImkF=Iȍm=S !7OOL})KY Zbc0UM?9x :͂MFl$2Xhԗ{AIRÉ6DƘnKCiª#"kK`s⡖=@ `&|: nTL7fr]):OL p/??&~0 ŎK^<\T2xDrW+b\6E%/#lyi?{20 d*%RgcٹI4oii{!<}g`,wGでtOޝgO`75z7Xuji],@YK`:y&X"V斊^+̈́DM1rKhsiVbZ?,4KI$u)PR˺7-$.?,@5]tuZB2!">'AWdrwy/{ T\ k<) ]*(*`9PD&am4GeK%瀦FA@\Y^۟ż+ y\Z'g](h>?tE*}XаU q vwN t188kVP>tWM}k--5~qzBB-d``l ́OC֤@,]}&Or[h*J,zY!|y&0'x (7w!6uċ:~ QzWiD-^ 1#pڻmØYYB[tHG4 B^]f~mitp &jepv,Tnȉӝ;N+l&yu y&4XGW'`N(l!,g'Qi&<*<Z<(`\ԶjquTd|b_2"U{_(LC#)3H M! (Kb0t94 q %=4'ka'1.@>(*e'4~w:Ĭ,# 'WWhuTgae}{Lܱ)S;%)I2WڌpViJk;Oꩮ@{0 M6Wp)|SJe!?bc,KUJ禔rv/ 5moN_*NoY%M[b 5?pFW t*Mٲw?+^;KeؚܳoV?RJ> 'uRfsU"\ M.xDI$o~&~vgPkjRg orm]P|;$NQ ŏ/޷!S.\7"2p(=+YMԒ5WWMӇTi d2>K~GX>F۪`3FEʜ;ܣcNF φl`nD[mIkqiЩ$$X9QLTYgqݕ3[7Miߍ>O'a7۴rӊZ7tN#͏rCYQ"%Φڔ4 F3$ 44NO1}df0PZ̒A"VoQM5TJkyOq&˧TP፟ J3N /wTx"O,]͍Od}60xN(>%7)b1d5l0 +5xy`6yֻa1t]k{3r_;-k]j?xjz"?lOcyp&73_J4ew_֏W2+__3uOJg//-='@ܗ$7q_JM]D}5q_BU;p4Ɵ)T ,]gܗJ5}YIRǺ,q_CUVi8ZtUҙgܗӪq_{ܗS;j`a^, 8_E4B!8OdcI;͟%5^"Q\#F~ @+Sb?Naр`v]0IїlT @0M2 yJX,-_p(T51>M(%u 8@ :\I^(z\#\SS7CNq Uω.-%k>͕Yyvq&»[2׍j#sO_ѻ"P/#(xO'G߽z*7)ĮEg"3y-;ZΈEQQr7A C;i\ˆh:|`$6?BWU(sGYQ)>iMa /*}b?/H/k;'6U>@M(J-^ߵ{Ëvhd4Gg궃wM܇{Erd߻:ofԥ($&lp^fj+%cXd`'Il4+ۛlms4쑒g.!q:[+!H剶 FU]*kd9cQ|QOڶx2]Y PttG|V5 Rvc6}abc>Iԛq屡 z,XaS*z&F,FS 09wJ6 /qȭ%B>۩oGh`u[WDJw !ͻLIj-Ldx ^a1w/T޴06WRӘ/OD]"{i(12tT| .ge& qJy>>oYv_Jikh"piza!%Bpmͬf\Vp[eJ![{W! Nau۰-fQG -˓mtiŒ镪46nˇv4~x'<@XHr8bfl{{|;!D*$X%UMZ1][6ы$3QGE܃?v{c5 <ϟ?@ hkj 蛍4B,5" UҎ(PESXdM̚ZVk4* W,bR"fżFMy Qg甠\KK6o(luұ>g5ڬ)H[v~fJP6Q36{+Y״6ԜܼkHn΋sy&;/p,ⳡvwJr~mcfhVbgA[B;s|ן7聁u6v}l\N9I*l@kTr:Sόѻg'GDQ%ǃe|@+/W@XPse}ij 'E?A;C*UݣG1@KN6<+_qG@+y2Ѹ6'Oy?GcԚ>  ԇ!:u>}_oo).?jBW; > ̔/;wkt/ %^It1KpJ;}`NGݍ%g%WQGہJwk B̬/`i^ڧoV&D;_3?oJBrnp<pnڍ"T<- nipϙo6̥tgFp=sw=_d:nO=_,Lʣx2B՛/"7/eA}񧤍+ L\h|*}Rzaw!i:{!0d׽ 4Tm! 
ieM[qRCͅeka>+ 4S Դ-E\Qhj ^}4Ε^34_[7GIWooJ-ndw%?nz7NךDnV)?}%żCLLBLp|7$[@TqK\ !} we#w 2j1TjM -ϘHP?Y5.Iby%[?='kˎQ,|9!2U6e`)!mXbU:zRI*$p/A~*cY3#6a=uVlU?o6E20_t g ^p2(&>}C!,:1ލnBԋSc8&zrW6`n F~pSSOso0G3."HUґ6T<ؤ?C<#9 H\P4\)W]QX*(+*YJ-]&3)&g_5ZH \,̲TVϲ4}Lk}vҜcAQ_0!wHWx GSPy4r;;k}}k/[;,|:ME)#>K|qZyk-kD5$ҙo凹 mX> 8pf <Z2K[Ҵi,9C%ukbs,em*fȭrEżI& l1p gXcLs58DSN`w4,4Mfg1v9um|ڸ}ymO *IG egǖ{ck{swgsw{wO[;/6v;t|jO8gck-YS8笱NB=C`@ pjiAo0GutfOX?<`Fj㋋hE瘐fF:uHǯQcN o~ϊ==MtaɑBRPo4CJmzY#*97J~Ld/r|c/KhuJ3 TJa7 ^dWat]":pq5ź, 8+Z q=D^LLgnBOh癉DI:s5ˁ@ųDiS/]C˯]} p19׋)KImOP9}Zk=}N{WQ?:Z0s解gB)/ʄ•ɊtdpNDt7Z+="Ay$Zr k4Rq#!eF$҉`j݀}3\M;kki?[盭\s:v-3km<;NH\=}/v@_wbLXU-Oj9BWB V8檫sj;K͕e)oe@]vuybhdǒxG%P1ХK\m풎f] "k;XҭI#>c,c锢]umԁyI,yM&zɶ2YjIV4ŋn*Vؗ ёn:bO,wXAtK/H镠DC.^htjl7J2HE!Arh#<"ݍN_A"7*w[֢bfeE`>U0:v{qҽǫrJT;ǯR0Ā!pZuhrR4~fD61\^E'T`k:+0ne#0q?}ڂsY˞>34_"Ia,2I4L&?a7k1ՙT6IhSU͙EɹAB U {WAKNx*$SЫkb(b5妻cx DO#FP/f,NTϟ}a{"b.R\Bi{T.i;m%f>Ewx, g,ȮPniLsuV窱u'wMT)c-+8\@+Ɉ.cޥ%} 7M L]@QK 1q OOEi%^Z桥eo¾9׮(yU`&?Ǥ֍#WMwW:O*uH-~4h,po$furەe[T 걚Y"޺g״Q!UH.q1;}s9 0ӋNxܵ%U,v[\ޒQ7Ǚ/V^d\?U +z.O`&qw8&@M*XGDzu<4%ru(폮8Uп0Xb?-(1z~ۑe-Ĝ;I518C8C-PX$qD X9PkV/1mX?eeAqTMi qT˩)JQ(c`WHͮryjYZQno5?xJP"356A>MOo`i@J8%y,gH529nEl'xLJv6\G|3Y%E m͇lވ C B{uhHZωHHǴvG[(_SrRl4 yq\jÖUfF;Os诋X iC,OVzDAt}ˣW{x@w!o/6tF#-)'Q|Fk?a*oIwG8IҢ9M譌6n+6@WK+';)21IIハ]W _}B[d hgc>Jӯ%]zfw?H0~?c`=FU8Pb0K@r-|޺xQgIx,8٤w?a\4 Hߑъ:]:Q`E ,`֏PÎNv:ho 6qFn iB>d$R!?I$MMܒz~cfx>;R,v{$²9{K&:}uaԞahdxЃ{RaɈY]f{3G.ŝJ(;:W6$L"h腽+0EW?JRWQ#b,f % |X h v?g? (&`>'2 H!Y) 4sEe|y d=@ѵcs['1TF`jqQXaYsX >KcԇD $)~g3)I XG!_bۆl#AxP$sKGhCom>OhMÚ$ qsi6V] )\֖&C70Fa XTse^2t4&)_S׋Ns_Sv~k.5˿S1I#܆=Sh:H_g!ID-yJb,Ljf1sY[9o*4.oh:J{ 5۰Yv}>sBS'n rZ_|*9\~3 aʏN;3"u:l0IKvSUwIvKX_S ՜!f+WXfL{ou@]dvVтJS-| 6k7dmk#3zM6B:K O"P_q@ڱ-c2ȓFZeCzG! p[W'[3^qBN]w0"p[nvJhk-Ն eEi(FI{MxZ^W@XA.*O&#h%.@d5G1k| }=xL⼵0Ek=L;^# `bIP̆Cr{Y&*VY=5zqןNN gvoX=`)G7.3mhUQm.4Pt/ža|{#~U:q. 
D'lNruݛp4P>)mDѮt/JaJKRArx|JMЉ4}:B{x1Y2 ˢvs*"jYY=P '8riB&VN1 `,Uu5^OeA:~Fÿ8.5;_m*+-?^1$jh/&\feSI%bTG'dDh Ƭl~rOD)"/{03xu.7ysݸsمITICAY]w4ʨwd /s<,NY< < e~ѹ3u1l7Pn%Lp1D^&<գآ6^|1,)XLO*GvW-̴W07aYGo;t?_wR8!҃òBS*IO2&o SPxddV WruG1 0~$ؼq8S#R3/K~|mXLx,~6=t Lw쵶n~2˱ה+:-.g%lYS>JO\S+)'|ϴhZ ?&+TK$qJ4!J%42Ô>ٙڅb&j| 82V؝ډ[S";?g1c !rw$g~e8h`tL=Bpև{[؊44dzAP{-,\YuZNdLo3Xf(ǹFǴ"g kfOm?0t3ޅ!fp7Hokq"΅X\6dE|یnqr iChU/ 1j,F[R#Oí s|-1g̿DS'Jv![Z-y}-lYZ h䮵qV_C/G1<5inOko )ȇ Y4cl )Y=(0AbBtJ@>g%T'kL%YTiLT~ΪU 6 @a1$0Q04ƀ)gV6T+?ԁcA,ҧ _dZL8%q>wGR~`:RD'1A weqI?O?EwZ*W` q>%(rn >j&ʒjM"uwsMCِ%%0"YNvb 'iţSgS*mE#$^ 7 Zͽ*͕@a) }Z$O{4h+~h+LaA^- rϱl4/>xO iԇ_BS(a(&rM;@&rm)إ™dS#q}ypM#q.9*;'D^}.zR736sV5TOD~ _μ"9LCGAbf-V ~,4WUwqC )jЭ8u ewf5 o|.<_—cۃξ z `xx>Ioӓt60cHXL^1 m%̬fs$^n #yϴi3q Ə״{÷o< }:Pj&ʹX-Y#wt9mvgC4hˮ#!zǤf xA4/vW^: G:0o.8>7%=[-lxh|,|pXғ`8; \z!ִI y@ 9G+R:aD`oQf1P5kX D4F[@Ka|N)d.T+ftxrɻnど,I|\Ua_rq˞}1/lo X 5Ӻ:I&RD6^\1//,ȱ{~rnK?4)zZpA2WyY JΚG2QVi0o8ZSNZ[qti#ՠ^B㎨1gؑÎ{/L;53WUGըzDZGk}H>O;9;Gjc iE93לOYfplXUD"CA>s%Ϗo=M,a ;n.|}M!Sߏ=fSxyH6L2?4Y>rhp`Xcӣ&U2n~۬#f#0~~~ {$>߇{[iq}H'_Who4~9+t s zνStF?>;OmD%St9-OkP |n qH|B 2d9aٌFS*Չ͉nL?O(d4F )YD,,O(5wS gzJL;h`w_7FQ9FqvdlZ_:HavʿcFp(cn-A,$Qf5LT'^̤ʹ6wIx"+,&[&ߚMoz7+&Jm<8C4b /7!Q;jOǿCǯdy'OD=T\cjLYs4e=T / ŠL: m'դ@a뗗_C^A5#eW\V#hW dmc7KL5&oҷ]J]94b6!R=lKJ(2]82F^u5v#-9ޕJNa\L/whff #-Na$%sIEԤ"jRVTD[Fm ANIXJqAlʖ\-JB3'|wӲfi6@/1[l!-,G s-(2MŦ,6K&jK'64S"LƖK$*+_+5 me1mѤ mf&ԩU{lF`o7CE!4[CßFÃiu x^x^V[?޷~Qoap5¬S$nnn5EBJM~M{M"tBj02ڷ:4v|^̸hfԋ7jIPjB_,HaR; ˪>'X`'~@뀲ipz۱I6M}Hǃu!>H머/b7 \#T#o>|*:zJ^[gWKW`ڀ9p4分pqWWWVYW#9oXNJ 7'#f54.rx&d`un_hxjUgvҒ#S{#R>#66_l 3RO3[0 dcl,H/h|Eq;yN_Ӝ3峪J\cD|ΔP-f5. 9DT! 
Kک@57C|B Ի‹ۛ2/sƤ1U [ ' ^HftfD4\c2g2Y(䅆ρ.5gf3@XΤNQ]u;u2эsmbxlJ≮NAÇڌE>6ju*,\KUK02V5%D`x~q0 ]XMsK.%9iT$*yzvxfSOD|A;R+LZc`h}w} e ㌑T,q<;M0TP5O SeݔP)mNz7TLc&*w,'Dq:#wrÍRK 4V(cIAH;XΦX9!SMXqVKk`@qo\@4:Ckb "&1aomtO;۪kc/q܄6F`,)ͩhpxJ@ZNI&|r/./TU67%gLq f4h`= bC!Xni?om/E5%κB+!f~s 1KW7tt.޸aiǺo!ޤEF2aW) A%~P *b*k1Y3H  %rj*WLLە1QRe 4]U 4&u!4vDGnW6J*0Gu~>G=@,eTAmjiRw<bv\p/VdڗsM*H5 "5[N66831F<ᑑ!کQZ$z*dڞrJĸ8)߀WA|:)?֦b[S@B}ĐJ{cիý6ލw-@Ys6ISBRpʽr ;_`.ۇJAs12tS. Eq cY`t:,mԖpi錜='me*sEcOKv.6\)\-]Ī3~EQ]SNjR >K/Uָh.-nSq(4gB2ϤTgRȿ32 "$w1W^<RA'1rl9ARҾWEcdaNPv @Z-'I!`+.M´R*ijn+RPYRUUHZ?)?*UX"`|2*[YH*>ZA^ʖF*eaTD6fv\.6W);;O7`U<k1OB&Nk`oggwCno6O y?ɄPg[P ՘ Sac*\6SaP+e8I9 #+ gdʲWi<Q+`YkV!"C)lЋ`.@F7y8F{4rB(02E6Ɔѯd JSF.Jµȍ8ܱ[6MkH|v!բ+@L[鞨EYߢS#imR1꟣adB@6Y$'=Suțm)fEyvϴUӗۙ 2cn'V*&x~|?o3h6X,$dbJxsͿlkJFA]PxޡpxGӧm$L|Ąih[`;OdJKQ}wZ YRr5v/H :e֒GB?c9 o,j QBCAa "m5# mal*aga،ueɐ>nEVZ+|lҺ+.[}qVfu/ zωNZ\}X:Ĝj*Q3nyKi+a4b&L wskgbl쿋xdoe[Yocml%lDz͒ϝ9+}+4jPƖ_E]Jڙz d 5>OZ "c1~Ӆo~ ErB$'yY`?R[.lJ( x9pGdB_WɅ%4ųYīs4aLv:gD0b& M2ccXlnxܵ$-X[y(bS2-CX_i]S+K9~upP}ٜ[)}dJiPqRvH'+sUKvsupUnWfsY(mvonVk:Xq9W)][ѹJ43Ϡ"kT>/6[eȑˣD_7oW/Ұ09|hF0e0* :Qp^yW)L~TB(lƨw+c`U!EvGŞpCwpk} t/~]o7On/vw/;; BLl 1{ 1$M]F4S# `(<1ٛGeux]j^UfAtF8\d2 Q)5YLj7h57ajA' ~p*ZN) ~c=K.8jYqncIiy||ɕf,X_ocH[=Q=tjJ'N'Pv'h 0^(裵kI!dOcv"1 s 0ᢘ&;MЉ_0&ag*cj/֞3u(Tp DstГ/0g&vk< O{#`zdk0hG-& OiZIgm4Q#5#Rچρ =b_FhJ w{~R'^_d'< m`n T+oqטQ{)K0 bߠ?uYFCOeO:?%+44T1 :}/ $za@ʬ.q3I*Jpw$15_ps|Dp3r" 7k=ȯ%&N6G7HEZO,ٲ~Ěbu'*R5^g [9Ә61ge=7La&_pt; BY ?A 2!\#(thp/>>D4 >(j1DK?[,we-*Lπ(06zDC?i(yvvlݝm^_ک_ݡ?߹9{%{gkc-){ eJ/.n7~'9 -1vpFM xl ot!ӌ͹rB;cTl?OvdNz\"ٯA%l*R0Qq?&DrOn"/}K+m-6/] ԇ,Ꮆ>:`}Ah) }U."Ng6]gCl.2<4DA0ƟOx]ğ)dƋ1OYO(+|UݻaỜǙ w.] x]wt-.5n}c^j2 e918x"w \yb5/l*ckYw-4;8+̐ bY3ʽ{0Uw 'Ze]:!. 
$\zwp7_"]>]Tt32_X#N5"s!iM"+1:11?Ejrd,?,˗:8NJƲTi}GUj,U<-┩xy ?3HfZ{j6'xkϼ,h);*IN{۫̕h?0־'3k[e[?U&±qMUXJGaYgSϤ's x2g׼wcg{[m%@/C\NR%0)SԭLhqn-UNxAEZf#z+>Z]^!+侚.|W͟qgv&ǍT[L-Sv^O|3%ɽ&r5!/6R~s,m5ˮ/ѻJ9!Vr\r!n㣻k3$HhF)#Igm2^5|yZO6z[[z_yq񗍿n% b7K"k,N+!x3iEpRΡtCI9Ѷ[ P/0DʗL̎kX7 U,k L40asäm U]aqX8ەnqrc!IGnRjSLߘ(WS r$(f 3}.L1ڰ&ea {0;[?Fƙs'yqZ9e'v$TDRgm}+B%xr<<}^?=Do|;\hOJ[ݹmi[埉dZ" s>Mܯ-lt墎{89o٥@g*E%.+:J*P@5OӹZď"^R ]Tҽu[o4ߛD31^q G7àgeOƋb.{t3D^f;hƅޝ* >Th2Xkpc VFR갽չZ^œvL\0LqCnsMWejTd]TKK%,9-ao%K'h*z&8$mĦ`6Җ`=߽$bKĒwF{@aQ1 }[R$ǜJ98c7? ArqG?"\[a*VèZuhq&W!9դɊVBG.|&3C!+7m<]A'm9 +C#zVVjHtv"Qp|?^(^hA䛓zbcȢձRC=|uu}T%9^V߽4UU[tq{٣/Q8HK/,EhpN|I;ӡk?OTҍǀG=HB'Z嫗㣓3!"2v:q AYp.q 0,DNs0&4Ȥh4Ɲ'Y?:OIn"3ZIԼHI*׹H}N)}-glt,x3$\Ȋc7ސ.eWsJ>M*&Ճ[,I뙒#L$J=!Q[kEL% EŇ9\'ԑF%B| :mp)P)˃T9>j'BIԐT i*2BV`ZLK Ə;ZCGymU}3VvnB/ʠZ_U vK-d:ːTǐy~j>4.-FPɪUx ‡ѷ6m˦R?Ϧ2l`xzmjTØG|)8)uGJ=vd@ϑ:óWg5%s Wfʅg^m@^KB(浲X>Y!B^`Gɧt.õѥK}8 fp9}eY)ԣp ٫ RY46~-ruW>_Ӓ_䷆ h[%y$BU\.ɪ.iogd~k_dQ(Cϯٜ$i#0Ls8S⯨Rw+frlxW|hvb) ?4]6¯W.K(h@…<`[>@UӇ'H-|siч^ŠQA,hHi&ӌMs5S+=PZmR`!TIYa_Kme5.BE)(d)(d*hJWEDĬ w|i7*SB 7ݾ*KY=N^,)7 o0N4_?o6_QB@REU 'n|0H e6*k]7 FA(KcZ5):*r5 9h _z(2SX>@UZR[Z/UT+Ƽ|Z-(nD8-/Pd 콟nmWa/*5݄݋Ax3T|{*eH|qsWXU˓priy|AIxd9ӖZ5z嗓Gk%{B67I U2,GxIYmbwoW^-O;:;| b=Dc20E ҡشV_Oin5fx0S@'ogT#h?z!pW?o >K,t;԰ A.F!Ú،z1~)Lib&#ͰԮ;h,Y0?4^mpl#Xs6 > ;\Ū@ׅ\/\ke .+f5n-f۷,4[ә­4kjFSBll#9'Du{Ad3s7f/_ WT{zY{|nz5nh6TKU 2)72"ovxQVp.soʱR3*( -H།U/9 WB{['R1mp[&Z'wmDʴ{O .3K3omn+;뛻ocb!2Rrq"^G:3E (-S\=FAҏ;қ߉Q1##Wk!tgI{Άqoc B~b/뻏,fQlS=)n z&vBW, DT>#؊om17Ƙ,?BBSMF6Dgõ^,Zȗ@LV1Jj`!q?|?߄Na8z~wE&T [H{`FZ6] 솿ޚ@nXK %O =d3)I G9::vqWe8m jQ<ĄNX &h鈄 ?iD5(Z` |:zufY"a) 2>>)uD1XE,iD G _2Ό_8}׭N{ 1̵ ԚZlg1j`*;.0iux< f}|.4m@`^@dt4Qbtod Ju:12*x $Rqb{GoYCti b^QZYp]Lw"ܠڂP/uR3l> X*]{q1oq_Wzik?؛m%p\sNFFrqmI臘k9~m U4;AWNh ˦Ͻ-&{̻IwT{M@E/<,Ru&lte@KT &3!YJ@Fqh˘A^Hl"+ ] pspr*4#4­LhX\36"!IуpbX4~i])q9$-'=u;AD7 P 7N0O f f'v_yh倒`pFqG5Ы7gZmk 8بCGr|tzVynV$G=jm_KWM yds/сt[45~sS.>~VlKkm̟RqHpmL-& HjB5|qp˳%|FHnExX{a*PsZvKu324 5C.#m*2\,2v6PGP'u<'1 6̻h蹟ZQüM&4>9ݷ/{I?5 x`dGZdpA=>J5Q!MTHI8_FR_|g?.s6?N U:0t$ݥ; 
)uԙFp3apgL_KN~kXƍZ?{PsFǩ8Q}7/e#+>ߖ5J:a ̅L֦d3[W\[yRyTZ*ZsA-4Qnϣ4㳃nwvboɤ)uBMm;ʟJ5#ℾ@xB.%X9T8. nCmq93Y;]5Urovb, ˅"NJmKG]_0z膴1lv҇n!d݆~Je(*)5LAE{x"I-m&´ceBi#=׫-t3W)D v_S q'dd 3N߳{Φ}3h,(FyS v3f.r* INc#||ნ_9x jT$YJJ'$MޯgxZF1`Ȝ6-^)iPXG3,MfC=9!hS]ST$sKWF7 ~ ʄvy6_G1"v{Ehwdzq2!gbx0 f>g}O ?BԠEG2?QQf6? (%d]럷eZݠ,ǵ@K}M􇤙՛?%~$RI-%, q'B&("ڔʃ ?*Wf(nٖG(ۻNTd┱ϸ3==yzGE^C?{db Eg? 2}@7cA1DW7=!$`K]BxR"BPE<.ېX**-/s T?7Ma1J Q|:᝘<'h*FoSW\8|hH,@մ}D[D?Ӄ|8ĩAIC0X?.}*1š!!?$)10 _.2FT'l,GBoL@#K*CHD! 1VU@3VgP4^ FK%8ײF4=W iOYr3}ln*&B!& ~ a ji}J iBE %23a1bjh2eBC3VtN[b{NFAѐ@u*WQ/{b<0n{C-_"ځ? DC"9dByZ!sH1 &."UCDQ m¤:Z;})ՁkDOɥX9cyiGr0R~tz~dJb{Tb:awa_;$9:M.˒M͈bLs%nB~V 4ҚoX̢0Gu:lIKveDR d2W'Y@,i̫Fv:I {nBQy`5$t"0QjSZCŷ2>*PK=fh3m@ZBdsȔua$7 ZrChCU#M)F5- G y~5&M(l=\, JUDveey\ ZqRR .ҧQ_05g |0j,̬d٤yzvxfUBnTW&r kIgœ >j&fԚ+o뭂x?l2 QǣßN޿{7ajR'o~b wA1Y(gs UECu-cم;e?(0yyK Cg|5@Nܸ~tԿAx:)Co¸Kc,=NzIii |u >=I9J*%]qh"Kf/$(ib0HsMսAޤ'`^w:>SV̧yL=胙P6gr-~wyHwExRtOQ23_b&![s磊ǾX,<KdS[`}Y%{ 3=aҔ( lhR~ei4gc/ۜN`iE4%`1]65k$1]Q/_x4]Tݨ["u~ Ll{+4Pw{ׯ+"6wP/)|ў~(@,!6<߶TdVh@\촹4thbJ]BjU> p٥Kۥ:-RWL4TUE]j[]]ړ-WGŜ%҇>?ips_l56Ź(my.şMhڍp.ggQLj4>>)4?PpVOB YV vВM&|1u)- ;)2O s9^a P:Ċu/&$M+G֤v޷%j$ک>F=hNqOAc= Ӥx*~Ŭ~εO@RsFˌ&ϳ;Pst * fr)&@&_~¼x0(r~וEs!G+g DR&JCR0|_7lD䮝\ӳr)uxuO+-eΣ XjӬwk~{Y?BN\nNyd2L_Cmx?_; 1bt6n.U="Z.hgԷ}~qⰗ2%'.Lǭ킓I5UX|\-s0`L!} 7HCS;O^Kn}(C-;5% h8GynVOby+:23S"KyVܨe{rWˬݮA.}ՒWkȫhn}KE"+M)k ;gVuz 0cyGU\lh"eHfWZ+m:+mڸ5|}G.N)()&7ѻHbp T%9ȥH\" +Ai!ƀ7L)85ggK&X3!|I'0>H*s+;Ut|DEbUtb_ݸ:rr-*%aVqytFct6ހ` Y Owy  M'H;K:Xhety\_6zAПŹ;F (eVyP{+Rm@,G_v̅l>l> Gc ͂\.̸n*/> HLN~{VexqyM7VܛHPM>o<0& > ͓woXu;.ޟ,@V0@Y#g~:洇sX29ID9kq']὆ui;B,ygnz5nвk5{V5-]~ 1|R_4/O8`b.{*ƃUo\ζ-+W?YI(PCA|O! 
T̼~H-iHYP ¿TɀXmj\ '7*Kèa]y^M(h ڄj#) *915q $8\d2 P)3Yň7;p_ MhX' .p H2L/H}9 ]?9ɾɢFHaJ"SMi\PU+g+*"^G@,2ǣd7Qw z2+1:;zuហd92*qY_>2~҆ bCWx"ق ίủ:!#!ղ 8Px\a6>Q":2Ayr*<+SXֱ֛lxش8?d)s j惠7MB~$A"7zf+{Ju=il?EwEZ^\h%u, R'[4\({,>xҺv1ȶ?{j`?u-T.5 <||2R .>{]*.;00A ).f EtlɅZ$ ZҚ}Ō8+Wښݺߴ;}ҁ<55tȹ)%oUW`kia)4P 9S؆y.](w5ׂ69~Ua {׻ޛ&6oymyyX0}:qyKfe/(s`DZIr+u4ECntÛ0x[V5wi&AHYWG$߽P* Qxlvyad9 .ES<-FT6!YZxIZ)!`ijZ_?0Ndt =Tu˗5ꨬx':pƚRQC)UPf)w8= 3N5xI|J3<{}ugc.jR6:79v;{;J=v}>LW*~&aTj|w U\ܓH6Gl9&kB <9~VǕU}xPh?P&7$4#v,NR_Ɠ3wlhŠՋm^0Jߛ}Z\N;FeN5:{Թ5ˆ_02w.2Ҿݠm9"<^${S{2eO}kwWhx}E$v;'~e(P-8~[k{"{ds_`+jr0:nIsm?CU[s^Ӵ]Z  2`8s 3?̗=v)SR[Ǻi<ԉ> L. ؆ءn<]7;`e؎5BDsСVVgNs-&yS2 d_%)WTBz˭(j||LTlfSfay䲛a͊C?trYX$VЦjw65sJ<J&wM:Zx]bS u-u󍒖R!.M)s6FSB^?hh5?MϣegJA!u]rdk\O菥 ?4X!'g24|CQ? hśxeD(5 #r& AM{$9ha}hC}ʗ}rWs֪HR!2p('MƐ8wim;hO\Du|c%|?"$_NoO+{ck{swgsw{wO[/v8ֿʳl5x8 Ā^bpТdIDeJb#r5&܇j42 !44= Pe27__Q= .͉-W.}`3,mk`زWFSx$b`4)*^Ul<0K6?qnG77ˋa6Wh)PIN1Te*5O,sny^b'tͣS<@Dԑ;DZvw=ܓ('_=I*BA\N7٬/O" N-}qKXqK}̽ (<>ǎnc\EOH^|]*lX<3VJ97IlC 8;X< z^F9l%ft^{uxro:l~YK|xZkIg:yQ9砱vH"c;?&Vn o001qjCˎGdgn9vfaK7]zD*sGthyv3|-39vqBVǔ]3lѸdj/-"?chuA=cg?a@+if).bfB7fb'W];AGQr7AwEGc IJO,E! 
NĄH,l!ޕ5 WU~E4җ:7GWo}_왪+|d$_#dj8juY-ZH4OTF7)-]iԌ+!Za46 M`ɞ;)]#JV]gj0L~) j/40cps14~tc|yjPVcدfrcA(_0կcA1F>` ,kc䢠:i2+3@Jf%?^ Eɇ1>3T.x(3|<̷ #F * ף7*0Qa*ήeWy8vA Rݨ0֥Pa~o0 C{Q* oς G)UTZPaDN۠˜uF'*jz`0 e& * m[A֠*`zV` ҹ@=A`ʼHX@`s@=`(6OA=\PCD P07S 5z8^1PR^@=X犃z=PSV;cLڷIQPbVO@ D0JNЏ0A?\&6e@?!\Џ%xНE #c6v6w_7wEEwU0R)4"ƋͿu/Q|+ĖQ-$H;ӃSxDo/|r'fXgԷB,N50jDOsJ% =f=CyTITcb#M~4E8LkНvTLR!\$63&i}LX$~ڮMixGx-EEuP8 dòHHYf2Kn4 "fGSf/Gowi|PdpD K&X zJ3JV>Dp;gC,G(mwTd7ggǦ!7_lm ,bsO&pv7vTߋݭFϠ#I-fKH:8BQ-PF(s^c"9,>8YOG7 k[Q"z ٛSq2`*P`xM{BM>@ϑ@35`n'?h9YǬ|-N-HS'k Ǎs%ltgStyu꿩X^5nҿd@%gH4:]0u斩MN6PP0'ˋ-Z[1}a{㋿nWx>P:x'ʙ l?wgzflÇj2NUz◼e}<\DS9Z"[0$MUz{ DzKO<x)ℐr9 `g HS"'}Ahz>_!mn!6"m'5n ~M!!AHh4Ij[A)0ף]OjŤk׊`'w#Z/ېMk8a XHF*XswE`ZGFDt  @͆Q Zi%`F@ɸ7Qi!k)Bc=Z|z|Tr)gq%W֎Yx9c;h4aPT+K|K%[[8Nojg7t1s8A$`"S _q/ԅ: n/026-JXs5хDbEp9u/' o} 9M33S xfiy;230Ĺr)w [2\rugڐ##b"+s%h<7s9֠MkIN8ro*W#qwƃG8bqutRJL+YZXv abaT*I's,>D)TiƂ5'Ea') 0!|b)"N:{yg3{DR55f7xķgQ ǿ^MHЊP1VHxh, }ȧVD/6b.^gƲ'ty -ז_Ȥ.v#ZnW)p#I/iϢg؊hem ~jr=i>XxJ`I#ˠ˜#R:79+65*gP2Ԍ/{F hZˊCQ ]06‡zD2&,#a"4 q/B̢~<ٳZP:r!${F[Z%LHc^ԙI^$=ҐOWyoe\cO$DkD0OalYWoiS4\P/1 :4v ,R=Xq1JRpF?!4 _,G(/qyg™`Ҧ:Vvbytsd'tհs~~@8&(\?%$(8U򃣺N`{,hEIqmŅ-zL@]GgON^\§;VrC,vR?jU6f,j ~hMD?3m}|kzB -.vN`𓙽]ȧhϚ/qj9QIX'=4~y%]]u.m; ZGseL?=݄]R&u؉ d$zQB  nhkd~gRqt@|yyg͛V'Oߓ'_lm[[{E(S/,_b&eJ~q 6 wtNfׯL _S.$ʈ>xiP|Nnjn?2<4GuKTS=WfG' {>= 6\~`Ċ_WO_J Lrz} UA+)0{_P46hJM1 Zw <@o:i][vDzewۖ)Dw9'TTF<oxx<qOq %{,' E=\m'Kvy@0Y*L'T [QM_v(S\( W ӹWtJF P*Vt\b tfl\t?Lg:>0δY@{IaRL49m\``S%rͰr 5m˪y?rjL77spXYb{" v$% g}aۜv8LX` P慄,4 dK0h+7Q@MN溏=L筞*TD(;YMB٠8U+&`+?Bd&)3q #aGb0ViUpEyy8zxRo0;#{;[/6ǝqO]?bHI^7F8ȟJI@HV2$$|P\q ,R+@Cr=N`ÀP/k =~[ p6|Ϧ{>U4RH_@M͘<=<weތ@hnkIG|b,w?|Bf! 
'A@LlՈZ/iS(an)tOfsn|߄{Ƞ4 _yc[I:_M5J ݚ@:& pCՇkF 미s#QGwvN7Yg|_[ygJ1bA {.t\tV}J b_ :֎69V5_I9v~ȱU7VXc=I 1@QPW^CɭHfgx1up!z>Mʋ TDF=&ER;苟IU)##,\_gcIw;!?3 D##iEl,2RN@'DNWXUY\rյVOVBKɼ%JV҄;/(AqD @]ƅtI+DDZJP:,3`R v(v&=ÉvyM0;/bʈ*}-arSB,D,Pay,Us;`~i=%"G\mn 8fpDэ BHQ7~z@N '$gD^'}D(C+L^a|J+V,6Qk0T& 0Ȟ!t!hޱ Sr5CO8-~}fZ%_X Pb|ȈqП$'ͽ(TΕD|<}_Ud"SV\6>-+PP~p;gXpťCB́!>@TN:@4 b.Gn!,Cӆמk>^;E>ReKR1ٜ?RT*RCy[_Yu+;TPt!:kۧ; -bSqq*$J=1l1Sf(j7nTKo;#j CE,b}1̲_۰Px5mo0]LG8!bȦ6T 'd94"ddx{ĉ|g>ûh`d5ޫme_d*ߡH,MSj՚D7 (+ &Qؗ< w̿N 0!wa~T}fg-6[o[ m)[?ۇ J)~x6Ӎ1C„Yᘋa-0} LXELW,†w_^{`pZce?7>.T,LX(+p$-E4P!+ˇ3|V/pV*iyTh4ZcY#\rx (8iaEJ&~"6 '`r~E}W'AJr^蕫5ٜs52;T3mZ9@Oba_.? M,!Dkgl ͻTw^o e1B}Kc( ͝J}۵: .~kbr~΁:^ۺߖ<15 _:L%%NxE g+F_|c;<1as6oI XO-C[XU?=.+?jFV&1p\^yA t-x}tyu9 x7ƃ0HIvnonickkgsg }sw{}]S[B !qd%Iп7򯵉/@lfO9ͺ*ޝ*eg*-yiLI() Q\q2-qd:C5v:2S:k`INĎ>%Mw wS%VDZbpo:J?O/ݤLuwBz`Ky>Vƅ%hL%sZ*$P $Hg4}+5H,&BIrUwA8h;%?y3d .&kއl($[9`fCRQt:ec1}gBsM?FƇd4_L IX,!u^t|Հ9"_`GTkB20 ^ԉܥ3=a/Ty@(LR $)(G&{e})\xScN,¡b"r>dRP<8Ő<-#y= sGOiLP>) a%j"k8H>a$1@b)¢A9,bhcM)"QA̅=v}~\B 2o*jZ V Ӹ1ci8Vj敢;)s߀ Sct3  6B^D#߂)O\+ve e}^ Yxl}Ի,  }.4wF-І{[wXf,F e!OTa{SAhh w D|2NB6;w5M˚?`/AU07I+^ ۸ЊzwOkQ$|]T;AC; Fc#P)7HP|#R^–s8WcK/+ؐy6~f VC/4ฯLଔp<"ΰ4 .t8p-1]ˋ.EKK߆ |M ,>潍NG~O`u%6", Ͳ]2 ]O_ qY[7c'Q/ ]^_~/۽_v7660A[& |?)u//Iqa4> aHYR(ze̡Bpⅻ@α%q[6b^ǚfUbse+~@&?~pV1\_o]r [CUHx<~lȵ-P8*IojΘhLO B5~LQ5ZTvKK6Ļa5ne\+J;_"JE \2zm,+e>|t Pr ۹I',/0zkB/&a*2*!3r䫴I &gP_x_R /)@ٝ\I1  ^yJ(tnfs + [x1Y,~ D& M"YP:E\ OSCiW(~qCIR-S:c2ϘwYOpgdc^ J-WE֐0>tJ&Ly] PDå`)TpwƟ6׷7_lmoAFF KgW:@-踺4fp#Ĉ#N`4KaR{pcַeD_gO}}׮k~xG_K]]vNEÝŭ?KHN'N<ü7Ԧ?(!#-%]t%g[)縈'-uPkc)a0JK8PQ sM+J%­s*p rW3}/E\K6N1ZXv ?{C A'i}l< f/h8SSҔ؃}f|Tgj4\Lj;nNAeCpKpxzO>יUo,nF%j1wf3&ǘ50B$&1߯oOLzO{ioZѹ.@j{|g-C̗zf\ M>(#a:dq4L6;"ð!Rt&"qCwTx`ͱx_=R$E eʦ:n /GowimdZj<::{g:0g #~'/_>{g'G>Y.0z3U4x&Oxɮg,4m(*7mw4SaC-:\M-s 3_b9|Ctkw:蹟֡ o`\lzp\GN.EQ딆rڡX.K R|O7@.UÌ '˲#l2Kut/cU`anqp-|Z5-?tKZ(Phtȿ 7bӛ!`|hSVIT":UsI5ABY;# 5 1ĉQO'%3H=^B<w}|C򨁴oB߼<'DTlEIÓ7㷯^mlhB|&z.]mNGG'g{[/qs#1xtL`ix  <P \U? 
*R$`aiչ% C۫=b#zC ,Fc|"Dª0aeRc `E,7nE;'gaHmZlpқ .*#,-%4/lU$&*RF:ψTdw9KeUy帇/t洈%9<9Ig;D,v&X˥/uX* ,@BԎ ȱq8n`bT,\*P8SМe.„m8ws4}e Nի z RG[PUyk)+P˦ѺN"ĐӺbR*2V'$b&iț_Rj۬yX+o 湗0U8>~ ksksş6v6w_7v7EOyI'U\K{߇ Eea0G@k!ƘD<%I{D_՛qP+(8Em^z[p6xv>{rgQt{4@;Ψqԗ{x ~ʼ#aYI=SL'3Nzv36a ^yY_:U;lpkUAƻi2 goN;F4оP`i]!xB;}JHB4_ MOV=L!:B/'ɘu@9cX #.T~;MU?ȕ(;|Lve4 VģD֚\f,Ix@Z ;`*Rx"\tPS8Gu:lIKvX h\:) 4(E8Qr)˩F1BN+^O|&0o XFu%G]7;Q}Oi#Qc$ X_d;d@bbD5r&]^Sem`vHXvB[ Joy ҲrN.OdGBw<\N2d!թǪQ)F#jq&Vnk2"d8, m `|:^ـ;ԋ)XQ+ f’kR'h@1 F, gO]\J 9$ ώWO&<ˁrgDDM0"!\gRKjn?L&X Á| *U6dF|{5$87̒.Pyk>82d 7|XmJ`xHL=J5o8d#}uD +x TY%c*RpiM}|k>Ä T'Ŀ @xUhkP8>ZDa0A<@|,ɘAv%¾t*4Nw̿^*N5]η\>v@@s5Dw8ϙZvppީvy71w[oa$8+s̗ެ:.j+ }0ͬU[,t{\r o{AO䕸 Tn& =t"a)+/V.#7<puk1LxRծk` bej(4$1UdIen` Qiw7IY!44U"S|T7!&c4!.6a2;$zF)] zWaĝ.G4JY.]j ::XNUlZj@6],KS^Fs6zH'e*Ӗc\8`kGqejFE #>J+K;N):;9­Mm8[6!i-iÌ|a<QXJA޿=)r@奊|YZ;_ gr<޳@BCz SfBekN~YBk˥k"(^͆= Z62eDVuD?l{[f1OcㅖNc_S?MR%C3yʖz1qco􍕾ăS,ٴ%H~yxJ>6er;*$anrJ{glhb;f(Sm8M z`.@e!mwQn NT6n+ Ix?:7ǣd X5\`K)<;zuIU (HթPmq%y:Jhnk9]GwZEO`m}lG!>8%G z qg) iI>N|[Fz]26/Ja"U|8Ea Iԩ$兖bSzi]n+N)ŝ14L :K"t~'mMI) Xu@Yf 8Lk1_lK'}i\1}h҃o /6 NFZOW1&1`4lN4NC9E"(VនМ&K#upV#hZM>EbLIgDBbZ)ʭo4?-IvP ōR!/O*UVɪijz>}Fl^mh$~u5}IT ewOJ5\ȝ>);p+mj%uWZKjuX7@OuP:RUN?_i_^uzr{It=_pN!GZ?i ISMo!qH;"ֿ8iK8FÎfSd|tamp&C Ta45q&ap1|HA IKTo \DC0SK}!#\j"0Kfĉ&dC;Rn)TEv'ځ<@>Ԇ6>RWQ)Qt]C+1y k|wۇÛ_Cmj-=Ifټo/hqi Uyh:p]L^̪N$Ŋvu.(V`bHY(a1Īkس&$0iɵC8c1ǹhffR7tdΉZVy@XUi#/~Ĩzs@S)W S| H6z\yW?hJ.4U`y Hޒ@326Db`x-zuaMqsA@CC1A'ɤI5PO{Bˆ{Wiu{𪳚!w8Xbb^z,ͮQp^+8vwqE w?͚6nꂇXr_ۃy<*Muqgs[m;%JoT3`v̈w5ҪJBA&*;Qjn\ƅ 쓈`ؤ~.SW&.k#X&tbs6B`MloNO,WQ`o@] jiS"˒^緪ʷ+W=tNWdjtQbt@=<>\@e ,PA4NӞ|.ݟ bW tlN]X_pSLV baiTۤܘ!=ZsĹu2ts7TyBc`iko S{0/ƊEwmF1ys-cQny,\'̅/2E6+ t՛iDƮ3EQNOY8|DG`Z:N? 
1"}ԐN۴EMicfB/3y1dS77_6E<< S X}oc-aͶ!_8ٗ |dZkn+J¯UXSJ^ V\ܕEڙ"nKYu[HPqZ`٪isD ђ}b6ٱb ~UY2|FN} Z }NQ;eKNa6`Y,h Ҋ[Xܿ uew_ccS$?GGJZ^IV(B'Jf $(ͻQBVZ+28?V&o{j$ꭰ6x"-] 3K.BKQO w)-Ƈ(O`|#Mdwf8u!裼7sxxT=PYYfO#{:k٣8ި/vOaok|A‰ m]V;[wOQdUm=Oi&\ۍ56ܢZ왥ZlJ]V,2ܕöbbf!>5+ExK_t8LCB:@Ae=e%@be)7G?{[[mw[S%?Z]umQI'Hxxѕw}c\lYfw|F0gj/J9L\}4Y>)焦 2'j҇!5#̓ ؑLh'"h 1D5dqK0I'.`|NkU<'8 \J q2 ٠v@m,'Q3vf뜭{Xq K"r2v*>p^Qf堫r;Bk̭§*[8kK5mD~UƁ4Ɗ6u*[Y,Ncl̡z+{Vw[C,"m8bZI /ϴ>VRbf=`"e`Ӵ }"tE/3 >׭-dA 4zp|40۬nn?غ~芺[6JOla Td[m56yW 9v7m?_9uBw77M*^7v?Nj֝0 eoy~R <|s[Y2{7ۺsw?ԃ{[ lKwU IvHȗ7&Ɵ7?5<(/w<(SP3bϢU9Z'(YJ[GdB3,'] O("pS/r\}J,nPuc8Gli c<>]MMf(zjvE|ƴi1_C3g5IXXX?-,?Fc <%8dlrv ?Y#bc`2pڟ@ 1?%څ '׾S7Ii'2$?5З&2̹}08KG-1Z ԃ(*]P"-%i=\EʗO1lQH6#q8Eeͽ(|W"_{O.u88˛ SQAZC}BVG*Cj*$ s2S)Q7IC1UW%*Sa(W W$h6{}rYЭ cquaqE2?3[Dt7#?#d c H@[@i{DU:m[rIc͠ <)p"&]SVÚ0#C4+ `6mϦ/A u&yLtqSf h_my#WtwR I+Eﰤo;,N&JMc8Ov0XΩ9V5y!{_!HWr)L{J0Z|}7֏Pelp9%!>!4][$ |Ly>!m]#C7a*"瘪ϩrU43k$U"Olgzc7M$OP$KOPZ')Fn#h7EU] yX'Ыm0F*2[|H$D+$RK.3# "aDR Z+UbMM ֓ⰖȘ \QZcdq* ΆpNק}0 jǛIRQ3;CK,vcm]-k<Ӎҏ ԋH;!Z"wdF؍DC۽!Q'<7[56 "иNa 97zPţ > H"(<#APLV+F8FJ%AGP[Z\m-!@&=֌tyb](cWL`XJk>7[&t: ֎djJ] 4@b\`>hwѼ{ZlaA7ڸG% I\A`RV1!ÂӱGkf,eNՀ3Q3@V{ęJe^ (̩12d짯8a,8p !gaD,ŧz1Dg݌&~1veAmyGw!6۷ k )[ q8űo9.\xt -J4`-RqZ;]7M`FL3ؗ34G!sܸgPͼh~bf2q\*:ڙW5vjenf[t]TZU͈ 16Y&@FyѢ6LSM_7=X[_^p,MAY@t:xn F%%3P#Fxd!/<ݺul{uᶈmקRT(׹1\ݡoִ)5U*u˯A?`-)vdPt9J۩8$_h|u&"A/Eolz 2(2p֢2ih Ԇ?k5D6|` >lTzK4X vdʼ ա,nڲ8D7LB-r?x,Gl?^#^#37Xde .wq,{oX,\.=1@)ˋg$O^/k7Е.T[4f@c[-M-yFǙ&;]?9w@- gpF7ė9mD!&49ڋH2Hnl c qHҰ}3jH&l-23Pl:g<<-jd~m, 0_v6wo>߸9 A݁VP (xg|{ !\ff5IG̅9 +cPp[t9w̡%`$Yno'ז7on;)#LţO <(5< 9Wpari ٷuyȨ3[Yl\TeQrգ* \/!{` Y*(aJJfCCyzo |f:@DZ6~=y4%Vy5)b -5vOc8/uC0eJ DJp0B|m)kŒ8VZI3#JS@7 (i*L_4 +v+{2 Xr2/VBd[vKvRHJ:6KVCI 1˼JK.-VH-8I1ܗ.r /j8$݇`dɝ+ dvPtu6¡JpMJl3B.6>m㎙}ƇHeTHm|lg΢PY-;K'g"IFwz ~^lm ^'^z? 
k kkg`,F ?$Ӽ0Q ~-y X÷zgˑH4bF[zQSTDiXHiwߗn=^U 8c0Ax=加:VC|B9їmY7\khk4i,sB_[S7XSV:-Z"1M}znJ rkTOpB|fLyur| 4wVTd|NmGa((u؂L^s#,s`iN|XY\KƎM©NR¼@j9Oq?;fU^ Sԭ8eNB'b BjA߮zxqdݲ!&$qĆ@vQDzig0drׁC(&: _@tbq:Oam3O*VJRK6Cb̓QTP/WV}ؘ]I3W_QuBk?__KK*\Ҫ F3I Nq*] V/hr}~%֔܄x$YT.7,DvvD?KŘzm~[eEH04~\i>n6(Juv N?RUjxF#42ak=p/LƘUdUKP0pX|Hțaȵ5`wGJ."\ 2r[nWk 8t )|:2}Gd{5t!NNS”nXM q#\7.=%hs]Sqpj0uҼ|U7ȳʵFf?XO\|e˾ӀɀXqvn ~)#z %&Áʾ5i[,'J%-XEp8a€) \cXsʬNȼSĂma4[`n:BFa(& -)4>`S4A(0rQ |`zcK$<J x30u=1H4y VüWI+SUv]_߾]vfF 3Oh^3Ec vM5 '1[Ǵg^v-"'=d"C}?/~^[WX Ok0cpab3]n'pLPTRHR?yRvblo;z1}v^߼s7Q$t+1YCWZ~ V$=#SvnmY 0NkzfJw'St ]hԢϚ$ .M.[FBc3M>țYVD#|, hqUŸ-G<[%Kq8sKW.s7 9ě }1n+hvP*w*?+k=\ʗ8pi F *68f]^A*E R&YCU jcɭUKgty+Oe@.w*ת< 4E?\9*Z Q9j\j}X6.*rI m] 5n Ӝ*~Fn A.I 1B)uH汥r>gs{}m:?On gY/K+ z奱$ϰDEJ P, _}ճ٪y&y--܆ #7 [V!-|T\(kٍ{{c1C[ρ) :QeUc1=3咢1O:q X%% 8dNgNx[hYkucgI4ӟ1኱2ZG"c9q:QUatNdT|)V{=м?*`^ [LmO2pb؍?+ZށpZ~Ԋ$4Nu&Ei)KY ,ghpx2>}'bvFۮ8{+)`}5:4͵k][Ûc7{qqӲ|qvK u=WVэ -Wj]JVYS!`&gxg2𩊊11|Z)Lֵ:O-m4NRFc\q:RHo;cC1?2V$a72ƝϷgf_wcb<&8ՠb͌P+y.DFjA؜b{|sṉ;"M≱ DOj#]PQ>=-#~*9 ]L~@C pPd*Ʋ$ b2RQJ0j Фqdm `77`&CF끗AbQ"'U<gk%- G^v pJ^wւ9((%'3k(soszks_|{ksw=j@Mz;i0ǎ2i2y|;#"@`sH(o_)>@lyց,H;C3#1aMO7 鷄 TH9M|Oʑ,]e6IDCe*%ܭD+PJ*B Ph5b ֬y]Fh5fZlP"GF%ݥQ8W_OiY&>skl0 ր74f-,Ҁ0 /S@;Y|_2'ʵQ%I6m"*_=07A,e9o'uXVuť(a}E_XO}CR;ǎA.lTDTg@%H,[YP֕j4J%J Dv:(7;n8lOY?X> _ [ JcDQ/kYwXVdWZ?_f,=2UDvNIbu }E@4-{qI,03$Z- ڮjՌ/f[ .poaV>>u_oQq}Xv8{zZ4e1afP/̢{sm)>\E62AE,g $+Ըd |A+l-J&P϶%U2<zoi|7a#Ei=_D2uh,$C>'N*E?p-4fsx7U-xǘ8a{rZprZ޵/\_\\[X-X+do V7* Y?Ue<|Y_ 5w+$_"]3 ~Wo=52FD WѰuү>ޔOn2~y[v;K8K+(>#`}"^St=V|E, +ۗ:X& R^%! 
ҙZ@bvt|ke| ,K|)|DA_ !L%{9INB٫,}NTN{Su!Q.?]~rqL*|; ~{,,TQ3V #cE3՝uq/6p1'3@MIUpJ-!1*@ 7ȣSe!3*'gٗ ZSFMDA_34eFk}X\qGݭ;tqv ɪ͇^֬ƱW`J <EN<"PՐI%#^2z#t$RAs{ ⬵(A]9)`?N?4q4|H :3<FiT߾<9cebɯb٨7ݖzXږ]{,LV}ify8E-0vV6 NnIp㵟0cR #p΍Sh:t< Z6i@P ȞIybSy%u6 Q\O[5tE5 pIwpA*ۤmVs鎁/a="*# &d GCMWJi@?ixV[A0- "|a]~_)L0e.5 10X6 ˡl{{$eHj+&ؿ.&DqO,~S|d*&rqB*{\RPVZIB Sa55]c'i,  ޯ vs څA-R*_KB[;|nH,祻 )RVj.8G$QeM<7rpw!1h먚S5 c, |P?tXRbNkoqm D!<߻ctZuvbla ȳka£Y~mg]炋%zz]m40~ۮ0OfT<2bprRڨ22S^es9-w놁FAi4w#ýY (?Z-zq %YvjLg[StBz;(9X*xpҗ5-YR>iyWL`?") 18g7Ƨ'ZWwJ%/ pR( ^Tu1Ȯ&΋V:#'^,#om1NQ֬knYߩ~qw{oXӱJ߆?rI4D}&K gjEK!58_$ )181舐LWwAgFäX%{0DC)џ4+SNax(u眔n6l`f4S B ZY )||xle (F hFˆ]/ W;!4AZeX{3|eyf;C|[|gM #6tC̲%oI}t&hp~Ρq2`湎>vCuI\cȔ照U:寥Ԋs yU{ʲB3 {>*Ԉn'ـ;o?gyuuy$OԢ?Kkk3ށ[[%zX_Z-gȒ7Hy%;Z}p;m @VR)̌hElu<:)ӕ ,-+*`>I/m#\\Kns"Q!@$~IF!`.f y ,gB#@FcSb+_zwBÂ+bڸv>iY`Ef.y -͝In1VLrشKtZ0J ܛR< N1SLj'-SNI OMޝY?G UsToqeMVWhawҥ?Vn@ؒ,ľV~2W4e1!D1dA,v/EH9gl,mYkr!eX$,EE^A-,Eq=n ! g_loN?~sLź< VbM fa՝F6slcj(6m T% ]NƗr[a4zGGxs!OC8E%ݥ{R@s "h=,T @4Z9㒌lwd=wϠ T/.F C;o~pʌsa1YǎtCcwbS^QlbrF[m  Cv&PH3~"ϐ`9-Qy!+&œ₶&J/H b*Ŋ aVBM>63W ɖ>0#̀i ft_A,jfT8Km!DF 0 > F4u_F3.QX+x^g@'IJV"coϰD)T[lȲ/)0o%`&mb`#,`p//nnp$<+ / MQ=ÒnK3;Kme|?=.n sV萼+_N籇E(JKA{``ȿ_J+ 6/<$OJ.#s9r4;g_7^d핱JRET1ydAS{`ߵňxœwE/ rDXvev"EB] Ӱi9 Ka>;eS`Kر"~ dUZpץu?H~B =Q0!cxϵJ) D64n 5ȀjmU*hZ|Hٖ:`05(}MaZ N1"+e'=zI$;GiwtVnn?z`JD!]ur.̘v19*s`%zG&QJ4ehJ+eR,y|!=E{mes4`sI;֫ w P0(u 2<]ƈ(LJIu, ( w֓vPg~A,㿍B/g}A4A80s+d#K.I`: b(9 *"6XɂCK2z[uؘ3%D!/_ŅqǑ<)jDOcD|Y*ubgzTT̤E l1룏,i;'a;ɱ k78 dIpg4h1@ϒ^b@-Na3ę_bM`OA4Jў $`,D !>ey7d FfgA, @SSձH:ܫX!\ɓ>Tq~r9&Rrr{m]gP%dbuee|;L?T@4Zn)|-T**u}1|SVy ,&r&2V8ؓ=#9>cT9ʏ=\}av\OT btn"}%{h?|5ӂL$,hX{67޺S߼;nr{<čc,9,i?+?Kqo.O{R0b/Ix/D-a_htqK81*w Ru xIw-Hi{cO&+HJC!BaC}%WWF]&M{$4D6ﯤ|6C/T?>4[΄Zȫ{so-DJC 3L^ͿnT?w6w'Q*bq$L,/jRx,a>CTqc5g/30o'LY?#y  0ba5C32OID ސPq:ܓ 7h"/υH٦dg(s)}uPZQȏ !7ʫ])+k*^:WYRZ*5R6|\B=*a|{R'aR+Q_|-6W-Kʨ%/_ұ {NDъAv 7B0}$kqWm:ޑ#%+WZRZS[E# mt'2VkKƬJ3W%W[C?x~dXeRXP$HQNDTI"0%$3e3vZ} \F?;Kɚ UT9*V?=GT*֡2ի8ݚ^8\皍9n UsSdkEHp8%:>~TjsOi?557pD6T%ާߛwԎ=ԳRdOES%=|X5VhC0 =xL4"[ 2l`'"L5+>,um&4 
dEH עʬULĴv1g̱!TG BM@y:4E#D> ZMN²]v%Θq'L$hmE3'\m|PՅyז@_Ɵa'HJT!\~p|Ƿ|6${w!^I25! 0sSFF¦W١jtm'~!\B%VK e"WqDi#9E*Od@8~PO[J\X u8K]ʗ|#8-M+ [%M+X|~՚:ES=jA[N|:gNM3gɷJ5owB,Nl&d#R_d}@@C3%%dw2GDPq+]>mOVEZvfĉgVe@"<34bL7:dJR)-c8͜m6k؂{1cq?'{ӿ=G`6{VDzÿNl` rhzy1Ke4=0.#upP^N)&T$#)p, 8Z U2A&Zp-ąAU5dXx|(u_oNLxΌlZ坠ҽOW?Y26r{uԶԲ[:kD *Vy œ>-ҵ; Cs;!jTl9OBg02wnziAь`%$&iuE֜T-d}Wa.dX18E r[ďO)kw @ O+>IZ8#J^}>HSuZ[y.sVZW[DcCW>$3Qsn*> T{\߾]vf#ٵ]~AׯKcHOQVso^:\a<;-]zPE2Ҋ0Uee. X5Xp2(E>-/c1\|KyrU~ٱ^j540T(X 5\y8?9ILMKL$VDu)tAD& 8SliR$L(NwC\za-ɯ5PydE/#j=%ѭWTZ%=I9 Oliup OkYKkK+zqf(1?R`Z} pQ" "bꥉ#*㘂;)2X0yF#8SsC\Q^^z㏽hq2FJ"Ko 7}DTzKi@ 3RmѿMyG#jAO $uxvVWf8aV%-ĘaC]#faiV,f&'[BAmԳ\//EVV?xCBvCB0_Phүh.%{m?q‹rV8ؾf!Y/^/c(`ݻPڗ/fx[e8 ft[n״.4p͈8f:ۢ C"ts`7Vn4Do}"f!8]Q v8&FϰipVSw<3=J]Shw')!FkrB"7cqjיݰ<"وٚv~~>S!sd-W]33uBmOD=Ŀ'LqF޴a } +IѶPWdt ^G6ttF铪.}VBP$>.힓Q>z'ڌ\1_P}^ާoo>E-/xm-HƂgh 5+Qa xI2jUz r `VaCVh%xP}kFҧ~+4#,=Ʈ&|F d2 Z V 8Jl$6B%TS"Ra\ j>:l:`)f9;mi?4!Ϻ Ez >WS4ο̖a "iV}D" D„_""͂Sŋ5D_Z8U Ts7=--|~\6% {xKӤ<>ةZ-Ng"HBWQ+*Nq\0O PoO$ Z#u%t!vGcrZ~ѩI-癫~`XY`F ?c7؍@laCv!&aTu@@j.m@._À]@4Rd7& ºi݆em///ί--/oϰ؞f/Yxg}R&@)?H#d4 #qxQ>_雹Y_Y 9[h2 E5E!.A'8O 0 ^~r\Bi0:}J' I1^R|]6dMFUim6gyUK8^*7<.j/l~pM_-b3Wۥ~su`[(qEK7H%5[rJy=p&KIv6vilJd='^xq/x#)✊;VUGXCzaW|yǓn w|/i|W },QEj"!JkQHzU%̐U3qB?fhG]ǑyxH/c2^5,xj/\kΜ b7ue9SpN'DQ9TN݂vy4qTU邕gJ^:ulֱZNS gA|n*7׹u2Yc=FԱMnfUoK:hĨudv>QU:8et%v2:U{j6VˉUm5!cpv/[ \]wDJIAINP&gT`*7Qz"~fa $,}G!f)b\XWbsdVS[$G8'UZ٤h ag(sb̯$'HPe$CA։#qKSJ5Wjű#vyE?k|L:1\aLpƤSBxޤ*$,#@;K:v"I/#S퐨6D"WcQrm"Rr1&!c:j|uU Ø"hլ!IN%Ծ/#5Y)uʭ>(ɄzT04JfcBTφ3Ƅ`26aPaiD |c:7bD} (<€M.%CWG3şSg!/\?tDЋӈC)gtqC}FyR$jyP4y%#j!y TЪ/H!,p4W_Ȏr;488!rw{?m6֓Dq$M5.CY>O`Y?Yr0 + @XY+Xyc /"MFZX*MEIvk yQgU=eFkd.b. +>Ǚ."-;LNFU:DWY__\6'Szb%-PE*XK]CGZ1d6Y6`نGfK6/ cI8vÉ2$N&340#@Z5Ef BǓRk1N.j\MNh :e`\?vd&a2Y_DkcCAFuJ $xDoi3"S.G%Ly`d)#FdYl%r&e^%̗'U1.!lsx gy{η=G_ԇxlL0.Ϸ}doH'KkfЄt&Q3'XahY#}%>Dh2E/Y6#gcu`Vc`~ls%|e8ә pFL^_qɜɛ<`.Y0u}YH.y/KͲBzv9dgBdpR! 
X W VCOg %-1R/,F - ?r~oV2L͛0N0f)\,Ìyɕ퇊]ole\ܳߕ֪bR J/ur<~6W)0Bڗ]Ll 7xe&G[IKy*x<6gt03,ȆbѰQl Mx곗XJ-&5;)w|bK;ۻϭR>b>m؄77nj~uc{gk^+ ,ߔǢR^#es y@if39$(Ε X/ ^Kk?!^'c r3xWOë"Qo u,z(q[Lޡ,Ggд zn"?o۹?-ڭRу3I?KU'W12WQ<>`da=C;xdйOW ucjn8'o+*ݗRkeV[[~azH& q.Y)$Du|A)4v]oW^]0TxY!3Vdi?Џ͠kpZS(0zZ3L /T++cq؏ :R@?p߷%fIWu?gh,Ph F50xl1>?:&.ÂyIzX@'i[bހmn-ו.}ӆulMJBAJ K\49売} ]c=V]jכ.9XY.ZET~+Zϭi0,ӽ?߻  hp1\I¦3pDascs'*ZH]cL$7{Rf7M㥘̐uFsX2i ̍4|c7^;/l ǜ[us`h.,=k T%jn^E &) ˫oϰBHDe[Qg锉j=  h,v}S<Q~09/ GoJnۓW ɎRe*IXu+niWUYUYm 84O\T*򭜉 <A") ]bېWP_@?n6k4--Nթҷ ۓ` !Uv d-aP?&A:KkBNŌ%PH7bd'j;Rʿ(:8>"$&(vMC91TLzB4퓺-UT ՓVi=aL'b*֪02Y )^2Z*m$Ib)n.vVT|,Mڟ=ɗ7`OhMxcc Y\El4P08o;}F}K|+hR#F a@8amL\G0$u<3z?q(sl48pha%,哺x[:q 'L2zJ4C0Wmƣ޽IőXxZ;j qp+/86X,^A!K0 K}` Nv+@h/$ҏDc~ Ga7){lXg('a82P&di%(j@rm AJrt&H]BT ռŠ2} @kAY}_FBW؜DF\˄+;ٞAf9cg3ľ̡k3k Jew7u&!XJEkeoDϰmYd NJVZ/4c+Q5G;E<"tΰS 1Y4z];fYܹ3}+!o $/eh0ah6%;I0gjF%H{V\&gEq[ dsg iVWP{|8M7ϖiTI4wn8- vpaMP zn  ת vpN7>;Uw.o˞S^˰I^kGҧ 3ekk@i<AP6 (q8 5ÜFP@]=u2ve|7; /U!ĪIX-C|x)WæNno}mڝ䶉1rtp _e!T~|m!30dN+Qށ^.p^F o,KтEF@S̅ql )iR) vM"*,*R%;L$[(g4ȍ  ~((ukc5b`sն4xh5T=J6<ЀNy$=[yfJq0#9'=mL;690 t5hGh\p  7!π{ /<+RW 2 3\rpwdHaJX?TG9Ubq ͊ hR/Μ43Lud=ִI!#1ĶtWl fE`M]TCIk56pVmZr1\@RXEfVV KˋkKKquyk$,(Tb/,quڃ{ h_cπgr2ۭL.Ht Ԅ)Nb|M1;f\Y_?uwڪM= ܄eF6fʑntF]28}S߁% HbZyI{OxH{$c贾U 6R/9"cNf] 4b[ 3fRS$v+ ( .-ØJ !]؎,&:0d-wk revKcQ:je`_Fh G,!fkҴx1H||!Tz̑řgV1Mx.lgI,,WCy* hr)!s$TḣsNg0$;wjm5} 4, 7gHݹ(JlbbxJ=eR[\)҆j h]k?)MeY2egKNHEw.>FtZ?kn`};UM3VDTʆs k0 J\W&b#YxS16fj m3yh fb~؆kx'/JpvQ86??J<봽.E՝yszkփꃍ7k^l136v{m?qZ(P=vj*6<ýatI75xDZ%7߭>ܽougs]@SpVAB~`}iR-4nOg]Doddt.SΫ ܑOl;ޒ6&W(.Q$ʷԵ۲nLPweG:-Z `ɟ@2»36nQ>"`&(_/{- 40*)N}OXn| ta8SVEBOCz|*Y78^٪ W ҖM@ʈn44d8[vth/7v1"8uW<H  ^:? l!jWqejǭ|8[DI}"r&FoZY3f*0deb X73]ԍT_`1JKrY_&2ɄHJ ĤJ;S;:ן\~k w&-6| |av ? 
"G*""Q0)8<0e=22 m-]pwRMiM7o>nkyYA]0n!z0?w N F Y|8Ѓ*&8}> ` fm*$XBm;h\洹eN`%59-ÝAJ<^rA"}Z Cgrn`Qk>lq)R23F [^n9f8~AWڊԓIjL g@\E9D( NPӺu4*2LCUke\ygbPp?,a".[{0d-̴O9(*J6S"т[ϰ$D+"\RF IKiQXB J4'Q_|Ϸq1mLqz(h!!Cn!}@ L6Z8g'֌trWtϤO^ e Z Ǝ{юJerCy, PFnTn[u3'[Fgשc==|-ENɠ"G Lj$9>I9E8JN Εc`R$1ЇL'Ic7_NߜnI]8a\vf}%@6d&GeAz9ARbGtA'7("uNy͡"3`WӉTd!ʲn[m4eBEkL7]lb4\-9g̬5R3fNmϳOyv[+jӇF:$M4铑tC[[ kJPX+UVa+ a M;dRP" oFHqc[4"~ř>M?FR ;pҷggqnc[4kD M@Ln/8vSyf[*J,MQ&;Het5 ބYP Hn N={͇%$͇CN Q@G"󳋺%SQ!8:tf_uc hi5K,iG=qJx$ܖCon޻gh0p`8KA{;zHp8} # FKȤf4l`q92| "8;Y膇Ηt nMO<#2Ēd䐟OXyKNQnY-{ 5ܚe\~vFH~aZ]/hYi"]Dr 5 1J3n|.~׀,c$4դFC>'&ȲPi-dW J1=# ) /LBs˄Xл+Cqjf1z 5ÁSѼiʹ'yO)>q :1xRĦ]ry9;ӯ},Bض-tr%2/,O#x̶DDkуyxLCb՜gH l> "Ynûw 6?oٛZ-$f@zI7F"PU j<)fZߴKM?z}!qqYW(>3 &dA: E'ɞN9*Ltzp߼B3>G *]Qr^/IF8u׫R^eCc̮2#8!!gj% +Kkoim3o\qǚhF:or3Q%×FFQ|JEe+3:>rG G$bx1; -\M6RJs!)[/1Q (:d_)2TNQcA,S^鑴Й#;Co^b@##b3({dlȒFގVzKgeXd͒LfENoQ=ZN- %v\O-s\ Z<Ј)USKAȘ]$̉ͅzﻗ(j,HK e VnFK` #ǹS!$-4_+*q %`]L)sMA1ԙL]qAXMarȍSxWo|1m{ȂI52pLq(@f21؄sB$fphY(F0PiC6W:3hXIQ1t3 )Q1ϳnf3{\g 뢏 pƋ"aR;8f< '<w@jrXV5&dM}3b.Ywj+NT#cC[ b5At |P(p71w ]$Ĝr kS ACK&*s@.ҿ ȬAQ3N5NPv !:n{[=9dMXZRyv07NMdm^3w8/ /a[{̾pq|]kl9ɊkI:X|#2fC`dO_9sgJpEFfIBjbJJ;RFԜ5YMѩQt.r;5A]]jx9.ZWEf$(oi-pi ![RۡBqԝc~ۮ IFsVNU㸯?k0 GN|q:5"QJ'CfYƵ5YFV|6 +>2- ͠>@dT$AqA3O1dω=bŴ-8mpPүEth6`y]rdyjT<ސח1Q:NOJ1SPhIC%FyVx H^Yg1ԒլW=:N ݕ#9uE$BAnF}sVQ3c Ci2V+ 3wnpJn0誾3 D9 ¤ltps -,9abnqc-Cyjzj8 XQ%z,EolEdU%FfնW;.Nkη[x%3$;Rsĕ.PQgymR*3Cl`/C3Z^ 2s f$K"WXt9FbƷA?Q`VEor%*^p jѶu;VKn<?JdYxkt3r}Z3n?hr392nlmKЊW/5EKPO"eH|++SX@^0b3xN-PkzkhsK?U^g2*1[% a]_!sіᭉ pg#r-nu=c[G2:23u#`mȞ"/eˉ`c +ne P{T13,3X{G7 21ɛib}馎S)mzT}eI1bY>cKmc[f^Z!)WqS\0\]̫U%VgR܀_и. 
#BdߙdI9ч\ڹ`W$z;v1 cܼq;i#p956zO#>W#\4@̰c, eez1:7\(c&tŐ(e¼kMP.qA5c!CR&-sǸ"2T|iI '6d #Qf#-7'%5l2ĕ |g5&tIU#4Go'D86 8l4~dQ,AkW6">$`ְ#h0r},>8j'/z:,i;~i~t\XDvOpK;T:u-Lu NMAsFܽ v73k8V*vON9M9pʵ;yYAӢ`ڲu4p0~wzG߹p)CEh |o?nTEvѭ/,/,~ֈlk'.O㐍 hљ-t5ind@h߸h3tu֧ r41u|iGJxWG0@ӤIKygF?IJ?\#G.VqM81y[mݮum7rVp֏fWWWW 󋋫kF-=w-1Ώ=_[X]_^_[Z^*/ͯ.Gw'Nی ,N%qL`)/e^{vc|Y@t!@ܖi|do4&bDsұ +iZDYY7Az#ԉSiDZ ܅f asaVL3o,Q?ޑɷk#ouS1,S?wN( lqsjpoo6uu;4_@c<,4B*vְD2^ f`QVv &S+Qgq=KG7ͭ[X=ˋj(w`q)!paX` *˴{R0EҁƳ#j1 MaA{~.2MfH-5B4 {8Qܰ0 )C"da7TXxqm/`CV 5ƴF:A٘Y8vuTscBVk<@REtQ7wCH|at[}äP܆$+ j‚X$2yo7Fu^-vYsBtF jmL2u⢑K7,YLfcG@Z"`[,2Ttܜ:3hqd 2ޡ]Ӄ1<@UpUluka@i׎-zm %G6ᯃ֡m0#WYh~tl\&}DvZ˵t|wt(e*>"7B~*Hw^4 ;^1a%T*4;SA*[خ|cJ);Jg>26ǴRGa~۩p^gQŀ?<6W[5C$x14$._ Xx`AtUdZP@z;hh `FdXjxMoiMa3A<UNj)2SN+LHplB8a2Se.,Tel"T⎟ωBV?`UZ=W҅۬9־Xb`=y_z(cS!dquyqul1g_Yvn}$-#rh:pus3`#8D[-Q4}F46;k=pdՌlǨ5#J.?PXIe(d|C/1hOdf܋B[jȖ7\fA_ 34B9B2:Ԕ41w$k!u"+zKLcB[ݼ1١fDY=sw裱EyXVV_ %=X7_MQ:i0HNòam DS5/fk4TO3P,dԀ eHj{Uny(q@*ԌF>VeVu 7XAdc72LoqE^Υsj0Ƭ2S&S}e4EqHn}yguk盻փ;"tkFefTMکDSQP1Sߴ yFVprvDZ,2L$%P6bW^uu=j{2^G$Ћ_E&w*hunxVE5֢+–F}7j.B8X\&373Ǽy#%޷̍Gt\؈vхc" BӔN8[pHn3OMIJ U]\]ɰn*6Rp?QP$8SS7kĆcD ٨'f:|Sx^Wgne#S {N6"jE0%1Ò7Z%@|@'*c/6_ҞJ8y"r3"7eYe #^ KөQB*PQJ(esx~!H,pr|d#%c%f&9}ᑍYm[Cfܮ4kھuYqŬa<9Ɛ?k<﫵U;Uwems${F?~%<]߮ܮ#R%e+~c}H55#L0!s$8h/ 3F!U]rNP`N#$vvq|7$_ @_LS\4E_ FrYf͡`N@>;ᅚgru<)ksZG_\\[Xx.oO7| zPOpUyՄ|%rWW _&>_-|uU|~d~oCwo[v}xPJ-_?*8?.8 RxBF +v7M{_t&RCo!LTW病#| _L] u/BPxK:;G­GK_Y–Q܉o<*Gg J$}'# *`O w _]?ߠHkQ)OA'W o.a_o]Ʈ.8<|Tx[ÐXΑ~Uk.Hp:je$(xɥ?k(>~;Tx/ևT0rM.+ȿ&' ޯ˙BLqY<÷ˀ}@̃a {-=fow QK To H{-?©bYR-<.E} $FZ.4u g֣.Q 'N+*J#\1ȟDhfWC-` i7pvـB ^vgEn=E%AU0%L`/7\Pi?^-8kWOh·+_}Zp*~WxvOޭGo ^z AC^X۷^ry&CY(8UR`4T@a g_'I4q3* ? 5?!>l?_~0gJ _UKV iz-K||?^WZ> :lw^{e"_ /#}RBW@װ$mXT-{˒a;b7|n C{ cw|L׺nәz\3 W8 g_J% }LjO*p-[ G5 =Vmyn M؎߭9Xn 72R }o`yhaŹަ.o&[` vި1<U|3c䓧k<0Maӿ$zk⭉L|G?#/O ]B 2K@pH D0_BkDC! .&&<*IpN 4 ;fMD`ס? 
J3JM`DYbebK}I\kbF\4̃ 5fxx3:VkIko`'/9Ь79}-`H6>^>qIQ"&w䡵(T$"9aE,|m.ݢ~B:r,(s'kb ݦ*sӸ}#a{{ʻ "z^֮;TY"QpJ7wL_li-nPak7 */c"O\;1 lH#?x!#"//^DN@iIl ΁ f\ eo B,"TL&vq)!pPݥXl: KP9tGXٝW3A;Ԏ.;J''8_煂`: 8"?Hϴ{߉◵0jV:r`,;!~藛YmBS4(qހ wԂϮIfvw^Lc4΃vC~;V2xߴRќ#; U",w'~N|Ԯ{n褃AI_!/< {c^CL>q=6s|FMNg/&3*x߯~Kc0a{&"OG Mݻe>'Cd&աt6DFcH>"Lo:^=b©4ϣjrOhĉ?? >IwZ~y[ }W Ml%Lqgw^4C<ƌ%Y38aMꄛ}M/8sD⛴)]Rpe >m :$gX&m-6Ar_de$g.ïlfiՙD7Zv׈]B$|`ׯ*rd ۽c.6aG Q'A2d[h/{t3Cuq[q4u,~~1O{0?~,wݔu>7X`:'KϵUn#"Ǒr^*?~úNht[dsA1|EbԈS;n[S3Tź|xtƇ\(߸> CkԲn<ܯ)pBsVT"Q˲vIvWwY60Cs(kP:w! kN6p>S 6V^k6*qҏ'vi7 !e 54PhVź6o>/ej0A@'QH 8 laqݪVg=\½[_1 gk?ŤNι988#~U{GEhgW7)λ7n#:B>D2*@khŮKp#, "TڍN^ NThvQ飡@Ģnh.x˴UeHlypN?J =\ 7Zĥ-G _k"OEpξ`{e8l)zʚ{Eݡj˭u^B{uU_>&A9y^}x* !QW} A)T~-R*~xZԹLp'mD|NO% (tib2aA1ϰp4u ؏# ;?\αJfEND63GY1:~F|GVYΐ{aН:r|0չtQ9 p^eFIYR^nDW>7T{omJ43ßS2^>.AmTu(F\?(q9P {$gE, eYWjY(V[㦘ŚB,NL.5 $Û|B9ǹ&as'Yڬul&G`8k3Qe6DqJ3!h 6Tw ͗H[8_lPp ,A$z㻛6wՋP~}*~Oغ2k*kq`0Nax㌃H8Hσ9o_ o^W3/s?7QwUq0yß/q])VD5lFjxp_ uxm뤴}U?(<y]Baz.OąA3օ.2~qOn›fOz 4C 5~ wzP[ANoGX较Ǚ8.Fk R%;/y G%FOČC}Caah~Kђb2>ZN;P[:w";[,ݻT_ +l/GJߢ7U8LŇ>ˁjڛ~}nI ?mV*)ԟ$H)F+gzt8a*5!qu8/EFG$Nq~ۚPϤYߠp$Bk h_6)m?+^~ODYѵ<؃8zrZMe7*ѽn3Y%U"x4MD9;O׊W:WAmǔɛM9[>/M3M TC]D5zᗶg{qW!yrm7]\z ]<<:Q nmrJ70Ԗ.jt{G El[3R{s{QܗkM㫻_a5ﳿG‘XO|Z617'~݉O=KLL^B{I¿|t.w ե^zy 5z{>|'?.>*.stoX?gYYw,6,}u Ih`⤬:lY? 
(˴lnuq~b7_l-TߣvcwᎈʹMh0 Y t%8:}n> b.fQ7)|[;6E{wQ@K e1&,`V5%V)٩sBXС8 Q})eribݯu,< ԝLd<6:*Y ѭ&osK 615 vq(aGǬ ^*3{K~Gb Cn8qkݐ:gK ᴘ) D*G0 `2:e??EA1=( wW(DB-vSi͟jtCU1 E)#6 .6HTn:ɿ*#m”#5b^>+bآaX~ִw"0~0?!X^79|5J$g 0d^?h^{ߛ~.c=K?g[W/]xh!m`m Չ__)nMۗWk_\A[bA?rm -޾ ~L @v}߻d_o Ժm.s>+K+kaΉ_Fk)r6+˫++g4f?_I[bK#gVW1wOOq(W?v;frH^ KkK//$E0,Ύۭ׭ΩwO]nݵVV0+X]Y NZ,[X`uYMt6>/ɝΑ' _ʚdOh)4Yvf*sh,S r;-m|YVȂQ}bY>E5:"Jpq&V ݝz][< (bkjcڛ>ڛڿprcww枵kvxWf-;n"~ ؤ0Cekϡ{px-AȠ:VNkN vIUCKA'Z=˩V4hSE&#P|mS.>" >RN5nN@̅l]ɱqfk]t[ c+LNNǯvBN;bLMMu|RALiN٧pFI:rќrSܪZb3,BÏeYW*ݙ ?{ai#vtڨWh[{vEP 7Gև\~QE/U (\gGxk Cy@"ϧ ^*PF=2 ?h=##Abh"!l+}#ѱ[RB [ ZYsK1Y^]P so?@O@:T!HD=̜)1[0ѸQxèuׄ4BuDI]U8Ń/O&萜"Ԏ6lL$YhQo9Z\)d!lLӶ[gӼ$D ,N:AI6۸| "#m>kmF -fћGa;0D,&@LEKh.Y'2 j?"%e2C,>{O'-OE"ڿΔۊ4 Q)X/M,d:E]` zЍZ hJ'2*U.΄x[>7]8y.:qh[ii4 i gR Ozz3ls#ae$(i9 OUݜV{rKVHP6w1μ\Qf6rr,$y_HB&vkMi>8E֖%b ˯.hJ,nLF渕Oɢ 7E'lȌ RB4;EB7L$$\7R֠H̪T4"uY^?*`txx ʵb*k$XL$:]WTͤ jƘx -*Liwf8Mlo.,G 4o-F 4~DӇA>uq?~nɃL$<%9wn@^.pw7賈;|v>gOv. '0Dqlqk#hB`}l{G>ęݜ^>(^qqN:",ptLK/5+, ӡ jd N[fga0q-nYSZcx_cGᶫe##Ciqc([=7Dm[Vop!ӱx_1W6ue 8')vQV&p8y:hZ30-pl3P+(%G &3մZ>Ө3 #(x2kDP_k; "}ƒClr);E8VxX zimmEV਌ԈjXa"RCG=?kX9a! )6CcpܯH"uV/U̦C΄7tb?{i6Zq~X."H FڱJO(&+YPdT+9ł/?Zg7ѵ lv MwQ'TT0NEڂHJ]a pb}c yȆӛnMzOUy<7$WPFf[z[ /tjI F 'Sgj`H w Ni] \4Jt9fS"6ZZfIm(-}RxR2LR MJ-JOMe1oFi[]U#d1^.ւVH)sI&tB?=HsݺMcbk+'t}FD煉˗0dD8  erUN?\ANN`h(JdX Lh5*@33?ʜPc`l*C %“œМ ¬y^ 8d] [`E5w.n} h(;KNz(OB?A[eƳC7Ϣ#_`Wi; /H~LE$0g2/vY $Yf}Z4Ox7Q9]uVӯ%!Nss?)]`o.ߚu$ (kb\ p8_))+/ةsEf|4ȒXotKPٗ8{mcDX˸vAj?=`{ |1{cbΝ(l d+r[0YV$g-Oj']%芛  ޛM>>gy8M^%41^./ O`x0{xY'উ PA*e>_Bɘ{+|,5\HswTia3Q /Mk3퐕yX%VO b55TżV8| K.%Ҏ-YҖ1ڴԴeCg)3;IjgnN pX 5ԳFuѦ4ݐYdiHxm΄F_d{bڱkh%ǀ s!Q'plI6? 
>`@@A!yDJgiHِ6Rm61`&TL^ 2Q*3” <vrǤGuc&N k?\A΁^CVrUMfRX ֥.O̅W8!cb 2 t^/KZ7ŵ$* /c \ 'EEĸ3CZE2!*5J\%1(q Ɍe"SOw4w=ʦa %ZFND8VL{>G?P^|ԛTQUg:V!Qc)%̻xgPǶO-B DdkN/Y@_'$o(y93d A,.\M"gg:yҥ/ $ތbv1E#6EV}.i߾S#e5 ?G'.w]XMsQ1OQAOg1 xJDw`wsIy iN[U$(;TD6q6A&Z @^WT P\+<$ZI& -oPiJ%ʘcC2`&Lː/tZ}KUCd<D%> )RHH5i]*!AK`ǗeJZ|_e\ =yR8ߝ.+p.i_tpFaѾlL[ʂ "]TS B`ď}'5aTJZXC$ 9"]IЍDZ^:q{>浫 \koV?/sd$r//G#yc{l=c{l}Al=c{l=߁靂lܚɱhj- _%k2{j2;$u^f:?f>6&L2^W:\Xdz6~ok4gVdLX]u"Z 2_@k4#K<h=6\_0keZxſ2dbջ_wEתd"\#F Q8o3hTNlz1j85Wi3 hIY #39k'W (+DqF'/kk+ c?g |Β8?{Ņ.>QEzaeaM_X#y"]ǻInܿ/}uE|&@*q8rw+ .MI4B7-m^ ~}་?w)n?"[gn8Zxn[=jwpIw C u=V7&BF 7~xǍkrC]q<춭r;=\Ve[GSE;(t$7ĐɈX * ߐG) C܇*nu]AzKc^ Y Ǥ{EUu}&K"iYctC.?[kqi)&`+".J`Bϙy5UxMע# b@xPL钡k4XPMAJo#b6W,ӇyZ-jC{C_3R~^zU ?@a|L _GDA:?翷.^E?=m_/3{ٮQBmB9o:R}k79VnY?e%hq㾡9q{w"cBۤhìOl_"9k,g? %?ko&ޛx5 "$mhMnx_7\F9\-r"@щ\zk]N yD  K4 A.D!)&$BRzKJ юn$#L3iBtEy`nQ+ XvNwm& /[-_vOn >LG[s|6+|s=Yݖh.ks8N\:F<?lZJjvχ^\?lBS׆[nYvխS LFk-of1!v,/5m9dܚꊄ-X crPw0@O} ;i=,`@v| 'AsAzOjՃA}F.i<4PJ]3)q&gql?ԀqP;`2MԭS Osz2MKlLӶ[i>4/13Y =~ M<®j8|N=0~rvZ]Y@Pi>mYtjQ*lL #qjw۞w}2:&| ja|Q@85Վt"v'-Op/؉/ #U hKv.eÅm~mW)%?^` !|sv$[@g LxWߦ wӻ$Ckyv=ne:x@8-368d46/^!8KAđ3ABD+A]܀?"sfWI7AgK̈ߤ8&eprM'5C]@; *S+ouhة=]C'b&pHv(ZZ57i$E3X.jEQLYHم!8 P:sA"={?(0ĵKW'~=* efArɠ'c.PÅ.lPyCQ ߛI //E[;^%Fd $0 OgJI@Cak#nf}` F-)fHw7cTIdb@̇9醬Ѳ"a/ 06?cMwGMLNS ޭ O% R8.CѢvgނ~M8U;@IQ IpYf2'vfp Gwk7 R.{]86?{x {SD&=E-Q,Q)}7^~o὿e3t0__A~s]N(vZO]S?_8k++i󿴰0 /ί,?^:O;ld#e/ί,/,̯/?F\604\ ݲH+JJI02>@>yݺViQK[jΡkA TVlZueln4'@wZ6)VZjYmߒ+u곖#>Tt0|3"=X>:nǩp`+wv{鞴=<T˥h*/yS[T±@]ojn#_ x}r"Lӆ{tU` ~`je|>],% }JS_pjp6:n*,2' xe/'';[x>v$YNE7Z)BA#iBA[@!o4!8yެjoܸuܬ#e_X^[W()aNۏ 8]:9vk(LJajc7n5=dg>YI}G:4..mK5KYW!dU !&k ^B9 вg6p?UӁ9~'S&W7m?MýTnnS|ݭ{Tw粬;SIX~C]bvTɈ'o\B#2x/vYĻ$ l ].k|ݎ_;99){N*M\שڍ o޳RW%mf>XsX$<(nx:BrFh4 b𑅰5/g{][XYZYZ[Q /aI_oe>GЮE0eTj68W[ȷ۽ۭgٓ8(#6@edEűd]P^Wqשn]$.mv{<,KǽvزbkͽbK6T:`$ ԎV^P*=*~zAd C|ۇu47XrL-*mHmh@aCaef)0pҮ=J P4FaF :nm.,XX'v&V6"e}شI`ݷn~sH2N :Zo7|kv=?ְE7o//~+85o>i14la 89;`>`aקHB  2yDRAG0QfDAdz84[)MrV>5m(0-5lL"WMskad$qœ{7nzN 
vM2l$?dz?8pS7{O,zP|nɵ“`vȨa.RpkQp.ޙBzi40e`U +n!tj^͛{3' ^@R=8Ucjh.Gm _/10 *#O=;;@YODK%>9X_m?ܺUݾSؿK/@\Q$ :h!π6wmM~)I10(#5Z 9`MM}I ?1!OY=uωl)qd]Z{$dqWo;5*rasߦݖJ:s%Szvf_k{4u+)0{t *luz|vl}SRp: s4{:v[FA.YMmcNA[B҇C7%o7G'3SHA~ `hٍ/*bwt x^BX%6GDPxƷBU.=<#'Ef7jJeju-?=6׭=˩̲uHo_ h>߬`Ѱ16*vD-Bh` ?~X| X__t;UcR\tIyq?_|fh;8}zMBy_ )Vw -/9Vs!RMpKam>]zͶϐ|w7Zk[N;mKcQZ+aW:M?J%gJ RR9JKf\nHh$TN ''knǜZa`Gq?;:N uawT6EGMzP8RxO.|pn^׹{skG!uL8Y0,fDŽj%9>qnHdVQC}u)X'XDpqkǟF\n{G"#6eY9֫WYT ֫_>`zKXJ~Ud(q<XҚ!?;pOPQ>J R/i+tb[4{imZSJx"v9xe(V}0 KOF[uj݁/`>]Ǫ][q<堷Rź+1HIYʌ+჉]0Z05f6f#eI.fegÑ鶪5;N3{{;h+SOHֺY\;fc)|p;O# |<˘X> T# %ܛL>.Kvo!=:?5'z 7!64ܷݲOyW~]P紘sKkm`L(4mp觺r/ס?O\ћGD<0' *Ad7? ]N;,L⑌Ph<}v-?: -9ZZ/>quL uGQD[0^9[eW{TJxqD6.`M!Юݪv8eVaJG1s,i鲙fm# gG3b|KR&Gc]VW0/X6,\L(p_1תm ei5q{ӨX16U*:Bٿ +v?7nFzFD ?F,kmĻ{c뜧t)ÑMd Ė (nn`e<bbOKKcHbCZd.MϟW*;mY߈GsёtTtDEON^pé`H` l;8qٌ=CuYi (0,cфlnASVUy\gbZN}ḳ7[~q1crhڨo1&}tG6n}g5:i+u0.mA~U})g#o>ª.,H?-"x$ϝ'CY;Mp|Po+U$ x>|,M>21HORiR(}ymu5u}#\hfQG漈7/ۘin/n⁷Om=Q:qڵڱ{#8+xѱk}.3 LtEHpM<3w`-bɊh,jkPv[X"y"{ZPSuJ%pLK|wsZ}˚& ]s1 twm9 Eڋ&oun  C{@]됹[!eU1POn6`7JAX ;@2_XYZ# ޢ<zLRy\\/K ,g"u;h׏EHpxWsUă^kE24uZkcY k)}(ۓh7z14\T"ᛎhӾOn: an6(ʲljp:m#[Fg 桝&)r{eId'W  s~*LDJ-I[DժyMY/7jy8(T>H}eZ -֍c;A;2tZ7#bΎ8i]\tGtL`w"8} hҷ"+YD ^" td)F"܈-`/A{ J x#H!>k*Oc F(fc&Apl+óNQEfoE4[ %Kx\xV9GӑS5UiDv9XRr.85}RɅASWj{fV s`QKeiL6JUʁlSG\R dDp&W(JlXe3&D~=g|3YSqy# !FLL֨V@T득u!XFJaKu84R{t췂)XP>qJ+oN+WUāxRl c>?}k*?ua@ ik Jc)jme3C]K:_g|ծrt<<><>>sI%&+4|]0Г6%'\JR9aG-$UӼ%|\|їTZMRF@ؿS]=B6t M sGuaZ @ $@6 1`2(2.KMنY:VoK(JlٚgⰗKIY@r`,|λ/=]XÅΡyjjVdbxuY7J#- GV>Yrd[YSڰs յP!3ݬ'Mд †#>GS՚``44OvXB5LN(Ov]l"ܲt_b+>@_0qŗ` κ@Ϟ]ơi`l]z=u: On-e5oj'Xt?7RҩTsGLSg`W WjTlf'i͔lG *LR4- XԜIFVDg1>y:Qf[ggaXyy~ehkf `Y\aԮX{^JIr+aHi{4ƞZ\72Z>u4XZ@XDRDc%P9~S<Նb]j0<gz.%'ӞTjEX#yIV:(MG[0W)1Wx+XܸTǛ~ޮ8Qb@x{,A ', JbR9<|xkSLUtv#صg^s~SU@$ \J*L4֣z:k61H !oY//fA4tK"`NJ3?=Sowg_b! 
]HچaEl:R>5"eL$mI7Ƞ*2=e݌ɻp(I䙭fffkfSwj@5|.X{ԧw ܉V{:4?Y}ݟ6)XܠU[)Mٻs$0 sRWJai \1U~<*?04odžWX8E7/ ,` ~o4A""K&k \o`+ă_`6\X o#=0 ߳*6؉*3J#O:S <\#L dI9BqK 5ke: yR[}G:v3 iM4Λ||wNo9 pR3ac5 o 5{?-S`Z3&pIF[`rG؊?ʝ1Hl_;^:}bZZ˚裈`o1O=&*dDbJ|85!n~)rQc6}GDŽpC~_vGoG 6N3a~g_UZJ_~;?^L7c?t]Z?}}X̶8oo~[WD|jۘo'P!H)BrRNULWyDr mHyrp_IWwR pr4 e"Yݜf ϧ8*D Fo.jCι"7>kjJ%h)fpRQ.65]%@'f-f^V{4 =o~xFgxϪѓdeN0N+x^tPskUC1I/a̹ӯ8`=7ް>)О)XȿmT̿mpwǜde+AR |섞u;c4bARXB$"Ldq#5v Mw\kLぐ#[&F~F-N>#7c; N/CLEc]MYx} qk8^R K.-ES:xf/( wҏ5?Wl"oH6_7S6_ uڿ(7H6fnUk>ޖG?yFN"o9wkn&JRܠuވu48R(<_W wt /+}ptHװs`\ NoIv[)fɞ;FJ vtE vjRNpS;`Z Q1,մ !#1EЀ|s8)kU5D1xp '[mBIFVdd)-rƤCd>Tx6ՈT(^ze;E2"%]h3oƠN̬(8<8]JƘW @.PՕ}"951,:3.䷺fp1nA }?̀iMzN)s~]\1:|zUZh#2rғ28q&1όpw(p; *c~*z$|fa\O`oΡN OO`¹fGE;Di} :n!+g̺rŢ~sª+K3ХZдaCw9f5@6%k:lTo5_l _#8 ١C;@ m9j3:K%c_Ŏhi/h/)5dHW?[깒JNhP j3ʬ*'ײ!"63N+\smydY a_eVs)kھo<<<^ϙ + @קWF9@#o#r2 :'p:HM~)"IP9Xn!+̊@%p| :񢆣rrMS)qJ)j2a*5 S(HyNDevQ]Pfz8?ÊlgKN L 6+2<)ψYdAO5l)IMtDďD&!!}r2Mx-k*Ep Wl?Vsa6&ʚ?Tݡt @s (k> |qYZ<(z%l <_0 ^Zּ}c{7R6afe|L\?cj\u*'5[Fʣʺ_< Ξ yb{^DY[L_ZovyӳPl(k} T}V2s7SVǣVJ>z$u?*c)I'@Yæg~y WE5\GLY ߦ,=[,BʚV2+ S$#e1tae Gb /c?QJh7{#)|6H:Z2׵ܫrʺ+f~Wc7RDF\c{d'ѩwva`'H`昶ښB723,</a-ls T91HE#ƿ]UԱ44)GC_`¾8zd5]vfUq&rqΪOrx<ɦ 󜩿l'tO,0d͞}'RHMN 3\T RvEUN.iM0IcHdJkiI4FԿoFw{7Q,?TgN`2tǟ` s_`\(w?^o+orIOhOgIgv^Jy/Xt?7RV9&VmK$#Y^Ϲ aXA ]k[RtQMcg~ok3e=gݏĩx/kOۖ~3հnCTVz} |PpVBn7PcA_ؖ2P)rzEnrzt0x+]:C4AkU6+SSdžb(Շs8~OD!(pQV18iG(s+^V{[ounSVxE#UȫjHXTȞN%wo>}pJrl`dzφ{1fsqq*?f'  T@lhEmjzg֣f~tS?kbʚZ>YmlѦ|*J j4յg'땃:UL PB:[ֿxV2UBߒFJ7ؚV J5jXR8E^stJDPpKNƦ8yNoھnl$d=ʃnt{3($1SsP0cXw9tgP3JBQOa:P sMAy)UZ LhwϕvZT~oeoq:p,P4ue/FcaC%"'?D&3 Fyfp. 
s5R,^FK 3#ȉ¨ˆE Cqz)ќ?}:/qUEE>uU|G_\cR@C+Ēb5 7:.V0LshU}wn}2HLĪA'_m"ZN2g o< ܩ1p8w]c>2u.$w'l`̾=daELH&4BK[<8y]V>xPJPVk҄4Q[/у+O4.s7㹩-W#w:9{~0&ͽ^s'Ց&W$,in*LGWdH-flmئ,Tt20"%?;[͔wzh NĚ"EH?88zwpzp~x|y`SKz҅!z+hsa}=1F̿^<ψmygf62][ an',m wC<3F<)"8G9ףʔD|})i`fQTDV u}#v4#yU>" <"\X!M;iG9lbMep%(-.ϋ&Ҩ^Tj^<+< QtpQD<< T @'T\ݱ56W ٔd)~ w$3{6*ʼnGU$t~rr5N%rz7gg'GgHI٨i ᖈWmttb4SRjy/,cďUZ[ jU1J6n/Z}JZn?7SVM"yL}4Y; _GU5>5l&J#=m+G7w481/4O;:"M=Y/P}`W_ 0աaÿlKW:TfB8 _?\ێmړ UU:?xd0lDKLb5~L CCBL s빥k0f\k9o`F{ijCgUQeW#`<=M 㕱kH<)U1 !%V|ߛ+DUNȦd'Im ^ Q@lA:؎U;,Kl՚jXw,”'~ -*Mc0)"{,~XXVgtEgsCo7O.׋aלFtdKÛsSç!XD/\}ʒܭ)Op-.(\.p tY<,yG1Sj5Gr: Ln^U|K 8<8;vIbv啘1`3F$k|C qE&H$S鬌C_]FPb+W\E;EWAQugL, b thxɹcJnԣa)0/OgOiLvľO%'uS̀ F¤oѺK)٩<W5AL6vm oԐqq!չL/J>Mۛ;Ҿ<ó)VerErBwb&<|)^䬋)m)WqtϴGL bo^bѻ%[^'SZt_V@Y/<͞6 (OشD$o>۝6w7SS'"D $ʕ9LTdʑ l vv~Mrnο@wz D>bXV!noo1_${Fg_L{_~=~E9f'=Fð0BO2/N=g{.Ɲ7qIfQmvRU~Ye1@?bCUzg1pObinyTYk0p8B~+?gJoKk)?Ǘ'g}GT2-UQD4g)kZq|eQ\_W*Зı!s˺VW۰J*ڍVBm5)_N?槺C?.=}R tԨdt"+[G s_RY#_]EvLY^^s2.\ 哮4R$t]5j7m/Sڌkd[MA< lSe젏Ц gYZ8NRRU&#d@|aOs  uLmW~H>T@5G![U=QgQ4n9w\D μ -"|eMN~)k׃ j*OqvzhO4xT 5M9;YNd2^qߘŽ4| JqΉ3: PQ}*K¿ْp)W<ҁ mr3~-? > kߴ_Kh%;6F*'-Ձ@Jum+T@%ſn:Umzǧǐ `:Uڢg*h5H*-~βvM˺Ӄf,Z͔ͤ?ozK8).Z{:u E3(klU!:[U5mze|1euRxۍGMD3/<>jkGht{oHY/yD -`1q`T]Z i.hKTZ ).)lJ_ j|i#j(NRFz0sRb4̺ au@mK_Dw|ٰtVѺ8Lԡ (Op wPt;)kJ< 9 ,Q_P/>AyXh45~7)xs8+aL8Ҋ ϸڪЯ| {T4K]32Q5ƶsUV?jiQm1Sk7RENo͔ծT9*7g^h *lZMb/ qruxuBw-i+NC)͵DDwbAQBT}øa0*=,?H@a Z֦^` Xtzݭ3eWY F>\\ӧ cM]pc4 qp:>56\Wko~R=L##~Pd8nl9,)MlݣؗTӡTuKE)Ng)Cwk?V">Ee~+:q~;)PEfQlW˂. 7!kUmYxEԐkcsA^RL?[@w\ ߁qr)O,7`ۗ| "S<6Pu* P5s&u% Ei9Rv. yh՟B=WA74= G#ݮE"Nujp|#ЦQ-t{[o#eYd`'tƸm%m >(b,>yUGu !x{)g뽅|>Hƃ;uvVӃzfO?¢2 D7m7{ͭ$"rF%ڡ)*0ybx#Y" ڋ`"[b:.HX҇o߾Wj:VdZo$RsGC"e J!\=`ka2VMD>O)>ӨN_m\5Ay ?>jEīS̆aH*'2ʱ96\@e?0:UF^m&3Z fFNXsSp8֔CΞV8w52Dz80CycJadbb7K"8' QCW3NC6̗29?`v~?.9m"Fyk1_"fs,R :S!؂ o3YNvW>0g V! Itz#>He8naBȳ7S$ONj>vj2ʳb8[9ͽO&5d12@ qXV0d׾tS1^ ;+SGyiUGM/}A)0g"S{k^7Mq_>;@}O,%&Byϴ /?puAatTH.t9ڷbq"1mQpL&2卋pk %h58>S8]*WA-ґ==_fbZڰFTskN (2Z᤾k79pL$RJN.mpsB*seb ę|)w,@*5333 Qbzy!(%7P L˶&z‰5~7J*7(S]xaP'U! 
^)S .ogJf*ܣ!8us-x]s{G~[*yeo~Wq޿Qʻ 3ӽsfM(VXV3RF3{ =0W)ae} G<]I+ztktqT(H` hsSJrav\)GogkDWk͇k.S@9R9hw %TUo(Ž.eKE"?=\DJI5,)@=d NQOQ}0* n=+z{'W505hWmn"o!4;\uL aw֤xؿcCJ I1Œ3XX.;WՎ 0~r˃+R98?:>8+T~<9?J;hD{W}+Ԋɩ`^dtN(`43<_-5 sإI YH+5g-T5Etǁ-2RDǫ$KR>ezHSS #[αxw3I%V)p:Yo9XC2 v7)u-DKq'eoŻxˈw|s 0衊12# H̑>W(N|J`W jD/ƕ>VUUzem=6wVciom忍{m%C ^<+~5*˫{xQ6|KսQ%Q7W%sȉ#æ 2ۭ^#"[NImjyJ2ɺ ui.: Mu2*R!"/;9tȞFO1yQyPfM7;w[$I?Q=K:^Rg?u|Tażx> (kSG7?U񅩘 ,%m'P*ǧpPZ|&/C=e|31&])IVJg(#njO[tj|'ڑ>O Կ^Y [S-cV Xd^M0p9<=T_ $ aóhvH1N[ ]r2#?@˱Ǒz1!ѐQzp)ngT>˹"&WWvyZpOy=S|$gٚƴGNSuꝝ_!?t@g1t8'a3M dmPKKEiܲt&FyM:_W@V*_3k ;d ?уQH14fwU_h LTKFhO =b)'p{TG-p(M= LE̶;}jX{l#r@}١F7W ?Xx<~6BZ?e)5~wƶU>ɂ4Q>:G'Psڿ&Ymx*eani7a7ߛKIXұ ֳ^zci=id9Rlˠz3um{#ژMz۰Ѳv{-Dyr!K䦾 o j41!J`Vu}Ri`S* \0̖>ljDR"R9"cD,s'MqTn}Q"HM*=>u<) Я?и-F1th׶m]֢BX' ʕwPľgqpi[њ!7c蜄=u CSk[Gi'Yg#ee|+=w^NJ uZZ;γ,<_;Sd5W)+=XGv#_(+bS1<+*ix D8}&kH|!a72!A FD9"}~L2([ݷ^hé;r.Y=7Scꎭb0gWv814H. q*k4!r'lW>8opsiXI,߁,118k( 0[ >: g<1 'g,GãbpbȦ4@|U*P⛐AH3 (cژ'ZJ"DA6?xo@F־?9"p@ɺ5%ۛ8[Pwn{&()tDJtrڲzK2zh?/?7jdű$@70RV38l *96(^>荿|oPNEU0 NuyT?-`jS5,:]Ӳ6R$+E`ծ Չ0sߞ}QtAX=v\U&Ƿݺr<0RRӱ"SSp(G@Z폞d\N*@X2rl;F8(@ڮ^5jC{!a6bM "gRBaA$j>):"JѶS!gg0Tp!x>SpHHv8PA  Q9@Vf5 ~J4>"0Y"78 8gR9ܲH1!4כjEQVQ⭼]7IYo@~oPh4YB~lφ_gО"Z(^K-7X0``PNjIuI5OJ6tJ%&n51 v؅2kU:1_Clq s哵sV0,o:jCEJHrTLϢv!_%TDJUR/bAo !!FhU_>aaTV(y0Z;$~ϢvT/޹H5ϕ y^IRk Zii ԘӒ惎"|\3,H`>*/+]/_ameEюm ([U2>CQm|cE`*DJ[I`@:n%$J[I`+ |I?44մ-= %V(߂m\_3;gD0#<`KQǽ PviU8f$=Vu-k.1Tq8e9K+!xkj(cTI~o9{c{7Sv% ΄˙;I^Ř˓@IB) Ků~ݵ2({SFc|q8{-9IAȀmc?, [( {6gPeuה,14gR\'HWΐ<cEشh@!A͂ Ɂ-Ofٳeٶlq8O|9Dy;)w~%pOpit{ K[ wb>jӡ=O)NE.9]y2ҁ=xgTG#ؓ`~sÚ"cNEFh0ˀ~@XEeWhxA@AȤRKӾz~29Tɾr5٨M!z@*zWoV$7iA%#;9nb9JQI K JEMKUhOUfeTS*GL'.2SJW7c.)&Smw̸SfX)>v>Snr̔&z_WWk2 S@4{NcDI}1T`쏷ծrD51,ރH4莊vºUWN$ca,Ry^1'iwA}pQq -=g\tl4i/ԛ3֕*1Tk4>qꪢF1[MUWм ӗ,0~ V&˰1ޡyN8 wa!Fįx9̱=>Gp26EZ sKCߣ g=Q{Ey)bbs^d hz+;#TFL,Ϋݑ "Ɠ 3ְ+Ybϱ&8 ]d y`@˜[4 9RMp/V,dr-Lu]Exj09ECe:5y2~Ǔ?ѭVTֱm #5Z /`h'0B+Qd`o @}%` ^& Mb!Fps<ޏ֋ ɻXW"f5-75bPp3'͟ X7cT^ eP*$!0 -tIݱoa-5MdXLN9?%8`X:v> Fw:8Ъ4g|CeX#s zX01.J͝j6G}}ƏU/ "0uf~G>>qc8 L9L5&vR;D 1reS-{ɖ 
~[a?8?w~F]H<Ev:k<3)")[t› KlJqlҎgH0zXu,d쒶`ⰍihdpeaA^ƼQ";R^2bRaG՟ƴ&Dn0:}z͟ZS7Ʉ.pM S.g*=dX77 DnMzeq<_rL7'. '걖 ɵ1 #wvi)ruk=}:}U| [˷E~~u䇖2ghs%u7%6/XOao vS@̶ܲ^zQY_R Q)90c|8Ns  !ă=0HUq< % 2O. @QZ,N(TP7L}QPb=P\,N,VR9JhUwIiOuuO`r'32a b2Kvj Q :%~NV݉u&|\ 04i+83D3\ӟ$voP69M>o[R}95y|/0@/;Q-:6u[z[X7=m?saJ=#q[O;A]δK<ĸJwC_[qL7BQH^3V?uhC^k}rZtэ3y gEYh9X9sm8Unۄ@i{Xo! }^yD78F69!AtE:76zP+#Il?]em_Q^{m,: ݌LHA`ޘ|R&?jS[&XxQ/HMhMq~;Gɠi."DIXum8hHpQc zddZBH5 v A1*b$`6X02" (5K ߂jhXǔemp4[/=FԃT]U0ij8X커;+jEzPkJP=$6 2|C5h8!sKHpGHp'I|P9'4r*s) 6 -WV87F+<-[> F"kru1I$d&gA%h8 V^k8ߠ*Exϰ"IfDڝ=+ls uE{w(v?x>0p>0~ 61RLkMu0 g!t]ʅR-8x+Ct^bfET<+101ubB@RX|nUʁs PV*"@Hz@W禞R܍5C659XZ(e NGkfUEx4HMy콤ڢrN4]7U˜: +~d؎( ĥߊ`>*.+XF-,VěCQ[t**sP D=r@2.An~77L-E1@ 2ZEeFS#%tFW^KE ^J[fRo% 1M ]V)u{I,Q0[yLQ@>Yu杪 r9fW^g>%\+Wї>Ɗ("SkwJJP*Hy|.a׆Nj*)MN\YǞ]ʜ{ `e\ycY:T#a&gs{4F233+!88֮7W=3W"eAJHRnɰ/wnH`KYw\k tO@j0[I?*E?onhZٹ (^WrKmM{٥b$,McU*ZJ hTϻ]- @:)n4V!L{&Mvѫ2Nr ;E{i>Vyxu /Vsky N7RV̹ռ;/j'?[d6G1-6s8 %%icը%^f~~ x}K)B_[5)Fg?շ%'F\.9aYr☚Ç@2G>ϖw'!&CWCxם8uHGw8׍LI0IY ɗЭm^lkR}49?i ;a5;΃Fs$UG#L+,RJ*`p"GDcllN {Y]:猦g?S&wSo xbWڛڎk'?p:'Μ}d*/D 6AT aU~ 2{yVO4z8Py4po|XY$8dcSxąI&>t(|nyd Zw20X2o@hx~m M ~rsY/XDžbCs' PTy:cF<,d s@, 23S\g,W iJ D|6D0R8ԊqXuRGM+* "6\~9*FLq)TB plxQy;Ȑ"nW5sha݆r'K$Z qDZ%A}QoĢ%7iQA%^g{_BY [v3?w 8`_3x_X"1Yhفm'[m]fC'5E6UFi@dcG`A +)r/8:۷\H 5-hêaW3j~B=jMN Ӧ*&ڮ11BUQۢZʈ rIgݠnTwYD1Hn$ ۃ* 8)L jj7&! 
u8RMt3n"dp]W*g*mn* ¼`Jl3b/5G]6>3e*Ѹɽf80qe`zQG697ZOޕ> ~'B*=i: [;C#Pv:MC$mϣ/u< T;ϼѡ*{}yoz4Lj˲F6:a"BɆϼWgW޹0m j lھ=;br-]P:ʠ\$n!:e]:{Mܽؿ5))c(<H2)7?c8"c c(5}ɐRhǎ5LC`Paݩx /9mV2IGLCj !&~B [ˮ/9wfݶE;bwf0.:A#}x͓˦\R*.SU%H81]^݌A)Xmt`^;u78xJS?dJ *-δZ}QW\g<٤0^^ʁ, /ʃeDM˶e&'{qE~cn6Rߗ=4L«Xg1qyɌSƜZ^X>;(j"=T>u{gI0SȷR0h_&O,sJ^T(Urr&*?O> OT$b2`|l~u‚/v %9 y*ّ EϖV>xlg׍ ܘY&%ʱas`QoSC.vf+FŠު,Dy,bJ-Qx-1g;,#dx</Il>;@@n}{+]].,{{#o iK!;DpT=3Q"$H"dЖS0i5A}t>!{-۞-oW^]0o&g%vC\@aOm[&6}i6Shja> f^`Yl;%bvc?mnM-\?"Og=$S*fd0 h~iE@"B0}RE+B5Luh/Ku[dmI}:H@0);s?9|`<Ƅߓ#CI祩z8|Fn}_Q{UNحhPO/_z~8QG:W_tP^nb<~zwAV(A4 K/>s-Ӊ2PK+~mh[Z 8k SڢOHﵓ^H{A 7Ř^҅ Ay WB3_7dYDTMs1[(&#/T IƂ2g(g z`ѓIh41 sWz5FN-Y_GbHܼqxn8[ 0Ax3c3%MW|?0IQs@4FH LƜb`ijT\,c;HX8@{ p-WE렷i7Uvh%vp1s F*lNSDUYh+'}KY qR {\0Mў?gSG܇R:BI @RJ;S u<A]TrFI _(=,pbpEU)TPh ̷pՆidOC.zZ?]?",ř )?|{0ۜr;qVhv`kӕƇ Q5Wl>P@>i5ENVJFʓ$O!F7I(F;)O_&j )@28: 9 ~"wWWL;ӇY]Tz%Q FT{RJYȴ(T-6\1(!.Uds'>V|&_"1;$|OONN>HzJT]1I^# *?/YiN$TO21o)"3qU39#c 6!c9{R6uPA>0 :w;#SHaȣզJ>Eh$[$iI@# jo1ߢk.ч za]yR/s˓?|}⠳@>J?[FCA:ahreؙU bЁU( %N^WG]wTDezrM lR2+ GS}ljk?TOP 7{~KNTC?KfAC㯲#KݳOIw۽m'J$+eg?%_eHp֒%(([n)koeυ `{4goيi[}s>Cǟ[/+2br^(__q_iߋ[ը5ܗ/X棚iL?U+j?n{HA?;|<$<A"dlԤ>hD^b1ڤ촥pš,֡-܂)LAF'k`VFjgh\G CWnݽ^tK,x]l`AΊLzVI]nnZFcr2@-V_oj~/ Dǃ&w%h￙ HcP_'2枆k4[[#e%_ "v&K)k2[9_vz^>۬#/~s X\U@$R\ٿs"9-LS^ʵZ ˉUv-U96?p^j.>_0U4B5n餸OM74{p/&fn Ѣ疉2aPV͕g aWcHqY-qj!Bfĉd|;zʃ ~sgeL* BU,U`󦊉৶ 7yDD-/7UL#2GF柇em"kfa"6Jtv('6"qC- h s<q< T2W5`{t iEvSYǓw"O`S4N'%Nqm9KB?CamSLv_]q۪ya;pon7h=a}]o[&*[RQ}D?}ImK }vFJ==+=Da|;T}f[^DŇ%`DnϽ_by_ 7eyxSRJeK.Ez a,J0&&\T;h_ܹeb@7.Y4}QW!#<)oK^`:Yo6)7NMNݙ `8F2gHY!x`'NJeeE?54# w?/ P4?o<_~yJ ,||6(c_p*%Ww60gP,`~ .Xhrvcۙ;Uei.bLȸ}e.]Jlݳ*ۢOsF~K(>~Ul32gw EЯuK͌p`ϖ1(~k {SxOa %}l 2dD@(ijPD xBM}GQ,e mCߜ c;4}Ud=BQ$BS FC l>7u +?Q+V|$m[z۬ 4d7m׆oc7rK2J-Kr(u52@ X'Y "e<Vڪ}ŗ!rCbie6XXcx2!0 6%@x~'.p_u "jiO&pv\%Ov)iQ8@;w^A3quB+E|oΡQz1n_E"S` u8bdB}i:͢8cBU^[^Ceo<3M$",DN{0#@ъBQn\ݏ|:=]Xa%D'L†^V =?>?Zn<' v)ϩyϓj~GD:gSWm= 2Q}]꘽BB)W/.~pyqq9BX|I.@?o'zFJ\Cq5>;!,eyeGΏKu[L O͛\',J6$Jzo# Aj_JQ7 Ut 32fQf6S1c|:ZpH*G 
m.?*bShtь[oak\OwEܧ`CO9y7R:T70ewvX_,6FW}ʤ)5а7e> 5}N~( j>(΃?-FbT6&B-A*O5 `]¤3 lk{0ɜP M}p>:FSGP{O뽿~mܣ>+5~8`/\}v?I"{iLsR!",Bd;ßXI fk]i+3[3 US9 =dyD:TB*2`c-@Y9w MZY('0ml/jw)x):*m@{]U(p٦ʆH_HW=o>= )ո!?Ύ1ޣwDA,F -/~Q%x԰>)Оm$hWAp |S# S͟_OØ 1ޟ&)|cvT!^e\X{0PbOL:pF+W4Lsm)WVC"Ⱦfٵ/E)o4[[F*_Upo@4o<5#Ig 8 5 QP/'A^a=7A2&i5E ô8`^y]-`^J0ȓ`_YF/G y"p˶ӫԐƫb—1c% ]^ 8@Ӽq1q>\(FLႠPNA/X[q DQp[s%FUx:Sb2,Ej&?t[[c'97n>/k|a1*4/3UReh7E}␘{dP<3z.[ )_+a"TTպEl.Lt̅5Rԇ.+קٿ`Yq}|^7b'o7Rvo^)j x=ncNlHU =v@Eq!]g:KzbkEJaK $6P#RCJ4C̬V( sK ؜>]e$<`M 䳉ޑg[e "QIx~'Kd+?F~&b(w97:yF4gE^x@CMy{SP e5'VBY7 . >zwد(E{nYc>1pޣ!(%oIĕX "ZneFS@ +yOΏ/iyOG}fR1/g‘td# o_U᳚-~ڸ(2ǙC=S?_UPN1$ULX'c!a*ɋCDJ͞ k \XR;ǚ$)O";y@9f8l':{;ȿu<ۅޚ{֟_l<|kYj-d{JTF`r۰.lb"w m*K;!~xaK!9|I5D{q4 Ҹד. D^;pH&)Aږ ?( ȁu 糡,\pmy`ǠwWlbW'GuQCǽF kjsة}W<4hB0?2C˒~:}fvK7ï=Zx.~k)?5 6H "S?N AU SH0$Bic`O/> (TwU8fh$2+ b; Ӧ]-޳1J\Y`1@K/ke(m%`JJ2{i5:9AT܄`&t8.`0ydC)9Jά335~t`Nk5Fk̊YRVosVGy]Lnޮ%j˝(<߶mPIv iϙݙjZqI $-T[".TeR+̾24Os#[s}Ý)f&zߕS(R % ya#"r.A11 .1<6K+9^ݜ\ QXhiyp0 |1ՅQ|P{+ 0_)HD<ѭ#4&2%CB pQ<Е:UKJYT9EWhXEy5yMg`PϛKM-{l! r[!0ׇWJ;wXqXs\\TLhy~'ty*E/ (Wo_)jŎh]-mA_(B^{QoM&M|O1E;~bͪIˡik>CL#k6?g#KbP ]y~uGVhǩW<]\Q` ?8x#nIJ`&:ڛ jF0|[b#5=L0) `VF0γ9)0YгDڛ ġ"#}>#%8y60QorR`"綁S_h*g 䜊< L|09TfUH/&W)nIiu!> LP& PI cI ̊&:|"U!}~%[\( ooRd*g5sV'e"㕍ćT`cRf=x1Ҿ\Ѵ Z&zki2 9t9#`bH=O S`g6&fE@kSZ=U`g6rSd"5.qI*#R&˰``#7@GNH$뎗mm[e5?Ǘ8(=(kvfJq ,߿5û1491 ˫$= NtQoR5'ō"r-qkkz@<@7ZsA@{-v掣aPrR^ ]EJ-9E W4\صg0M̏ԁb8Ats<æ`h?4TНc|8=:9__R_;ʦz:+#tLnM;}q-OdĎz(Wl ~:#HPبMumnP ai[`sꘆ>uR]~?. 
`?W.u87/qUG~)i?-=r?0 W=pvqiמ;odž=W% gtβnDb:rh;^PۂH?& 5~bs:} ?gߡm~YM썄qh = O5+AMC `qeo?$..^k + {/"XvrHG%1"L#'"1˦{j9 K+Q -@yE{U[G jo0)kL vRʼny iy ~n8o؎ryprӮJbzM~+őmd9CŠmgyaoh"+,' '0 `ɑkpAhrWMިZZm6C- FWT/YϹT ė[zѨFl5Hv{,6|z3U쁃 CNPi3%D#V85DLU942S*xW>Gsm,#&@_YGȏ_\ ^g)go{#%rO*YTqZ̝Wo0C'rp) 2w&^]y:H/J!u0S0sIСHx|&.8m s%*"ZP#㍇y'Zh[ į=%q ˛P.&"1{j>e&[ 0U}_t<]hDgp@ro?9$b&1Pdh44ڷ<%u+eFm40P3R:LU1֌PT>[GJ<.K$drO,q]lێ.a_07ੁ2ukOi̙)L<6n;,ezlC8LʉDRAX@C#Aa t;qbߛ4LJN8<%fR-x2qL;y wS~#{[#g\ODyDL9( x5cIM(sd"xWrbm&@_M*L/:HV 2"!dx@m|@2o| A2Rp Y8 ː9A"&oxIAo|eYArso|UiA"|yArylY=&BF*$= O IV“zĠ}g&7Zha^`uzFԯ32鍚s KC }H|=edL**|AG[ᘷ!cP<ʰg:[.[y1cxB>-h؋_SWSէzoIڢ٬ Ĩ8hLцV4]3ΙbflG1Naۡ=UG+b`-4BB3 i,EMʖ܏tOoSۥ[4;W'Mbnd$-tOZw#WTLc7'0WG')W\4I^^d,ՌN!rtAI8qP}D/uD``vxҕFO3Qp$tj)#HDF2͝r6w7+F,F !&΍c8H#;0NXR #RzhO41*L tHEwp槆.~ͯ2cU~D_!@E>8}qa`[n[ar(̑Tb.憋vY̠6[\=T3:wACGR| .?L^xh5V/] Kbq2 ={ѯuQoV#z)N>iL49EVFrc$ֻ3U^B PeOs#%puH@Dq}gLhѽ1fZD|MJ0R4.cw]] id%b $z!e/ Xf8Nc `X\LR|e:sV#auY{قV0TO B2V`LՃ5ޫ >N+[#6 ] ͜zFW^|@*YYl4b>؞pЭbOiWT[[W. H4:Fʊ?jpwd\'afHؙʞМWF⁺a4R|Gb:U[4& "Rkc!h kF?Cy4^m>fkkuov7R@27c412jү蕕tmWyeeVaȶ_o11?Fsь苑X}q_ ߛFcuOvsHYWkաKAnʮ^BLD7pr>S޸W+ȃaZovL"bǃwаjd 8 dUDKRgVcGJ\ =%B̨#? &| +cNc5ۍ%n7Ri.V|O3ό"/`Z>MU8qM@?& \385H< OHrƆAU@/Ai 6Ukoi5۝64y'(mJG >%>TxCˎ$wK>31dڒ T6צ'[d6|fϒc̭Skqi+`Uj\ԏ<\iAlhc_F~sXu`qv亮>P,o'f:۱*b0Q^Mj^XF:ᏺ ~bwf9bڏK6[[FʖoJФI[-?H`K_-'LK$TǁTx'4 I5W'zq~1P@%inAjntc҄jA[ʫqwlT枻g=V^?/qo<{__|RӓoU ~J+ClEy:eO=6}\#8QQ-cʴc龦/v"&G𝽅ʘq_o=DcWX=& Υ:?bj⯃j0 _+雡{ &ȯm6D-W1ذk!KяwwtzJ@qum_팜9 'm:s}}vvxUN ڣ)bB- W y L&rBy-__ MZb(_X<:sx% [s!œ:ou E8p]cm/TsY[wc>(&:3@~HY WmNCiT8x4<8;.۴+ԘL،y]h|C]qE&v [iFc`U <$>p5E[&83M KB3! 1w^׎UŚφ\xL/5z2U4PCR2yGl? 0R'@p$TeW5̄Xcf^`2͘# &nI.s;XД"5fz{).%j80+=2xȉy$o4, 殛-7Pk@ J'=ΡS6>>GK~kǜ&VDYgp 蟻 E&}켂1ٮ*.>+oNw"&N(0yEH /y=Dr ~u# !?dLLɬQ;vF%"G-0b`. 
yT1\]fY -|ZȝKP+wR' Ch"-Y"c8ܗx 1%n0]`%|4KU>7_HmkTDH)Ÿ~wK7Q[1/>Y O9#p:ae>']wTX7@NK(J{SZ>y(Wz +4si/YEY50o#P[PKu84Rq[9ToE$n UOsJN0?'<#5'x&e}W(۩d贓o?;mkUDd=w&JyAI n1a{wNY"LJ ]O " soʪbc$X5l =A0'!wWτ;"D%gdSH %\j @}~ҳ!/T_=ۢxح'CVivznk)]>_e"`VXs[™4߷0Ý*;dŠRߡѴ6DG=M9s|$~z3]%Y۩sOahkI}ׅ +(t5޿5[ 0>/61JѴQSyδDe Ko?lGDA)>V C?| lrXR>d)6bVvﲪʊ'EO&'(iͮǏ-ɑNtS{O_EWNo#e;)cXmS OOwr$Pp˳h{ _.3)#s-pݪo#]eS*/[s"o! |7뿶Jl:[R>[ [{ߩekWc^)/1슄ߢr Jx4XSoG ƙ"wf} bQrw n?! *mB=LUG3;)+|{8 O궷ᅴ)eO}>_%6F </t Y,jW9`2z{ ,?,Cv?57UW S~IF* W =TrCpPBj3=xqPxl^S @}[s+c[ ǂ\P/^4TYSB5Qх)oC^ zPK1"#1^N"ηĨv#=uv~uFqT@6w`+_Rq_8> ~O){.|&4U՗SČ}e`:E: An`u(BTKg_u|~+ :{>|xJg'S0"F1=XgtC&AAyCᑣS`*A\ׄ*">> uf\07Ԫbw>\eF0 g]'K4kFxxU΢/F-F-/~; šk^rL}vщ\pZUll'cؓ.(Z `xp]} Ķ;:'(OhTW..ށ1H)ahFDps!_UCq}"=wzp<zWcVKzmwN6R3UP>/傷9~)uY;AG]WCt]R;C 40 VmV+!!b tBjNQ+: T+q?C( Y'r=m02POOd*~%9U1S'uO֨K<T*9#  iT~ ] E;N*D[ǂlX=Ϳ2p|~ \N޾r-k\߼>H'q乜зِ6rX&E 7~pv~س!PCVB _()K#,tfJ[Qd ed Y +tD[āPpp 1!$(:Z;h?"5>罢>XǓKn;^A&/NbG3հR&KwHU}||ۍ!'9 G "0"FP"cvʑMWJMNJL"M~$q'ɀ<y oϨt_ِB5Aݬ§\n,tf}9ԓrDIiarB9A5*4a1<*`8dv~"`ƪW{!hTވY.y9c Ξ +OmgupWw,8_x#/.}yq6ѕس_V>>kJ6fʓZР*9tdO*:bҤ\H:ԏ͆ETZUE[(^>xeъ+AJЩUy,H"&E^Y+L#+p'WzwW\5lM9_ɍn[ 6ecin⿩KAF &tYBO/)V.sR$P 7sRu))>Qr4O jLy< YYiR**QOmlS(kȃНI?zK[[{ƶe&~HKlsszp~I}ʆ>f ._v=}4wu/Ko7ڽw3e=T6#~6ԏ Uj(A`74ollm:eϛ/ _tmMG?khrbLg#eOĶ/R(/Y V3(M Q\/< m ä#Oit?6RVzf]%5]`H=Ų3} % #Fzf1-y^N2~HY-'#ouֽf#z_tyoM(Kݭg#e[IwGa{_DyG@{g#e;RO|Ptvo~sTZH! 
1H+ާ 9 fENlZyo丹kd:R/in*LGWdfl9fj^Rot )zj6{vwo<SիE<7ڋcJBp$[arqs%pF-ZזpD42t CPhV"(ƶP$L-s3pX4'd\xfc(U_N0T'K,XWKBYX>Șy x@4q%!.G VĪ4' &HW(9: b?_-ObѻӃ˛V_4OtxA3X" oWeBrWEXx=F2xf۲jy2?3[>~f6Rˀr\_)Po) Cb_a0@PS3/L70j7L@R߭?PC8 F)|n ]L K;ؘ(#3CËq%Jろ"o({#W5>viSpU刂Fd9UZ*epT56W 9hɶ#SY@ A,y+*ǠT4@<Ե>Wý!o9;8#k*w$)g"+$^nwlqH3ތYTc{ /,c4_k/=8z`qoVCU@*L%vik.OFߚhJ{dMT#=m+G7w48$qv`%5H;d>aKuT,TT9пlKW:TfB1S<;۴'JAU:?P 04\!JGp)A/?s W)2~[k s빥k0f\k9O9'm:sle;;J*:|ɀy hȩD]{FV4[q|oꯔeW9L3T@X0G]҈@SΪ~f0~ؾ1Q@/1砃eiVZ[m #]Ʌz1632(3F]_a*[ ..zȞgOc`ak>LMeIwG 'Ӟ F8 m8׀ \TYP`mc>(&r٘00y ){,GlӮS3l'nqE&*'%dG[`iH-9?9+j1<85rqZb$ {2zsU}h%SOUҔTizVةǶ)qPCq8W'ӋgO@ ]WX'{x6۪o0P H(@N<2Ck9ba1B#*#[WUl/p,aU =Q*^H8:Pe*s!hQE>ĒwschsD.<~wrg%L%:= O GAŖ;$Ps^ ta0`-' n2͘kR nI.s;XP~!^7fȃ6]0vC<0@L .6rbgI-<>o, c$-^rվR5w\}az-lg)Զ&Ȑ#X,FL㉕<]M[- -lb Em͔.O.ny.U$&|zlV7G5ꇉ7KaI4aMF_E֔j/J^чxZD5T&~MX ,H?BXTM<قR(`R}]E+x'#RDrÕ+J>vwLYou_+:odž}~O~ |ΞO_SS1p;:K[jV%gd:aA=m0;@!9Apnh>f[]c' C̙6OVM[r,B@eeDhg[ʇp!\?Ս_XE-k3t"at)Q*G'(EhgmqsRՐR;lk}49EWaoۛcw@љӞlO:-疥HuR_Iup n3cu9k܋Gw X%u^4bK1"[,oTf.Y>@#,bw5-30F_lyjNv޷\ g`u}>$ 5m^! P-$V8l6w芄$ h޷9s;9sN%wLgR?^SoVA_J )o]r)߬rNT 8$+s g͠ ,z&2_@X>-2"-^nq?W! t^+6 |I}Qo}c*jvs_iffif/M3N}ܴGoܾ:)hd )w|$h);wx) 6j&Ag_5@mYi ̌\u r3U-1wuJ7Oq^ 63ä4-&D;8,t),ZЀE_i⌕NO?2HD,Q8l 7LɒWW7U8GtdB#!lDdcP-~ԋARBspht/Sy> |jF}\BG'j <1@ = M_b? v߄_A AR.uɻx Z&zgQa9i1/:K8>舢Rcf 5"/;tqmA QRU*y$Ex*4}+<{ҁ,2cLFhImڰ= &7GT؈;ߗ*/L A#CTηjEnuܠ8 -#a$ɈbcW!v(ZnRZ)=ڑtņ侊+k s?[h`CF_EڍeۈjPo"Xݰ%BF_Kڑ-1FOAh4I#u2n?V=f_Q, Jn9XT4s.ܵ6bQYZv^(>Cxi#NoWXN"Ac?qޝEBwEm7 nvk_5] !3Hko#%}͡N#WMJ;? 
wF6k )m]0V5&%)weqAp%O8p,s/ƴq-Fr$Te2J8UkäoA@ӯN`a>W&ٍKHB|&av&Xxa h*l"I~LKBZ4 䬬2mX{./` ȋ=|5 ۮ8R"L6h>5Hj+)BPM=)qq.LÛ-9Y8S± hIۥÜ UB䓀phL"0 b#y]-85e(kC[- %E6:ȣP^`(cNyiޞ~̄QQX #ʓMUi!c 1YUvG'b% hCwUmW(&/}Pyy [XBq5VQbpMeG\ bxڦ hp`_)"-UF!NsCns^aF< H{;g[E~K[T*\b9z7I xe1OQ R|PeBSZYbKrp_ fd; "Ml /`{"G 8K]<0@vo[=W?W$pcꪜnNZQZi[9V/׫Z_"Hj[;]#Z4,8.pv.e#93{u:(4ms"/@"s͎:tG QT;"75%^3_JaI}RH><(cv bɕ&^K74/ Mծ& s F)/堸kͼXP;Add;bcWF`NMÛ+zlQh6f@>.^=> if g0xaileY"{ЃH3 vCM'AoF0)J&{UٛN WOHsRgz/o};)P+/Iic# (:84"phm^nf4)7dK9 YXu``lCfEyK`d/xݬ}+i?Ǘb*kA+w{׻ܷ.4~ A.LBܟ?+/!2撞īgv $<7y~7)׮Zr] 쟡C ḿy?dӎErh.Gz0y=HJr3YIAȈEq%)K)迶R?['i#HrfÅMh^>c3;+Q?+P${Q/4]n :?f4Y,są~x]%lEvTot<] ~p#dzqo/ʊӓ d?t37#\`ہ'&@sB65Omg@,pm(2lF[{ &uE ,ݠSպz^3vKR{HUԲg((/{}bɚYCE:Q=xrk- um:z }O1271K!)%UdkC Qˬi?s?U )a:{uib\]Vt7h6Mg(aϟj,z)VR'89=^|˚-瞞?|JIT*psSW_$k]ߩ?ye&S@ 諏&Kx+t!(cy`ΑL`ר:GPG E㒒0څ)jl4ȏ14/ok`A *- =[)↼$s4sœ FXxn4 4=A%h)Kth6 :p;B:΍삢aۃ1z޸X][kBXT+ZWݻb얢ahC<;Z! SUY4hLbPdG[>Nx}ߝvf ~E#<) wB !k)wA8xNoU -$*<*d Ȅݦ t{^qO(|-x$i%ez2lRIVɷ<7&6ڴlF_ IE N}}U`J6v4\)#wQYM&bƆ"̏^r ۈ=o*j\f0)Lovy-L)gkBt6"N&+bF E:3n.H>|~aοyXl As9w4îC%~p1?!sߒfC2xGO?oH4~2tihj4~i*wF*ٗ|pcxy#ms#luS_+rfqj=C`(]ݺv.ҙ(ϧEAg^~9 ;OT筩 5I؝TWK #|cM:6T \,$m|Ylo~i:IAwiXN@ `[ P$< PF 7Yv`0]Fr rukOwO};MSӻ7u3w)YaOv)!zEm{ee}-&x®lFY/TeA*2_졋! 
7<7D l^賃tg/>f"q;GJ;?Z.^$e??*0XG'x8U5yudO$~zAXHTX`4AkLFE2n l=6ΩawhXaa[=d]_ՐmP'3|t ]G=: K}W \ \ˮ_@-C`=Y_XJW7󿃤T5]Hz5[-&CgZiπj-?Hګ=\2/K9}Iή)Z-9S؍!%Ro tEqI[MIBuy z&QMJRSU*׾(te߽G;ti/:(gڳ@1”mT, |RP)A^p8 Θzo17,Q ;t3Dƺ"QlyAg؟&[ OC\E4\O35 tI:rq#KDqjdIm_*1JsP)o"|wg#E;3@2J~Ptw;I܀%yd$_-GN8n7.rh?I.Fܑg+ 6"xSf` pFxAwK1Dݒ,H2 nΗ IlJ:Eq8u̶畇2HOX"C{F{<"A$DCȧl )r__ܢulXh@CC#w}7VSWhB Y];3ҟeȽxEf7Ɨ"2E/,DOQ̵3(GeT]|FQePRl.KEJ!!?3gCuk:v9p*o!B 9 6N tG=6s\g)e>L 0vBS!^b2!"L7}kKYS?Vs6D/ g# ()13xQYANU<;!m8FL_R֩ܖ(!gL,f8mO3d$KlJg z4-@&y y[eTgE/ho< 9]0'.NOKn4 Bq>xIH_OOKkKcPB,Y;9-MYWB5 ^ዃ($;n(0ϩ1oRxPPmYAɱ>FM2/rPH,ɖm=e:цejv9\*y =D~:9R !$uvbtYKP4tͪzR`#ٶ;t桄$tMLg1^9: (RSͻ,}XswO2S48+W+!muڈ9>mgI4&5+vr] qgq{Y響WgqjG/Y28<|5N]y0F:}9S ԠY}yqLІ6\́v#A hAҗFˆ\1 W!W'~+).WnXT#汅SACOHZ~"2fsiA@L # vLψid5BaO ]hC[qo쳇?wX-?+_d. 7|僄KfgLp;З{RQNs.J1cW ڤukH'(;9qX}( )Fg6M>(HD[Z\ sɜFRoԛk˙?Dq^)cЁTcGGnexdI1rG;9TUBlp'}r`gHovQ* 'ݻ7WE7Լ<(r9=m/{>c!_VƊ>2աJXLn?7H?߿uwӻy;)M7䋶 ^'izA+ZR䳣 KAQ1F6F:v&cE__sGGL vv^4gH:?2|Wo̅#0RR[{fkpvƤHCC1E7IqD*'@DU6sѰbt[OG6!aԨJ!,&YCL_t~n %~M8@w<>q8l%)P_нRTпTm6h-{rʱd-@U9Ѩ.٨ RmDŝ38z`M[1-#w얢kr0 Yxtg#9EAppΜ;3{nN8 8v$ \U 6Ѱx/:!k `Y ȕѷ^bDQ O ufy*~oTѾc/5EflUprf' 㲕sijg2481?dp+Dp#c%K/PbZ5M_N/#h%:W >TЯwdݟ VAV;̋8"߰vj} ~HOžs.#Fiע Ӊxw1#~ƒfczsu\{JMZv/kVVxwe/\Qk K.y&Ӫ),B )X .sg98zdݯwxWdžV3ς^1P-8<߀' 蔼eŕo8[hiTb@x_2"Z"ʈ|H0"]XzE[{x:#o̱ k<3٠;˛QYwj1kU_ #^>kqblm?yK47VP/a|[Il>76ҨCIl[vesux)1{kjTfCG2(b b>B>.OrחҺ+YWن;[^(wO~hcb\$S_tz~z`EF gVNJ5qs(K>m*3kF1I>V)!h  zshyNvfi1̂V#(dЎ.2Ȯ6 4xl;ˬvnśmʙ^,6P'@\@_0 kfsjw@ vEis"L >pbvG}~udz~ =P :vE><ضccJ}O3>kώ<z C3vs+gm58u#x#.$<'^ch6{zyh;z}E̕M|r7)#嗇{r ~:{uLXDbƯѸQM/UXI:OIN(G$JNROIH%|DZ]\ _1Ho@W IߋDJ0fp 7ŸLL: p"/Z-臾Mַ2Q%|ڷZtBe!!ɝD0dDG :epՏBr>ېlh$eM26 c>8-hX6MC\Dxޔ"PH}ITx^}PJg̊x kBXAP^o"QK30Y]lbpC]i"!fN]ң> ^'5Xz8gBbC{-d/c#gHN#!9sH ~c~tảa邼ul-od;U,az .;BX8yzTM<<(&2˫/-RyG!r!j&;=9Cu4]@Uf*1 HVG| 6obҺ`;s%]9Y!kToJb!dby U#&: w$lO(Gevx!K谿6O6y 7+'\ٲuV0) YQ7+?J'26@+x̧1ї?ajz,v1pz.DJ4 :FujTuByO4ғ7Øҍ B{ ݧ\r'CY "1* c p3pze0 eRw(i]7U/!-5Kc.cI 8YıZD R#NSeu;Ƒ "9ɱKA:@['TXi?iCS'Dփ!ZDB eaqɄ 33p 71J׸s˃ wÈou 4uq32H~ȔGFU"gLӤ 
%*Ekii#r~`6l[ aN0ri25q1VҮ/vz!VkKwD:rEJ?Hc 1*C1 7G 6 k#)pɟqƈ q>th Oܼ;Ω}dlq1) C4Ag!T`rا4YԬĈkh0\ uu~d^50v5W$p" $[ _\6oxfkݺi;9k.-_UF@ hA#Xt)/ 9^/N:۩w:ʺ_JX!ȍdBVR\Zь_n£)Yn.[Etrn~@Eot ` bL9_Zu7oibjJ,ƘYEA 'A'.03][?0(UWV@6ƚcl?2jD>H݉1Y:oK"|J"eI Q&lSc<Z 4S d4g cC ڵ sQ0Ӿ;<9eHe!^7=Nը+[E*lHš&2h%q`41>({7bߧlM M/ -g]/A;i-C70v ݙ7FG*CYzB/b keo7]H}:Cޯ-W||~F8rȁNl^As0,ރF+9f]`,ͱRV1^īwMP N{fXEkxXo4VWC_K&fkouab%S1X# c1 vaBvNJ$LE׎qtp;+nE]JYFNve|܋:qLI5όZ8u =@zj~WZKKȂzKYҁDCD2TvpaQts{IV1eMG{`2Ns˥vRlQe7(Lx9vqcoNjWR\]0>k֫snvB zwT<].8;ݯwWoc ?P)$MwRRgEw%,(l Ro2tYIR bd%qx* ӬpٍI'# 6d#Y69\2O6бjQx/s<EçOb\; $Zp PQ"/qaтl5fj<&?C*HҘy&9h .IY)'sc>K{ncg.]1q&x9ߛ֔E1uz5P jl+*y制ީyn;{"8fw?ܥvͶ%3|uzCnTױ*]{–/Ƃf8v6t# 5hwz JFMxEKu>v},`2K^ooNVV(̵:d"#m3Xq qI_Ԥ~`-8?GoUF{]WmF^΃NQjm9n?8zyUUc!ש<{QZpGtH1+ NchӒ@sZ,'ּ ;3}A(ǟoJWj;{#㞎a Tz% x΄rM+7\ƶxtfXM'[^]Z)zC@cHJ+ZS~5dd0Z-n)olnh)ϫ,7 {x9C# vPT7$E'ӹ` -0l 0a2)氄8p{,mE]!їʪǼ$1lٹģ_Yğ?2 I3PXV@j+Q(jWsE&eR_W2$f_3PJ1!SB Jb`p {_q yzEfЂ!.l15 W)!]>q0jz-_n`'[^-rlOLKkkMU[^Z^BʨV7y#Rr杺.:@?nt{^&urx7-z@ܺv #z;V;\a\&v=uꊳ m]q4ߠ:` |66LN{,ydn"/-S#ïPo}v $_)40O$fX!>`C 8&iXA&_+R$h]|G', 0p\{S!ۣ qv#>1.˯r(ry"!2U'lg u0*ϗj,^|\QgR?h1 /f ώp3JŻ@UmdH^cnЯBn#vxb x4nOdGEŗ[ucg`6T]wYȡǜ|]Ó]W.p? 
hxtIʘKRkF!~9S~W0NO2PGϠ6)şJcTI y;-nhA#S8V{ سg`8ņ2A7D,OЛ~P][ C MOEGk4% ۸gdb3Y`]~izCc(D^9 Fw Iy))'Rz Dׁճ  exr,Ug̥3_dҲ.hgbэ0mg:m>hbU;|pFUHioQ~s=4BrUY irJ `u֢a 5M\ > Jv9dmўho8 , `WU5n7;{l hh > =yq0 QKaV*oRAi?HaDu 6voXn+o0ʷXsȽ!yn؆`Lv5{J6c<~pm(qyp RR=0Ux8ɚvf\@|2f]LB2`M舝MfѾhwI| CTiRy:&:Vp50g͘=t1ڣ81ŞVLXS- {I~*r+a3߻f0ߋr7 J+| 3Aʠ6BO\IE%IqZ/L9k>$v[$N8zAYpqcT+M'.a$\̨ GĨ&UbtZ)XAʤ+QsteHwVG2SB@QU14'lG1b#a1O (ήl;u_E}bl6V͵ORA؃Erw2#^ bivsdJ!0xx8@ܟ۬yQC ;u z<~df5h}u$Jse};v"փ`MƷ8LgܷA & $dA!_!ٵl,(_`$S2u$!'9髀;kĝWc6V5+t|+hu" V'7fzC92#6 +8q[3Jx/u(AXϓԂD}qՉ1]E)#c¢RP-Dc2=D9>û ` nK}b1 }ٶzxNY؁^e.nh~.Rt]†Ym'{?:|!vJ(Cf㺱Ae.VbG_һ,< ||lt()qS>azFgxv֓7xPV<@" l1 蜺/B̷AHbXwJ9ԕ X4!yFrBRO}D{_#4Y3Jܳ_Z/c=x>0>!@ qk x~YAUjpsG6e?$ avp: REP!m [72HdO.ZD!Ľ`fmah~ΦԕGxC,y/E'Ywy/fy#xbA\ø 77hƑSgnlɎ@"$B2Ig| !Ej2N sNܸrzuO: j5ه6djg ̻E_l=ѝM BկYF5V|xSጹTZb,!)J06?5*x hTbK fÔOe<~փ/<~aeÃ_m1tYR6~āC{ vxQrFkdDUOy2#Ö̱F]]ce=A${Qk]eR`R$m tcZٛ#e ޠ_sslZ-ު*Z5CKJQM Qp%[ioe2}/אd_Z^n_k }Vۯ1삆rÈr!Bgdym?i> (8n=Ylg}K/7B?`-X_D'==cg`85Eױ>wFk{lBo.%;GqЏh Vpyhk ?i$<'JDG>5W:!TOLn^0м{L zǥś8_;O|Q7z |77r=:ݤt!oJ\9>r7)4cqX̟({ԃӓ$ғmsmg7,)ĺv0{e33ר|3b<2'Wd7opT> ɕK4DBGb0Q;_\0!o*fk`M!ފ%e杬EW0!>ۤ.VKK'{OWTgcc c)ۧ ]'>=Y<:Mf7m4VЉCLqkӰ6OGD:n7cFV%mU΍5Q _u_OLD%onBWeq(mL ߽D{]PPn8AgԊuxX䚹n%eki7cb]BY=`zvw+^xtYʂ8W!3V/ǭsx 뽴{PՋ``}6(Mvl[u!B=%lgϓ`q^fe/(&H7`5",{:!QNb[kڤ}ZD.<a jH,7m22}U-i-7_֚:W!e!Kn鞩._¼bʧ!GFz}bh K}72L wKo˩wtጻ4CRys:, z̞'/R͍F $,_Rfؚׯ09:q Xܐ{PA 65?yx?_.k~jz3]3QW?>)Kn$),O'!RЂ t{ ,PJ =b; JЃS5vXc`h *n CU?_OB%9 3uao>ej u~hھ'5aв;d1奚X_]hF&@{Fŏt0P.]Iz]郡%Ax<#ycq_9]הƖpw%J?Dze~qt5&ȶ5jܠj̊ɟ8{ "^G4E>ϭE0@6nPK. 
LBϠ'uW}<ƘLrU$ꈝ,:1J*An 1z}=X_H#d(\W JK$0Y6D[J' [߃,vPKmwEֿo*^EVDz+qVW(4aC3j/E5 kqQojXPyp\^&!c(2#\0J\w?:˅b :.ITPuZ(6@'~avur qw߫d94j)uRuҮAlTn$ Jҹ.u H% cF5r՞X2qҲvk`{}l!BwSgۏ[ocn@ٞ7EK~&h oD>]3[Ov>{ݢ_m_,z1La4?Uȁem0wov<_zr+3 K˫k};w_e/z(dc8@@=ЏX+O0rvw<L;{m^k~g@,zO \[=.[[nW6}3a<&-pxGAB<8ʼnb{4+p]wnwYfxHhiG^)XQ `3W$zȇ?C>A@A&ûR=qb=dC=+ԃsI{ХKZEJdO^oxf jd'<+@:6]1w 뱰l$]U?zI V21ۅ5ǂ>L0ͮ8q yw:_ع^R.oV3NܸvŜS,|sW&ƒWbLaX_g "1q=1KkRr'7֋UQyjx; X"l~5G9aLϺGD~x6 ghCE|1hY䓚Հyc'SF=(+TSjQ?fBOKؠԠHDA vrٹ7ue78u2؝&`,\'&R{Y}8}m%_;`{lvkۛq(F. i˜1qq,`)vWn 4* (+~+t4+;R=OwNzlֵIagQ^Zv.b0정a1- ;^SðaogjOHtdHHJ@>!4BM0Wtw+yߞe]\Y럲x-4֗WFs]EQ~z}_ p0<̻GZ*V613Rw̋p j}j[͓֏"/y5C ^"(H IZh~Iz X0(=Nw ة`\IʦPgjL3Q+)?٢ !:|T\'lԤl:( 7mDV聨JV1jI/ sy CQ•{/BӮIl͛:r'[m( pDRܠ-I:P`ʂQ1 )|+Q8YĒDcEieqצMWd8FO ?H' v2!64$U}l8iHDƎqA ɡ`¥X{ftmtQJ%-' )1v=傎 Hx -y䀔 ^:2kPzi]xvs88mcvfɵ<:ZK|[GxN .(7T/Y7aqbWְ߭xb>Q@x`ެ@cjb +RqPQ< UᠢxBz|ط0N{1DG+1AU^`ӌ )M.kwiS}#W|n"}m J"  kn #;UŤUCDoZ鹏?fH>|_spS`stV{؇s!'pH]\ [ tꕅGW,9|Gc9 g?';q_,^`j]PtU~Se~3f. B1٦ko֫ř't?ֱp?`hCfĭ.rf`*+AaTq{'@#GZ=/Q<Ս92}=_ lܭ$서׵b;wv* pe1!n^æ1DLf2\gf3ϰ<>/Ydk"Ŋ{fi6A FUǿ(/R5U~҂ܨ4M U&)I_#>績3_?H{f>*@LQ%mft/axe둬\tv ݧXd\H""e @#b M ڌs&HCD;EaJGA؆4{ c^x1R5c`-`V|^D'ƖHQ87_l%Tf0ȢpTnhج 9;@EwLs7jkxNnjȎ_9毿K:ub ތYs /RH2XV0}" @!^'1WI>r=DrRDfT4VłOUNiNt=IֳfՇXXH 9JssN7jϊU!:5zbdd8ma{G=sVjwA4Zq`X!.^MTA;˜"XV?p P?u>(s2'tLhΈM_L͜wYX08rtcV'=kguK\M*mҩ==__n!`L%S6^U ple1jo.˴X,U``|?qйFhh|u9 K`9ȜS^hX$-\ʨ,6mx۫#Imǝ%:ıFdG).t$?ay@a3+/.sQw=mSzggFu4x X_qE#YrgOج`@v7P4c\Ajl=ޜ~4b51 o& kT[2f&Dns{gl~v`{#MG>*FevԶQk35XtP?;8oTo`Ƌ*u_Uov4B4tΒ%[>}5Xq ]=emXѡ }1NsHbïj7g#@9e`jɎL<:rhT߀fg;g}˨HFD cɀ+WD8<J[-1CR$}C>t] k{<=ӵ_<Rv3o|bI5xk,~"?dFAFtUh}TJSE:dP1' )HI2~(FkADTnB ڍ􄳋V癇}Wqږ*IH`'%Jgfq^:,"hI؉FlƦ->:E-"mm (@$=V%-yկu-LA-u CRG4H1aQ1D@-/BB`&`#6RﮊMad` qQi6YD 4!f1"=ˠfzȠ;Q #h'j/XJ-xPsA5|$vD|fE#->ф3QFLh.Z@ik1!]-nQL~u sb;P >YaY*;XU;^&(|C#Y_*a קbVzmڵV$ _w6lxeb9q_ǨH5mlϗ7ARGp̒Y 7S2A-R>irȨ'QOQC+M8jgoo!=cR*g2 P2DtMm!~HFKԳ@/ZF5we` ŵf 8x}e#mb %͓9zZrHOԸ8GdbJ;{u %]OROx(U:-<}p@LLu$p ]=2l=(L_ ;*9L"Շ1S$;GXɳ)jzJd 
@r"&*GDųJbV#Ag6Q3qMj4$ٺoE|c/w:TԤQhbǑ6)|_.tO0A]6o@Nͤ=.&Z뻯iUQ΋'XP0q4}sd;۸aMX_͂06};~b-)yC66؈"k*#(# Iرx^{)p_<_WD ZAoթԩ 3Y:P77}< 8l Lֈ4~,ջ&HӞYoHBY-8tx&Ņŏ>hf5U1:yl:[Js5壇ʠXa^F\:cӽ/nc7i':qk_nv~Ӡ2OjV?@Ly #vj܊ ]RZs3ȍRbzHg?䗘91u.Dqt'Gc*> "a.7]fOkNDKZHcNJ:aW׷y2q3Iw e:vB>uЕLaϣDqf;x {guQA p/Z)_~@`Qpu,bG,Bj{(w:_Gǃ9{eBØE\?R!Z2\gCo@)&`lޯ1ǍOJ.>64;hh?ԑa:'-w:{\|&qZ 0g`+g;{#J0]gX\[051|?*a#>Q&v֋>pմ<PfKus4_ /EtĢ"Fg6zlV^ԨwfR7/zʟi &~P^}9vkmoQ0ޢ۬,%Uwp񑢣ZU1JE%nO)0tbd#Z*EBb,d9nwާt}{koacG#ƴF$juu[x7e?V_||1ƿ*}PI&92yk`HFmxc*w&y|FumABjB2%vT?q#Z<{((=cRnN^dL19?2 H)fHby) z} =2ɒݡ[Cvd ('/e,Rv^N7Fv݀o7V66KO#Mp'XlKn01c.YWc䑅qIA2k]Sl\w+w);x壟[(/2)7_5jQ\FY֙{(^ gr3@/zVPbZ: ͱ! @VwHNа8U.1sv/A.:?4|ŽX3Z %~F.*oan|SUD.(^{3g|? R<#[3n?1q2S qv"!(x Q"P Xeܷaj3wu/ć2c{^,6߯WSԡ[UʳOF 4|Db@P$,)`k|&cr] |Vcݮs0RχxJ@m?,ōHrnESJD'H&.:\OJÝ=vu#@ w_gb,KïfT@.qouSցu\9n)<~aעe1oٙٷlE_=b@E7X$oj<Ꝃ_ [:K)-vcAtcM(̛ALX>K6R6fAzR/ &;N){= KaآRpNGj#Cߍ&(k+kP\Y;.ǜS[RT/[n6k+kz/ %<` NH0on1oc̀eB!ӓ_T vXlH_x~m C]l&]xOZ_+K 1>"$J[$5zvc1e%?cI _0XG]%SI)C)SPow?qj| 4s/7jxv WpBd1>U"=KFUۨuhg-Á~Pu]^X;ueW/;'?YZkKarSRiF$JB,,ӧLp"0SR0ιҢ$= s~͉83lN[y<U: h A愡ӱ; zsDK{.L,b#X'w?uX1Ovǻw>q΁ΣmzDzPEyA3X1rvt@9sYWa<qm@N:Uu5)M5Np4dV㽧ۛ pO'u=7["cRΪՕ*|apw+5f 57\~/6k'[7gn.͗g:VA$̗a`oݿ\o[ -./9og.F^ .@^aFr&Qxy$3*?Né<.K0ew˙y[Z"?ʊVm_^G.Fؿ;S$?mX `1 % %,VJ}1YE@}gVos`9aM(R.[1'%IHGCm^9ȳ4kު}Tznc"|ˢV)TXv9#/n5g{x .q^X(;iPݬc3sb^MCLI,9IҡύochJx.f24[&TXD͌q'M7X}Xt.WK0jG>org3sO>`wc;7ݢ +zω3깘1LYl:w<mN戨s$$4nj>l=ͷEbl{,.9*aկطJe9qEۖgGOlAv|AwWYCY}5J`qw7;dxd\^'%;aS23>_|zA]']=pnbw|dx`B+Af7Y3|oȆ1+Cqh@>PΗY,z Qrjs#1XoaU*_F.6}b1-=j ^+4Ǹo@ ݯ?_9H]m sLKyW8}i˄9z]c%ZY|Ցl]~E91R*8 ]`8&3rlo>zUbak3cʃ[a?#=O0좍eIβf! Lf y lݏ|NF~i"zƌN/ ,[G)z;M]3qq). Kɸ@^~(0p^}`bWACW|>jdEPE$Gea8w:W-iАmQO=ᗬkM.zCդ/=9Tg D+ww!Ky'^> dB=H[i/Ж[HI^ɒ4{ X&B6 |ujBM!Kj]| ]&c$A9t{ً@k:g1e'n[H"%{F+j٭e%'Q25ڙ~l}2;x3eaO. 
0k_ofCqWo/( .} U0D䬅8^|--N R`?ZOw8)QMgంq;]Xl[cL`5 y}QHUٮ&bm{u_l ^2juq(ӢO'CP1l;F>LE]MW07C@Du):+qu_cY Nt4HP$B8-bh#w_Z&]LW5jZˏ =3i#˱ +*xC!C6;_԰s: u} xK~ ?NƃO,!# E_&ac cFeii}cam/m4%?iҀO:|8ZS?Y?tlWܩJUuW5jkK˵k?m}`Vb-xh0ΉDVs# G_YCӪB5ז*г&>kϖYSB]RJ%v*hj>No[ax/ US5 Uuߏ20DW[ks\\!HLtu(tHz cU_qY!Ȓ- U* vIctVbp'pچ`Q:q](IGtA8L VϊD:hhmsBͶx^Ma0U33R3Vgx֋6`/ul9ä6pPR()[ AS{uVYP,&NS]c857沪/]9 bH ~ VCT#Oэ/HMe 1c// U_=+">Es1W"ԋrŒЂDI1 41L!(v+nRՉ2a Kb5H~ՁϼGc?4 %y29KpTuH-cW0N1c[~.Pmso@u3M?s0njE#.<ms \°B:Wk5 A{p7^dQgpu"Y0~0yuܑ:tw,h0x@1m/40b'J^LT1X;Y]XF ^9k4#Ŕ?s*BB٠81} K]SC)z;BϷ牜7ɪ' (hoeG:Etӛ66m'qc ̰! c3hugx)|hء,ʣzdMno'[}<0JRHW32*ɖrvzz>HLn3`Qƒ}Ь<*<=`ٜ@Q?Z6B-Y:?9N9,Ϝ?~blꑛ} h0)MNM焝`I]w1eC_ _< JA y67iOc/U]S)jhnJ)9~Ϗnylâk?rlΟ"2MEc2!Yoݯ7ߨor lӟiZyUdG[ZtrrbSAr iJm I(f&kީUq2/QrEW}$1'0cOPF_݇Omvo]8sL1߰bD 뵉쿕ftbJ?LyM-7Z5,m{нRpY[󍋁$@b&Ub2)[.,O K"Fx"pAA rVG:BVujc4K18CQmJ㾢0u\gAUA5*%)J,؃:),|Ua++b E Oxz cG^Vd.> u1oJ*#Fi_:ǨV6Bx?!hpaՋ"xGz  r${I.dLq vǢ}])S֠铲 Io  L1SIgv.w=EIč&&JX"}ˠR3>oRlN.]NtXכfOK׭px UIn4 nJRG(l|DjTFǀ m(L> Ubgu >My4oPi^F-1+;hoEY:n,><\ZD"]r@F/F>tk*f}׾}*C)f.7|-de8+데 x0. 
gGpiW :8zwa8Y_EhAތ^`0(6|:WLOW 2xM+e5$+ -(vo~yipc4hϖ~(7#TA0/6"Lh+#GN`3/.f zpރf^XR B17eVqzGyE|;F EO//Є1?QC%0Ixi:5{F/Jd{&N"6K-R9mB'Q,Y(4Ji^6bf4l,d$^g KVY0A4ab451 ~2V5?Bf t@+$u\WH[YA҈X0ɩa\`o ޗZB<=eVZ@gu9/iO!˜zoW!aD7|2cGl/!3^ >KI5e(Cࠊn Lp U s-nL>7W7VVWtqkg2ʍ[fT[\z:u{#,hb+Rׂs(6JG~i2EҒ׍Nc\2`YJ:F!p3r|(?`QqTorG>J@<DqJkz(P=,x[CYڐ!WVW:R5a!y2 1~``1dJ+G4͕;XJRoz"E#.p_g535F=BiLAjq {NI$vݬtd#, Jc$bN.Mxӥ769e8b/MkxelGPXBʍ%ھ}Ck׬D8^b^%iUKhM9ܱ[)yBZF'҈5VuBJ_kZڼ6P>||-:].CLCG5Vubk&-['et}nky汥+qj-Ӷsk=i][{9G|6P[_]M7Wk͵5X_[Y*eJ_gv,2y ef9/=>+ U{Y'j(ʒQHon 7opAD4S G[ODVW]yl%̀͌t [+aKylYZTzq~QϺnKi< aжCX㉹YNH3ܳn 8 q=Jc佲YCdN6@mm/JBd#&7S,jJ|ƝazySBy7:v84п6z95y .<`X6(@k6 +ߵ$Й^ r4=X\YfӹAqՠrI3RX,D*ʣI">jAx3qAW(|a<@D'5$&W]; BX 0rn.`OO+5P>i%[ +2f@VF9~Rg[jpn[*r~82muV;ƱgajkAtX3/ "t\YZSMWH1nonަ^Vj^ ѰgWtYxovN&bW^xe}bldӺ9mpg!PRX/f2!eE|*;/囸f9<:IcCtF㪑jtFJ=v.et 3MRs3}[%3chk(uwooIv' ^z<'N//cx@|?(VWwXL!oY4Xn7 {MVm 3wBMOqxFɘo.҈hճ51GP$5+L!w;dA={|r6aC1B;ZL׌A{kA;2K6 oؿd;dL1^bvq91[~w 0`bԣ_b v;Q|sJ8)ӋHcW<Ƕ:7J Wwyw.Lp,(X _^ZbʴYݝ5h -Т@l%3@|A>k>o atðwcʓgz@L~vkgvkg]vOpv٫mq cHh31y,q\k>5N}'aCP\N)nCtzo1>w ro2/z{%ZIo2r ESe57:s!e޼!jb;}Ax{3ނ@2RnlnU~V*#'3=c>st'7 sܺLE7;oZ{f~ÉJʾ9x~u_'~b'~b'~b'~b'aJ FRRMLĜܱw!%iݙ''5QYhّ3_57.7]ϓ5W״rUݵ]ݵ]ݧS-{ep P^TT4Ztyxx!^M.&ky/a:?10Wǩ`T"r7]1kF2:EaS˩:A7sL䤳qWGI4`ʛ6;em-^GRZczג1KNp*#?Y0jhR gzG>;ypz+_oOV[ )ز緋{Co+߼і>7(F<;{ZkӄFkQ~w]=|4INM@E 0tb@ abWHޤ&%N\a&=y.<`^9jcł .1,ЛWC4=X02m,>>;t.hPQR<@ D'5$&W]+;< X HKK?>oӪ O 6ۏſݳcΪUH ׍}$a2RFݶp{b9V 5L-`6(Fԋ7'KͥO ex{Ŭ3=0 Atp{,)DSaAIߤHt 0|d d)o9_ko CQ)Q:9+\翨o`lx~`3cfť13:f-ҽa:Dm 5Vvt0j֡3Ċgh؎>8X0ؓv>|* H%z|/akýݝ'=oGOw4P^R̭-d2#~gul B#m^P3a|\uS݂lDD\|$sUC -5/UΦiqmTE.ZViUMw#V7R 9usACۦp_\rRF3f37ϋCϸJ\`Qo9Tyi P. _OnUhn̂"ReR72adCSठ<ϫC*9@5@s>[8Hu}tqU2]ysXAy"D^OR172ivA's 7(=9}VCU7nR>-0v-4KfĨZѾspo"=T%qS}pUXC"D, I *8®&*9ä*Lgڋ2hӨT*333]Lm<}/w );:'™94A6I1{ب _.g=?G{.1-Z}Ye#PyVx#8|G ]!_.ׁjZO eʎxP_7Ւw>qPW]-3.9 +/VF ~\~'kk _x`: Ljqy:PB?s)7+9W 6IpĴP|򌳡78]@[^6kIqcX3X5Zf6f5/xdެ +aK6uuw?D6NXlľq( e6>Ҽ4jA[:m|14)A7x8:Q /',%!|8gX@b <t@ck^~n?dJf/ڰᤉ6BtµQ5wX8'nNԸY$/Bp SG> ͡c[dkZ,.ͧ ? 
>V Y㋘nhcJsi}ye㿛 'o#KJ^):Jgytn^RRǥy*E^GK߼/q_?(}Aq[h\/K_C7U,Fߔ7W?+}n雟y_/uYR祓wKt?`ֿ(u~Q7_ߗ J(*u9AoR__C TJEgJ-u7:,YUUWT*jɪ+Y?.'~MgGZM:Ujsԙ?JVԙ/|?޳{N 9äzCP?uAMiW?[x5 TL5Z XTzu'VN7cA7\Y}%Lx_#I ev dTOl Su"|>`er~(P ̔~{;U)^~Pǿ/-9,A1?w KHwd]_ѿ9":ES(:F:MCbNA??x8(TTSlXƞå:3.NJ愣kB,~N}/ip/&| VWQ]1a ??fOhgC3NDČΰ5Ov|㱏ޣq{wHĴ>/%B0"gXQۏ/`JF4MlEƫf+QoKSopa`a~Ol@\uX@C ρp߽?D0?FD`ЛæIIq-yȉ%> Dt,,A?@)(8|N3̀ /Ɂϰ!2Bvo2~32CF/6PV|G7q_?g-=LwK+ؗEAE:T՗PD ߸s^[B߿A&P#Alq}`;4D?/5`uBڦTu@Āo K> ]S;$y:x[zG,ĖwŔf ɛ?:q$%*qpH.3OC5t} _´䞿ccbv0v Gh2m]=IL0/~$P5{z^;(t;i'R#.m]!?=dDD_ʮsH9,1td3i( fa%!fklD9{?y8bQcKl KGE1M_l$ -@;?5 58Kkh.W,R._]eׇ$ү4P=!3{wYOJUi釟~O "?- a} ?Ho_9> A5z h#G?~|N5@39$'rJ4@0{ci'4k\ Etb?,8P ؉-:L68{bI`qdD^HRKZ%{h1#Kb5}\wK\+b )PcWX.Es2#o?-1S] >:7ߜ&Q_R a! 7fZ !>kyxS|Me#T8gbHOJ ^e4cz9o=L k#. Hw1߾!w~ {G߽WT~4w 0_w( 4QSg@:DEď٢PDnSb; b A>U歹_}  1 ӄS0I%lzBP?w~Om! 5_ l9(/_|w@GT$hqWͻW@#=ܴlKREu7%a#{ޫ*NcrCE55s!/dE X?b:> +b!ZHߣt&άF{8Z!fQ KNibZH(ߑg o&I&e&KF1 c+NmfmA,j!ng a]/u*{û1d` p-j19I{Cua{dY EB(3eFlXlV/I`$fOlAG~+f8{=ĐQ{T=w2~D?J֔pO.=]P$ZqjFpZw_zhdho5?Jtøh-M"s#‹Iʹ@ao=>dCt2Cs.~.Qƃ?xwqK|q!% ]@#B_lCea7"b/%?*T:? {.џ'Їݿ$3p{x*u~J:Roz3ϨQ#s/0 C?뽏ܿ"QpwF9}HvvlQ΍PF; &nmϰmUP2g 䗑ܫg3m(@ =0bY3 ER35M? '%?O A7Ef SBDYŋ~kΖ.P/G޿ 'gxSEo пeckH*"zxCsRMT')a8y`WR>ÿ>4Е%Г{×RYwi ?`fRU6Lm:JL*5\5׈ҿ/)Zd"PiCGJ(#L%CZa'ϿhEeK pi?fLCXdž|vMb#Dh{ٽ)w~O? yb>Jl}9S> @=/Gh=OD%X2^'J],q|{ fió%1h b`iyi`+g}OS a˜/JB' ̥`$ o+z _sHazn$=#F?Ծ$ZH[f~LVρO>x~~?o>Ï>W~?O|~!.SR&E1nJ?K+GE?}C?}ڧY?}C?}C?`zڃʼnqNstaoi[-;oJiOsDOs<-niN$`GB)R}CЧ94iOs7u)h44$N 9hICt㿅s4Nsй =o1H9LA ɧ9T^yt3sESstM3ik"Ig:ׂP]nA]Q1FjQj,/6WysuyYQH1a풣q1Iϵ4G#[ P~`휲jD wp.a`MXwl5Pw&,\6K+r3dM':P ch}h,뒣<bAIl}}\n7qXʼn`$ܿ_.i\xIݞts`D&c N"(@`wk6Gs1q3)55F32Xo,nΙQ9v uNp,%!x O{Ӷ0/U*иm~ n|H>u,xG`E+`vU^II. 
yV[8MzdԱXp]}bwL52ww wohݾٿEuOީ;vp ntIO}ܴv6YC}3;u@7Aq_JsZ+N!!1ajm?hNJ"6%p8^˫y9>6kTqO]/k?yn_+10s-dHMzp\k8y^sRFW6>Yz.BPm' )G_G]añgGURm|0!Rq>gO#RҕZPZ[B}g!>Vp4M q$M39q08(\-9k'ǚ!Og2d$ă곁O89 EdCI=Z飂Lك LVorZC_W~`= ^VW3)!Ph?So}gde0o]x CF 8a{ϥݞ16/X0,?oLU-4C1d$}qГ_"q@#$6b^%q4'6,+DRh^Κt %1!*3AQH@H2P\{|Z6%,ᨍ4L"׼hDIF1SHWm2u& JW i Df3%"t%S2%KH5t1$_3Nhь8JPAhO+,+ Gn`ܯJ SzRF4z+Lxگ A~u$֦[a8 8L)g"1)LMnKqv2@N풝)ȊCk^ҼKuڅA{ӭpLYVBI39Tdyܯ+ RRRN4Sgˈ;X&'3!tXFaʲ.]Ruá4UVcjfu ha1Oq&)Ӹ]0$ Κ fRl_!58ꏎPhϚx&˨D0THEjӱgA4S 0NaCԖ5{iֱ#hn9]VAO49FW#BzycQyrLgв;^UT =oWyC|wDχVx(4RDiQkaWZEUP'.5|֮7R6h>؏էAB~JXr1Vz?wƱHMfX25z2>? {XnJOߢ񹶫sK˵j3lUM@J:F~;)r iNk睗XCkGӊ|,mК:F\i_ohT:F+JyКgM\3:&A\ktp͘j\3U5e\3 T퓏EUҦ5:flN}kI5[{OY5ӣiE>UMC6upMa\k}7fT{\| ~jpͲ \SӎԠ5:fLN5 * ^Vn]p-+.Fnp5/;0+Ӡk25P~U^!H\(,Rڕ=и#{^ ek|C^f&Nf e +7' )iJUDD@Ov+^ÍĜhҚhRʭ*VtRH3\FV'@U<əDL:KQTV,FI\HE]`։ CtQ9VsTϬLҬ*xJtNR:XQ= Ԫ EQFX&6cU5incEsal[= jVwCO++SkCN% lN!ZuFXY3rjWLpGV2˄W^%Y3f2a/tL-jEy5(zoR9@x:\\DLr̊Eš4q{W\ t4:Y)Skڙ.#a*P,\]de km/LN"¡&[ -8L h\U8 ܺpd诠GahK1zRqu=[dܑ=2=!W9֬J~EtVaRsS0DJDVwYYxr":udy询+pэӛRbN4ӻwe)%/Lz1Ofk։5 ic!PҢVh¯Fp7OoUj<%LRAի^d"tdKr+y+.Xno_Q5LU OC|dpN2#o2>2P5hanhMnoUqgiP5 (N':UaP1pFX Z3sj\,9'I/)xj#CZn#7RMj'J+j26zzdB*"Ԗd#8i\ZٲY3J-+[JYDpUSDERZTg0.zF7ذXf!^n }i¢,Ʀ[!48LIgt8 4W~e(YoZΕvZU~w,KS{YE+~ǒ.2}*V&[ȍr> dXr*c+J(h N7Ƣ Πew潪רgro1 z&NrMtwD;}V8!,Vc#x:D9BVSڥr3z֖5'`u8zdT6öZq׊o. 
ZnK(eJvM9e$}A% α܌sm*qE=;k>ҧ!%]WCq/7rSh܋ho,Y)5܌g=+jBRF4Skͻؒu*Ś4_StԤVrWZEATPśՒ'*RW Rq+^aHQ5r,6opћ5LQu Ȑԗeo8iDv)7=Ȇ7'kҌ˭c/ݾgHXnP8+G#7xC:WяCяR2E4zl:NqN\)JMEE(vc Pn-#zW/(ʚӔɭ,+CUYS$b 򥱜E<#x>yeKy H֌K)؋"~XT5 ۼaUy"P*GM4zu׫ :0aJ!7ݪP(Κv&ˈ 0B Ta8xi8vכ7IƂ7 k\m*0{==p`Nnh:Ԩ͚{NFjd:CjK,raZ #:(B5Ҙ4hN[i7jZSZIׄu􉆌Daބ>][9H[kZHB`.Tm,'rrX.TPӭb(Lh"O,.% &4SA4S\`B5oZhֲWօ7pVTȜfe lMP~ٙXָCSqM;  ;Q,,DM#:2(#IU,VRY EqkVcEZbV D vHVsJwVY"[Squ;UĞ+T<$NC;Arǔ|5i9Ҙkâ.rS+R)1ӥ%zn&$<P!nK ~NgEP5giqxlYNjLum"(/2FMlQIob'6mb!` V5if [lƭcFqw o3rJnj\A5л׹}ԃ)ݽp3;,b-@5q#SFxjQq#mwS)ZּyK[MEHM+>2&Tdtrk_AUVd];WOjUYKXįWAQşZb4% ?bZtA $VɥW< BnSV%%[aKiP{X"F^Sқ+׏c:ݙ2_Qsjy(p:|"E-j-,Jj yÚDK"XrS*}9'⢵z6(SOGK>VcBXt$RF״3\F'4ź|<豼=z|;r|r )ߧ!&Nkn2&؛g X3 T푏EUҦс5:flN}\kӤy-'Z3U Rr?-YCLAiGjP\3&ovLktp +.Vƾ-.Fjo.qgi5 O'UtxP!IRrA4SwdI)8LzYOaEw^ܥ]M,jZ퉼Ҫs<޸<"u)&hW~|dݿeZL-G[$JMZDpUDIRT'g0.zCF7ؐuFeXxuXt8PӺ4hǚtq0Ǣ_7V*qR1qVX Z³ȗFtj,qGVlIe x^f ^n=,7JD\d)Є5 [%VG qP! qVXYγsjW\,9' ixz#,r)EjR+8WZIUQד'<*R[Wqk^gBfPf(R/ @VnG +eztfM<eTMyaHEj'I4S8ɛLeBbBxu,=7MChqjgYXdk_E5%;div wd#LzNbdFSC3qU9 8%:e,.$N<*@^rG4"u(T힨XO*BV3HZҺy&]=XG٩E4Z[B\3-5Esav.TV6;/Qʴ)隄&xJ"N"Wȧfgu82g=i8vQ%,žSpbƊ׫(k>¢H."#7BMj'J+j26zzD6ԖcK9AӸ^e!jf(C3Zn)CŅf( Nң22=:&2ȈE_iE>UMC6oXH`GkY{XnJƢD\^c?^$ AJ~:RV?wŦMnS252F?)ܔ.IsmXq5>>k?!&Nckn2&^eZn֞_VAs4C>4We0Ė%;j},۽5@[.ow,Kb?Q QEoxw7eZ^䘯`-=vFLv뾲TEM9gfwh6>o\7zNc]0W3^H'@]՘^ 8ѹiA>W|it)E@&GYa'&hC˨}( ?/XmKz2σ~bA z]L؅o۸,,q`#J6=PEXh۔mӈ·T!.yFXD4ulRb ufdӄ]3Ɠc`Yop;ga>5^كS0֧7AX\<x9tڧ="bpAeΝt#Hv`W~a~Ĉt́`ѩT /.%aftz[\Cw@:/u; fKnXmߍjsmemგ:Ɩ;.G X)ޤFs552j<ä#ZLَ +c| kבeO]r 5n>6>_[=E$DDgSu#H)G6'larS7Eո3A.E4 | MKGEQL@{0{tB8y Πv;K88Eh#(_ֺDb3崡ۗZԼI/CAS_"8CZCZ7U'mFY_T/_>/C:.F#-Q>bCQWSEޞ^c-ocC2m~щyj"c#z|2://_H_% ! 
ܠoܮݾ0*K01D`ҐKxC 5@:znӘK>FgʈyIȆxp磸51daޠ+|cS%^6!ȳ7lM= ޳6I`4n*#*OmðQ}k0;;(c@B \WRU#ͨߓzH\~u UGfQQ3—(|:[y5t,XI]_s(VG%F_aZt5"Rw?_:%|+`91%`T$ SFEN:J.и = 7?4ǥ<GJ+/77qs)rʚ^(뿺p++F 6R 3ձ "M9; (|`v `8:".^PLؓz+3J,yP!x 㩌 ŗօ7p*=cx.T6;elq~P*QT?6[J P<FYT_Knl+z݄J-0%G;u6cScvިTW&μ:sϗ^ a⧼>|ݳ0\Pb)37o:+ K RVpeU ]1v .8{.?6CzS`pwTX>tTsW5ukZ8ѐ"-":+lwjm @䣌gmkM/ܪ-2iJt'lAICL?'5#/?mxGNC5 YX#[ƗwJil3'>@,U w^!_ -OT6Y`5HQ'8Ħ1|WЉ+бE=4K@,8 = 1Gf̂aO {P`!D¬襘I5|^ZJ5 kO*6[B^mC2v(- &G<''̑eQ1 Gպ(aCg>8yظ0P*Td`KO\ˠ™?IL,_JxNg;[)!OqfeU~6d DOLȊQX;?yvAu!4>5FXJ4Zc@LY _TQELbFOryb Jvtnc~'.ȉJurc9Ie4 ,:UĠTdaOTcX+q΃r[.+Gn*aNv?Ё _^Rw/#yQ:. W&0kX aIN֕ͽL1%h+ѸkBG;M mSr77\戦^\z{9 d\[?TF,7BB,J&(m~c-8Bs۸I\ުar#nHxij@d]Ԝ{)xl~ݡnÈYD=ov5Ɏmٵ#yPUGP- } ׏?žb݆ۧc}u*^jC?gC߲^2-RH_[^߅U(HmJv4SQƫza!|B&Rrmy *Σ‡R-.O&} $#`ĈqTpWǶR R98%/Й$x,y.;qr x,UZ񰷺/5B^_{&s*mԯ@)ʨ6JZ({;F H7 ou%*\1wl^;#^花@G7W@]w;϶NwxOP;.}K՞Լ㜏[矩c#z껊5)/c}|IF8ˉ*_W!Eߕr;TVreV,4P` =?F x2fU>5=݇77hr*덣3݆~Q |XM%hZ}ߴ1Kœ0X}hTX(4.*;5N 7~;L9L9etnCh{`pWaDf zHƑ:n0!.q^JڮMwTL0rȡͦm!IC|8o)H\$!b0kxR=䁁EK"+MmxyғK=% ܍r5/'( 1%Q{ Ju0//k/{hi C˨Q -nsq}P2jsM!eT\ʈ/z#+ˍOsE1?,-:7\!t" + H g91>6_i'4Fj ynۦE0㜓׋ȑ8VB7oZzأF@H_^Z!EZK7k.j7x_?Adۨ7 Җ$[gT-m&-JsmzXxocGЩ^ X׺wF7KfCE_.靖nx}6 @w2 Hw`aD3X Վa'32X&aLKi; D}w8p"6C(Y ѐR_B3xZ,@ wƘe(ALaHv?{ƍ$g^YJ3$%儻9$OtcYZINv4ɦcaG@h,'$P(BU3v<aǜz# 8,!d>%P*v5CQ+t:ƃtUqzy4+%W*`}bIF ^JW✥ˇXp_a. uJ24Ǖ,w/h>zK}m9'P1z'G&V0kP@ lyӉ%d+Ou<f@'@+b`&R@NeX o0n{ B|6: luPT`ş(8cj4|݈ʒ8}!t4 <m}|# o&n^-z:7ڻ~7?Z^kDs U''i?cﹱꃒA+-W^.Ys/P:5oo5=oz?prI 'Wпkii3ұ)]_ads o lYE!\جZJOZf}˃ӷo._r,Cy-cTL-O=?AGUf(~hxMFm5WǯߞZ_\{C\pJkוݕBx{pptqAW? 
yӻRjjvUNђ;^`j_)io`(CXoyC$d?N_u/WTgUҪ4>5Cd@;\w^{?LzE% @ËGn›p[>YWVဳq2hya8am$wFoj"7na{?=_v#G*^kHiO45~=ygWV{nfvoY8tAN@Ew"w|y}2{o<8sy~|ti| [g%Y͉js5O{tP` G`q`b/8E'0hѡƂ3B [ŔyA̺O2 `2 xc ?oS ([fgT;%mr\Ia8'm3b?M!.yG eà@]0 _[Ώ.Q7xp$(ho㝗y[lA9b4AkƖV¾NΥngw;UD;'^cX%8D< dqbq(y(ZzKzg'B팬ژz&5ppUygIb[M64]xvVc("s3s=S'i~yxF=+|d6 R$F',/H΃,Gw| J{2bDc@Y2c@_ム*φ=]$#3~|!?}.%%rWN2 Nŋ)>ԎQ-[+8˼倷6˻K\UJT2\tr0Jˉ8-'d0r(FhfAgFiZ9IgA cX6cC@-?vi Bf:%NLA8|#ӯuJ:xB=r2N@ @POi)`\gTL+.џQR(AۚEYw6IHB:!M DP{PP=B7"exk&,$(kz375R_hiʱ3 [GS>yz"~:Mg&W]y2hNØluI3?ΟtD%ŽuX$~A7:NzI,J+t)C{FV$ ?7٠iy~` Qk0Sjb&R ٬;4w-C؝%N{SV&C,jr0Bj"(A|S:Ihr&=mݏQl+k:ބaz[%3UPDvq|]z;s摼A([xI0!#5:p:' ~28ɵ!J4g.eXHh4k'}b`|}0 1Qh48!z~u+s*2h>}n^]ӛ[H#a 61s!13<>5PSՎaDmKD<>\PJ ?'fE!c"OF1)J&6zM{9W%1G $o/6`M]Z!bfV[]0N9'Z0 &C;7rY)Eez2c75ú|˶Ldx>$+֎'AW-S8eW'(^ Sݭvq,%AB Y|f05iL`L$ih9WO7 ^e" Qxs:O«8(3L"aBb=!޵] Y\A4 uHj} o)}=cAĐv sHEn2F#ZapRDhx#+݀BۦD4v!0#B43o׈Pjo[;͍pԶݯۃf۩ 0J4%-~Oc)kjBXԀ+F>Ր M1zoW"71 w|ֿ U+Bstt:%*/6xN;,\bהв3 Q NMX4h|@PEcJRɡs: -DgLzT$';ǻNYcpr b<ƹ{^RVK2ա?vc,!02+H\DۀQ`Gqec;ц >\3B6*ƺT~fvntɺM0rٱON?fwhzUC?wV&Mel(W\b_8YW@?:2dwQD[8~Yb{%-N 6MHCsz3^bi3tucL#6Ha;8Oڿ n>Iw>7ꙝg I./IN NOܷZ:,0'Kh m MJ< xOq4WeE^PG. pF4ZqDЉ99> ޠ/^4ډI?)R#.;BӼFh88x'$* *FgPiDa/9i7I c+H|`P `\YiQB٢Ր'dm %pm0(w4[2"`EK< v*xy׳>{kSCb" ?8$_|tp{>nh];N) w*XDV ᖋ[i0np /I*,N/`4xI򏜭FrYaPws>nRib( (C=nu3EVYd[%ۭdG飭uyEZa z gi`.o٪e虫QV"AMGb<]SD?#`(Kk8jнS:8ܰ3&EUN٦~?zՅbtLh*dwƱv]vGy397S+g6S{agp l/m\~>g.犽5kkz$14/MK4/MK4/MK4/MK4\o^]q*͘IV3qʝ}0(SjGϓ[L y"$]5/wt|^To8lSa5&oN(|5 &֯ f%:&q9Kc ^ÿ58aFvڳ(]0Ç3 r}@_7!J1dI̗u4-$HQ%Ż1Q8 |aGNx>82<L"_LXpFop ܀Hq H ~Ec?`F=4'dpˮ"e1<GZ&IZpڀmκ T'x&xxtv~tytH2Vɤk_$]3j Zs+JLJHgHe9 0]< {(?Q(dC|}`z(I djrâ(PaǑ7jZ?Տupthe&0&? i=L3Cm燓9bl'gǜYWRq! .Q`4ž|6IόDIͅ!)MN3rĀHOB:g5 |*S[ڑik[bGpSQve&g qhx}f/yYY(`g|-DMZ5q>2ֲ9bD! 
m4&3vt% 98ڬ`?2>kr"RTq, "Du,X)#gZ{p+^H%uq|wd&E4R\] i"ހM8cH(汑JPEa6BɉBK;_S٦XS۪,}qEc"K 4bs<Rr:7p:ӸGCG x1^Zb9~ ݰ3@K'w`; @{Lƫ0R&L>i<$^Gr  ?𮶻ÿ <Ņ>>G2s&g )oyr9('TD'ps33\iگ!(5^E"?wrIN 9=:\({Y/韁zu"ena]M^kpGAoL&A؟|J }+Р7*Cܖ37^ZϺkK e#3xD8+~:h4NIvmfڴc U`~n]]sa^~ \Çsu7Smyj+Z|赓yGl=%ÏlIY(,ly$uFFB>Ƚ v5S'ӄ:xi՗*ʼ#_H>AtKixX`(̢ ΅߿XTb( 86i/mɈ%d1gjGW b87|< n1㿆G1!4~&VES< MwGsUrʥd\jZR̈́^_I! qc4qL+gPq7P <,SnbK@0"ONBL1,׺ɏf-{xiV3<׬ 5=.̞-u|Ȋ`8 1*= v4R0k u|u 2xvwx"c] -n2=m?4uma|f"ܵ[R6.(^IV^&trZju^Ԍ9M t2}{߾{r\U5O2;Ii]QCi8nTWkdq M Z.ٶ(WH&D+2Aq!a?iU1%"W6OhrE?jQrIs\\V[CxG_8E,j-JX=IbT+a]JHMMUW Pc:S)5Àrp:2xѦg%br`)Rik{-URWiG]W|E<̩4A} s]Ԗei5 4Zwd?#$-:RTXwD?#h<B%E/# U9<{ {G52Z%2``8&}1mӥEu@kdR QxAws%E5XcY.o6ZPp{&eT6~8dHgdi{0`R~ݻ= 7cOĴ7Q΋#6tZGfNzWD.NF?K-402LȾyΰU>׏yVK6zkfʬBdcn\$'Βp E>cwDBaLSB7c$+MyoΫ2ٶ~,f%}.ex||دa~ [CA|9<9xfY˜9'͠ʹ_r:CPMB|<\&O3e5p&rh[Q>5 ' @;Ae0 ~K]s= dN)pYX=LKݝ嬑JkcU,Oy% q}<ob2VX/]<]g\(R {7&yv'oRmΔ3wM:Ae7y#<=>I)Fd-׎3 r :%EGiV Ŕ/;Wضr @,'$yJ_؊D,)QIHUI*D?HOq#εs Ӿo'a$|Hg}mQIp"A@"#7=0JEnܙ<^B3)wF/[fB1YQk]WjFNH|\}\tm$4hxUHXW•uSfqu}9ٸO. 
u7`L(h朙 S6D0Y7icJE=N:=Yq\P$pG .r{ {%TtlN4S@rcxhMA)- :hgb9 ߆)/?T$ۢ;pd< 殨M8g3jQNrTfNVPFhg6f< D7WVD2IĔ}8DfaD`< uHM8J7*h/ aS$Mo_;oک#[Dl3{lp2/BjhjC69ia"N& mDGQ ޝ2 l@-RRZԢgQ/AaN@Yf6~4Vͣ<,)ʝgLzàyU zX5.kR같)SZ":Y{%ȒLJ 8Mo /d/1 *۫jW IB8$C,-ϭLՁ%TU+b)Gdcf²;*3f=锎.M+`emq<%\ Ŀ v,] /|nn|ͭV/癲'L_Ԯ?߾|++KJoi< +i.ՙe mWiP[YC^p]c<-p*eCя{I&k!|iBN?bx_6>Tݟ?5'\([;/`wv7_m"zOyYY'5) XA矝lQ?RFbG5u~$*7{aOVHMQ(U6.^x=8)(p&REKύ1L0&9bE\?XO* U #V&48 'b>%@$4& cՎ`sҼi_yg?xZ=>:N/3Θi<귽J^Ĝ߆Ы*dAtH ^,;7txD-Č{Rz~mo bosy?_FrgcR}!n!ь(Fxt ;8{U`qu728yvrC[@oBP% ='ÁFuex:f|֧::z.ĸL!S-VZȽ4œ;AOB 8tM # *hzVS$6 ׂhi&2͒Z5@ś>cuQƓ);uRmowco^e'[kJDIc]P DAb茺5aֽ3ܙ3FG0΍t,3'iqw6[`ܯ3?n}l>T2+/h֋)>Up(,Zd/$YDǍap n4 ST=|{#6OWO9Cr;Bau /CwˣZݫ=;Ŀ˙(3{L:N@E6fPCSRg9y8Cln7ahvC2=ϛ?__\:=iP~ޱ3ّU|}0eT7 9g-&4v<#PmVv'(?v6U1FgaB}jl{|q"Y蒼TB܍̯}hjawhC@8RA,Ǖ?zT(8eofgHiF؁G*$x ֵ2,Xpb0楖*u %D:8*C;d3EZBO !p.W!?yu+RDDKVP$f/vXbks;G%tO^] ּVp_Sސ)%ÝX;$5 D4B*8ly$Tc=Q Lc*yXlN1QO*v(AL8†؂)|zbmI%E𰉨 AX\ еtduf:QlD7𹦨L/k8:M"e$$d9r?1f:ZYgR_V|88/J단0$џD"1OV%V]L۝_좼Ӭo&1S1ӏ[*KE 2Vh>~.rD8ُ:a9_x7D]Ω 3(/UܠiG;On#JAf2 a&0kHܪ&ԧyRx)~QYڴjB2~ ڢS]X[T`BsYgžWC޼ D'.3ydҥ[uΨ2ﴰA;vM]c: ;~si".'AH3r`TV1`ڂ>?K O7%k`h $L>M40 q`o^]Xø^Z%>ܦ3Ή?CֲS@Y[]m9sS\k[|^8 Ufu/f W:ʗP[jdIi)ٳvrzS}P?y 0TYiAcgٱ)dd*py6v՗C Y36oUc~,[ڼȕu4Ban❙{#5[;a?'|]ξ% \1BU?64 l+7Mueήbsg \\u/_>_}-5+*94FF d5sgu~rVG}|l׈:]2YEZE9<_ zױg~7D]wИa^q5`5Dn|jLmE6ΖyWzg'޿ fJ+3qt|ל)n6¶w%E {IԮ~sSڕ>jϒ뾷!ͯC< r`$d*Fˁp*BA\">ΑOVc&}vqq.>d5k6+KYϑ4ŎZg/>/\^6ր]FdF* !MA//2m"Khf%}]8s <;RVNpd9_6pagCrtKY!A4%VNS^dV  2=Y-E4kK4dѱ tugP.'Z.R< @"HMA_FB[^g%}HDvPI!FIz-+WMt:랔Lrʸ}[Ň',#oE!W6^æꎷF*6=5]iH*(͜cX} ?FژsΟ2)>rͮ_?ÍQմD DiۭR<*HRs'I9egzHД+ZT:q`r*k]ePqWZe=;iV:tj{C!CPP+XAȝev| x*B@q6 #WȐNRԩY͎[U w}E!t[AzU]lB'OH9X`S6Q +:10t`4c⪒>z/UVz5QN? 
P7'0Vx^Xf?3N&S6ⴷZ?AV}oaLSa=]ה[ \D7#U37KE1dة޸"e(Δo2|厢aIǡ*fMVDvWħPm׽qMsuzje6a4T͚'ұ␺195fC&>3ή(Q$3h'ͯ[)_K֣kMAs%pI%]pۣQTcOfpCmo*aRu S1:e-afi3;wV0ĝ]*+Bq)1n>.13|yXq4'ؔ227u(u꒍)) `jL~)ZP3GM"v/`Qd7Ih- E  A2?H uI[?Et*P| PoøxmD="^io/ |G7d-1gJ7tKtN؄/_>3y1G$*y3'xusI&ߡؙpAk HNqXCZc4) H z(^* ZQg4[m6yDϦ%tkVl Z,j=o:ív hdcE:Ig϶}a{ YwiL5D SAvqH E-|cmAs3aa5gFJ~P#rKӄg嚤-{u#otSݛm-3mF4$~gEG:ikN.ӎ&~<7f헤<N!擋A|/+nS9i\±lz-'8^0raHVE ">, T5SH1Cs,5C )2,VZUQ` &rL{!_Vpr.X1*Km*c_X-R=dMPpWD[ĜgIEO7 Dܧ7`3+G@жhRp[(Vߜ\|f2= [x^xFS} ND4Wu]K6i\5]H8oO 0Fg޵?bU.c_~f8{o.a" ~ (Tfk*64h' ͡ WM-IU;mO:r:ӓ7Nl:C1%RnG ɚDh][>@-@€ЂH|~F7]d&oϤu@ۍW s[77~q%Z{SL-"ӻWwy}6Pl%IZ{+)efMUR%n "|>z xVci4h7o^H~ *IˑKl.%XR?X]|j<Жī|'C~0zKG%oBp詀aZ:Frcef6'k:eUEBI8]F\j2Y%ۖzu> pBHo &n5tg^kWvgX͹*;qI֨ %[l5tJne]Iwƾ<=۲(鐛\E[ʡsQ :=YrW"m!f,ǟEǓAimX{Q%ZM.leS|ȯ}: 9 ̦ 5?Oɺ 4e !Ќ* b oDp3@M\05Y/$9Ca:*_dZi ry7EL5;6a>;W$*[T8'"'2{wyĀf'{$)L;{|'xd7By8#{>+-TG/y8=%yG EKvz!l|1_H6W@$r^k{y?_OD0~cYzc.c%vY`)^m@Azp t)6 ěƚjD6GDr옗v$jp/!u) ~}x a\y|xBl\~qmA8B2TcUG9ӽ%h3W=hOzװ, DK.Qܟ -EaGIq.\=p$L^_lOQ;?7;[)a >sPϪC_Fϳ>KBϦkvsdnom .R2>*3UGW} {k*W΋F]Ea:0np;P9%޳˭,3p8|ww,9!p1o$TrVf[y8I#jdxma)~8vm B21NT 4eqbt-JZH\4u$Zx61A&s/7v"a8 zxrRo"]QѲʄdXuCY qO6awCQti8҆O`wQ"yU45y@0VM&3#iЮZ7`᫘$J[aHoZ? 
G鰧#nTEEZ'o~89>Me.m(o[;]*@:RF9{-m# :a Ai|L%J; -QcF}2vuo႕^Ff K~W!I9%씋Y7fmXA(ׯq_M[mlYUUwL0bSCt?+$;ci;Y6L{#oOkg[M&1nv@^&"tI1>:$ $_q ͕ggd40j)l~Cq#rCVzsͯ$0, Xo K~mcqs@e_[ H_а&PN3Y^7}Ľb Fi3揙g%8;_s;K rT~XX/kf/]my-aDGT́n6 \$H[X&";ɬuѵ1@%I%1F3L&CM$>ϰ4wpQ[%(lPrOh#T Zd .@V3籟;:yb q AoRX"p> ^j*$ZٛVSE={<0= Cw#KL#A 3feg4z`S!q %g7q< +uq6W1}>TЦnvvs{Xʲi?皘s}]YZ+IcDȭbg~[{<H4 +gOl=Ƴ> |L;y< ]0UGRއ` &qG: F̖Z ҂J5)x|:sy5I*G0(]H1;|sA9h ,#y(H<g&i8M)nj;4 0jXXuE*:4`i.pS:UP"?a诲 N3t5>3.`2 +kN~7 \q̻z@E%T7x %{|u9ٱ7b]oV]8߁LG ` 4p5Xf'jJ)PKKIOn]Y f.}'X(EŽAQ$p iSEKam2N2 t7cf[} 4"`C!ƭHyȌh4kuoٻ/4C(ъp).Rz WsCex)qS;]'g cOcfsM0N8~JMty"}0Ģ?_Չ*Vb;KI>k!/Zo"OkηMeZ\37685K2?hVYU~ 0JG(t4A 5ʚ'=K 6FV9DFMͩ%OFhf ؞t`;o/:o_|9uttʒ_d,p-='͒S6LwR_jw$ܒqwPcqb׀>u7@¿zس6FKYt榷 VO3, xJʈ^^0&R@UQiz $@Y)פhSB?q{{ f<&\5a6=IxwdІ X[ Ab {; C%X;Jw|}q|Q~:%i|хwzx|y|?o^RߩK]dJPƩ]ͰuIKqr: G e&\YAu-dVe~1`!(Cؘ؊Ӕ1al8:za&Ы9?}}(Ƞ'cVj]Y? V5ؼ "[i`d Z%`\[}xV<F$BtQ#u:aCd]ZQ~Cx詓[ `,{OL[YW?Z/?UܰMn[ӧjm[;{)>5gϼX0/6`p| gx Yb't7?=_0p CbW$¢OIO#Bxdhi^Q[($-$IГQW*V' A$Bm&WWS5&~elJp.y>)0/.bWtC&xE_rr P!\7_19Mc $0tB8ttv~tytT^_%a>}r>M3 "dr *pRQvq"tpؤ SBI 4YKe4}HVI'D}P z0/@q~N)>0%,"]^;$ScW )"㏀%U鷂-50J'RqF*G Rj+$ sAmǩ;I~(`3Cٲ(X|&ePθ@5l!yA/":yfNmY+}'+LPuf_ 5lڇ#N T8qN KZ1&V$\NHޤf@EZaGNOkFQZGO(5ٱ MLopvi V# &"I\&-d!n gU);<4Q 6Q Ovom N:φdAȎ '$U?p2J-b-PO&#> +'ʐ19>ZyNG<,DD&:mƱoC1>mv|5ć9rOEzI39jb* B@ FҮa?`]K][sXS'DI=M $Őw Q,M!ƬuN}`(x}SUvus_l"`F?IC Dv?Z2Se~nðgKY.>Ҡ;PH9?bf4Hd^aڵ/*@]{G?up7$Ό9mS>wP#m8b|c5k%-Nh թ4O)f[ %/`6U>㔀b Zn46pXkˀ\譇AoCz-t%̦A%P\Ґ>Џ31*~{U>Й:]tWvaCVzd>7sұuD@aгcje`N_O FGNsרĎj ŏ"K kV[`w&V's+v|2gwWO+(J $P:}k҃(+uw߿WZ0Dq|  G Y;o4q"ம.nQWhüN>׍$#NF:gc:lXlm6&\S2Y b,IvGZ8.PCVh{5H;ۭ" E.d}=GV3;ef4ViU*݄tXx|ˌj^{(Sy:[(Ѳ>)sbOh7p>:Ip(`8JH[>)U"=U: ~[ɚs85s8o{S`]sƽaYVz'D {Y G|q^HMT_+2RͥBfrzA 7sC&μ!ԙe,h9`x|쐛D5B$35OA;U_wJ@jsX1eWơt5d__w'Pq>j417^\gm,*}j%E yW;4r6"\ջ~rѪ1UTT6 o8N3J{ԛMYFHۯbK6ǚ0 (Cyrq]9GugSJ769gQz4MҀy> c`2RGN 1W|<&SQ۳Yؿ_pf1n>;r7k E<8"~ F:koG!F wJ6ZY19F ,>S+w R6bgɌzµ5h @TV4ex} ]{9:tX3*m@ˈmR6RXF*L6C/T{ږ^#`갎~"[ <0+8e3G6#rWͩőzYWZB`$JR\ІF$%R23'c dl|%vqAQqC ى3Td[OGrA`52SCr7T[UK;Ps 
wI;?5l)ǰEIߞU/#pHQ:uUek\r Xv8rdLJDŒ~'Hԇ/0s)P[=;?>?㳺 "6OO_չܾxJ6.\>{ ?uHwtm3f͋O.9=l DLqR}R?9T-@EME=ٳv"L/2MғET3R~g&tm*QE) K0U ߿dc1n^wjm`_G/^u)[onA;C֟WY\3n-Xݭ/{[O)Xl,p+QX5EjB@ ݅I{ ϡH a+ J9zzz#kB~hΩOMՄS://31o7|1OH0YEWBV)`$ 8pz%=~_ D"L?ίC.[4>jUsWt xb+QhH.PbLa=E]\qRrl&44Y4U@[Ou{߮aI'7AdƔ:Lf,=b( e= H0nILb@w$=6i؍a' xDVx64lv2EFskG=:r]%ZTCrl XK –7MpW,c0l@.&HJs^dyZ|CZ#8~o? 1XeYQoE#B9Po~aSh=4=?Q 3%AoLp4G໕:)ocEy;Ѹp#7|(^QоɎ/ZELG MۮYc,W\H}\wc|~s&?C\Ќ;x ZγH;NJTgWa0Xn6gB&U0jNȿ9;+ff~6Ӎ{Y$h~WT_ l($. /]ɊKI1NUr"{.$-[LGF ! dzt+&G6\+J;5?Rqٲ?/6oZ^h, BOm6g ӊZ.n{)ҫJB/:&L((< XͷҊxz'd_`ګ7 9:=oOSdON/9ۂekpQwO@'4&j򸖉 fFw`LQ06D~߲k:_teGы LΙ)~3b*{Gg;}q.ڠX9B!ic0+wJ=[hF Q|eg  g9/_F̞dߙ6b؜Y̭\6N3;6x]1JaxLc,*t{Ǔ5Fkk`"NgJA t>@.4뇂s6U.̃Qo: 7د fUOH=1Ks[WG#ʙ,K] Ke =CY2քhR|\Lфj?>GWl7pCKQ~sҳ2}0H ]LqYpk-M4q@꽄S>>x"npCw7H137V[???<9jNp֋"xE}LG`G]1AɌ3\Q0͐WHh4x~EP K{"UL+"E(]' >bJcP<`}z@NX]E]wop 446pp筥^$WDyvZMLO!0uS+4qҁ++ǘEӍWge\|ߜX%\vjz_NEg Sԡ&i5?U,g`TT褣\?%S# Uf7n%:(\-z^]iA@D ++_MaPQH"{x-7w(%tׄ"^{a13=/RS6})rN /6EHɔM}Rl4^Y'ލu\33$M6Xż5%[Dza $nB2؋l".&{DiYvl&W6SQiiME޽ur룍 Wl2sPLBM_QxY c.0dp&t$7ۘ%|@@:MՅG`\b!4t9\,6=ń}j]xň?֌"CX!B}#KLK4G1eSn=k@z).w,%p,E an 1%FH˧RB 󨟡n%6Z\^y/r}^o?%MZUa x>\uINi|2`0,l3==jzUdk^%Xu^iDl`Y=}sIUʶUmŬuÓ82^OT;eef4%)GlN9,f.,fdfMxa:Uz_d FZv r"N0vNADx7}&(/-PJ^9P<1t,IڅTi͠ڇ1-;06pp 'vBd@FaǫAÚD^DKdO3ֶ UNiy $ NSq /<'Y& L©pe CCކ9r03Ծgkow&Yp6&3o64 Tdz+>B}#^j5b*Axb[ 0]7uK@aMQ]!;,\a,%6O&n,[{C|:I [C) zxsyA^rJsK3{k0|wN AzwM%Z;+MK0[%NPBB5eJEk@F|q_[^ZzRUVUA6ȡd2e,4{\pL|S{/M0㿪¨‰@Xo 3#ܡS! #Ie6q]XZ'bF51 o@q A:Vw0 MAo ,)8r7k`6a*7/ Y1n8 I&W3iϘRlv'ݭ,Տ{ISj܀a6Ҷs0h7h94C'] 6&[\m&GIhBnH5; m2!"8FEg:.%a{*q4pQ+bӛ0IPZ(CVۍ? W#}h[w>hZε7;ncgŮsItd cE^$H#I+u֞23 n$4^}:.})qF4tgMj;J3/n<_5 }#6Mv2m4 AayrŢ5zKPP\6Hԕf1K_Ifo1r&cY%I% [X c`J2#fYY)fsGV|&Vb:!nN7ft&yBtG)$е@EWwJ~OF ΀Z}AglRU= WBQ1dOE؈|RiMd6M U_\z) hUnJMMU=s-_DVYݙ'd1D'Vf&ߔJ!(S7A E >]֐ȇ߭KeQ_@-/w^rK_Aȍ.0a4sxXkArEh|1=Ƚ,]QOw˭'ӣ! 
R,BcqQHo-Hjk*Bd<8vV\Bgln|z'2\ 6ӡW|XZ7hR %0-@Xg,i9ζ#7|C`>F/^g~6< B8NJ57Pr`2Hs&X;ώ+ b5|K@o )qpQ`CB>L*qFU;j l*kmʶ144+pd닶UI1?a/tw3ckX46]wr#ȘI&0P\U?˄5n{T\&Aw `\}@,\%p]}˂_-66HPdE|>== "RT5ˎԝd–U6~L_n= }~p 'S@H3V} DT%Jh;9~Q)|UiMMGT*Y w [ =jtd)_[1 vWALP^ HjJj6]eJP)DЏAl V]`c?L&Ħ \0"w3XdIN.کb+,1BvLacuI38B T+Խ1 Ujc+ NLxt%(-O5f6 K}RD*tsbn\P pj5J|tЙ;N%T=TpvWZsj / 87WV /e0S[6%V e _TX w'lʕUshz$Q}ᦙ~𭑢3}bFϟ% yiAqϧL wgg 5<(Ɋ+$ ԡ}]a7Fw^4h7U֝$T@Z-iwȖ\T{7a< KΝ2p{oѠ 3<QwgM|# n9%4MVE18tY4diCl/wysF5f^ P V= UiiP<j2,+3/'䪝>5PdA׉NߒitsN[J:LkNj,UZq&[ hmۿdf{Gq;e 7nޒgXFtһBJ 6r;EYOԢdFsդrc-Ro 6zw|n,xh׾dɬR6;HN8MAv˔TcjҎM'>`;]tzCm~!ؕGvRR?C{5)Z;<3Ә?Gua"lun^F8ahN4mO;4ys#2_k o;Tifݹ;&Zh1j#985VOvݬ A@pb%;ٲ~ܛN v(HRMN46(,U;W^M:Q0v4 0U[1%P$G:[44uA*i$OIrTM~M=wzl<>?Wn&RT{7BSsAm@M)೻vc݉L,ϻM/|%a}UIeQע&TJ(O9:gyXfB܄gcYO#TB`J%Ψ q"{a^š>QpB?B%FBk5\ UuXt iE_)gnoXOow?ޞs.}r>(쪎' SIKS4+3}h] EcߚQX A@d6QspviHMG2Ȋ6|3 n% *;ǯ34о]m8͏X jeXg7 (s(:n=qSV)#gsF^07#q&8Sr+f~Άx5ӋWUN@Qˁg~&@zwN/k%/p_P f}K_~@Dm`ói^X(rJSht^6 ْm_52 @C 1Ȁc6E9+#hgmJQ)6Z4;iBy>Ɏ|rIpnv:O `x8{^c0.\#OᏞT[L`8Xofo,?g_ {/>HM`D3A͒74˸ bi(A@',-[;_b _"8q,mO䚏U</{Ëqw:_<8z^*2TXv{8^GA/]y «.,{:[%ڙ;6e*^gJ[f6;R&xlie>H7YR]WS1-6a6.鑅3n1ڏ8; Av" V%&?Vo}EZ$#>0 K3أ!'iO F! LOTy)*[JKqs/?nςS|f?&=jLd4CţHq#L 􄅿ZqSv$=c d-o򺗸\d. 
CZSύYwLۈZ5EBz걛-W$fxI)&A/S3 `~LiD":?و72]'Z*=ڎg, pk<l_L܏A ZqGRjqt% B0`jGPsNu9A=Ϥɺ xyRO :[wvz^j}&*6dbJF%SB砕NFa n3]6ơ7%82@TVi|EٓvxψM)Y?/sE.b*E Bo|%Qؘo dC/vw.yl~[h:ob{gy\oiν"jAk,}a9˽ AO/U:Kp)\zKŠBؙ]mU}v (]읝b^őUT410YW~fRIJ QXwpLlL91E o.le%Ge/J C"f'3 \TY}c/Ebā&5h <~Sđ(FQIx*c|Us!z8 @if+ynЌ/ک3&-h i[q|ϯoA֦֪onxX0d;}xɥkG?eީ#۟Q]{ޣtoA2'8rb}giɓMcUd3!p?C1qhzcςʎ NE4]4iB,*V73C1ؙ.:׵:bVi[;mՖOE&Dd.q1m|Mwۛ%6л];]0 |gHۖQv.l/vj'-ͭk{ 0}<DzY] mqF-pْ?&4'"-%x)RI'OVu^whHG/^,̫=H`_*W0~Gx<_Z+,O%vkǮbwI>%RlL _YxlWeOWcLr[Iz~㩲.=16z{IJLXƥ Eҧz(9uk<-m})!n_ol9hmۿhse(k4 S"ւ-:iE8b@rJjR+Q!owEk~Z䳸OV՜'ߝ͔0]b2;,6g|Ұa 2 mY&]\>@5qiKV@ppGWq.SH wNk{xv Myu#@bp}8 oUеHkdh9Mi>Hu20p.\_œ?uzI  Ţ/쐰h儑jҳQj=?ysPaer;iDE9uqIji2?* #6%@P@n¾s# FmU p`Q,ڲf""o˷ۄcѺzwGtIc+En[h.{sVe4^="fB1+#.Tݨ'DW(M9 TKQRw@V y s|^2`/sl_(ANHڑ io(!;ƞO``~W!]msʍ9K9LVˌЈȿKpە巊M0G @׽5sa8L6VwOpw*&+lXn3-܊=:_}j#v?BH[xNTv?`*xñYe!fIugN@KF39`ꀜ1b_ʟavxtv~tyt>!?yɲ&ƶ :d |؆ T;+Md"pBUY'GŰG6oX/p$٩SϋNRnGɏ NmeK2bl%D"tcx2:3) ޑ\ &#P:_g=Ŀ 1% nߦp$D9iHEd2&Ir&KX~_El7"V3fN`qTQf,:&Qdќ&WmO/rMį\}CWf/ە8/fQp\s*,02B;|ʫ?IDRZ."zBhZ|Bܟo!׻w>8w͉F::?K#aNT5S.Gxlbqr{G|ԣ<{JYY 5kKk?BSP%'Y{ }OcP#3G.L«+HaSv'(Q  Wh| 9cTJϥY[^H#DA]Hu>MaȻ2J>smD) 8q4%,81+*5LWM;gsL&1 3+>2.iQA*3C]'e(iMOy5WžNL%W,nF*]~+K'+?f9ENd(\~'Sk+F1~bj=_*~˄S3JT|Vj#nz?:r>00cu;3wG$$BrO,/A̵0TDr2As8~\͆%7 146f~@m [P!h2 qQ80"!VvfhŸm2 ^} - :x xƧ hHRJ pbW׍#"|qt8.qSkShz~$a78`؆i`EPⒸ=Ni҇Lc'w7l6+vf1V^ao%&,0Fw` ʟhf!T '˯=I\0cz+DW!pc4e-k,R~?Q)x%t Ry-:-@r*Hf ,}[ޫ @'Pf)lh+}?A3@:Wxt@j5d 6@o6 w? jҊ;tPkePpW8YO= 9jGJ,A,huBҪ>%9`X\Hxnؖ !+^uYR@='% !eOPH*lAؚܢ wjgcm9$ojluSiP89cD-ؖKNx``l(EN]P^uaeG:hd~.boK#=l@M<}E~9p$5=Ca$F0m=,!C_eImudv⋷$'0Di8dζtd0LWj:d.M g r`G!oQ)6w cubNy`E,#)d͍c۪?jA幐~jtջOښ܁66MdlQ2TkZ% XjE%@D<<4: licTγpV`Gth1.8t2Y2D2ymW<N&5(=pol3QV1 ;k !rH|9"QzY@ duaR*+ {8Լ#Rq}~vw q)BjB©`G߀#5;v)R1yOeY궷{´BTEO$f^?sKwer02ˮAuq! 
ɘr: z[}Q^H OVۄNM9Eyã|•LDs@D _<"uFD#@+y`5 ۨԫ/(RBJJ!i1Ck;}d# Z~Ie>W|a284Wer䊮t6cݯp*O'ɾbt'|XѲJYTEgksYrqGJ+B*Br`4A=<&JCe0qA= JX;V  mD]TC3rg 7 kǜ/x0ي`18- ٌqAWzJF=c0 q >, {6?EX30qf6Dw3`cnRBI|s (SFVsjM5qp]̡9N(ގ6KIN*iN3 k8$R5}̧7n5|6'&&JL8cƤ'ͥQC>SE@a_T0'*Lrtlde4=DDA-sPv9ml\Ɗ~ э1M]m28& *=83&W6N|nG)7|7L>"r>8!U/\i<TK >9a DwR\ji8񤩃f/NZØnLQ,RR͞Ԇ'LO]~~Zͩ@-h򀜯fU tgj$v-9f|oO6\e\p[Rȣ8C1O5΢>;@rԛa#:(#`bxŠ;q YKdƥ_Njly17뢞[!hʒ!zŒ:,lth#%DrW 8n8or@2z3)g)/%Itg&[ӹL8d;TuahYEX/OhC%gh^ pEmR}PEW i{e(U|4\_KTqE8)<ҥ+w7%{Z/Tf%厀s*NgW$$=Wqt$H~|Vd$yg۵_5eɏ<z01򽘋F,gFV%TAHJc,B/|XU/5([m\b#,AYƠ+Y_ɢ뉓V{ p3~/Tz}ƒ# RIyf/uiT5xЂ"Dxa9h1%Ld&22}~ē0{, |rs{3RtƛOp>cG+` ;:[X8a}L? 7+jP 8TlK+gAo/UON`{=45S^!-]qn~6Oqz&HZvJXq iTt`PsMxSRqJR IwLHr"aA8N:2ŭvbf3WH9ALS"h&_~Dχ~y1P&&V~ҫ;$aU1am%JJT=Ͳ0X_a **Ɯ - bEN\v9`c@\5ڦMނޞq8 &AjkckPlN6qN?QCp9709Kvxz!֐3N$ŵ|T* ,$D,ҏ:u@dm| 55&#.V[b )n>.F[LH[>#3\F(ɲI']|8_X'&)ܑ/AoCHw>ؼi͜˂bm(vUOBn'IƉ/FZ a (LF/8N>9޽uqQu%2̩sock|_ۮ#Rd4qͭFŸEvbOH"Nŗ6u.?(M'qQbjYݽI>FP=r HV|ocw02opV[ReڻB="&$+mو٪ Z$:'Ŏ?RWqvadcԟiIqx`/Ҁ["J)iǖ (ϧc4`a_f:a#>l8Su]}4rHԝ@#ZQDK2W?~_Yx$W; =as3;̈'d?N`g/qRO+*Y>Fq:DQOgS)ZZ\8C0RMQ Nu i1om=Et3HB-ڰhe#KG±VET#/x}qD%~`g3ezm ёWH; JtM->18LO eq"^:B՗øϮ`r'Y?mxyO.Y&; aO{R \'k4N{3`>ntB>#DWf=.hHQ<3D|xwXpk=?~o*l:AՊ)?ڰq~KޗGܙfQ/އ7.Iɞt;E2fp)Fq<鋻jpOd՗?QfD=;K z< sOHt&15^8M bWN pHk5/ó3 TIhɌH"v)c,Pqth0ozf6/۴L)64zKCio4膅b Yi[鯟N@) 4'SOcvkO)ʴ1ΡgE7\*Ǩ (Itĸ:,"E&t%]&<"G\(eu(޷ N]Am3:yteeGqpa5E ',P~rQ[mѪЩroȹYrhuEWg̚Wg~p =;ƹfGZti6_U(ui~8Ig bzYz \"aMyf_0È~{_3iՃRa{w{$piޖƼ1oQcyZ2OՐ̂1绸!F'B* enjMҸ?8 Lt`.O&(;A" M8 C/W3,ݔpXojiP{(&ak—Gxxs݄8q *;9\$u=ȵOUvB!3䅥&2B$Tq;~}:))t Է.X9z!!\,];s4?“, l\Q vqpp;S7ѝ:bTLt eQA22 ls%̪ˆ}'6v,U`ixU@yad ׋g;3$+GRg:e"mrʬ6&`jxB;SB/, ?VwR|3גijvKP7%c aïo(^|ϋ޵\#J#4'ōh{ >N;!h}y){?4?x 6}W{s+OL)$ Ge8ļ, W,4Oq?j^) ?8!bJi=*IƢ$M-SWdu:tEZ9#?pv 7XXZ)#-lUg+$HRg X-ip< mg$Rʣ~;\OAюPg]I&Edaۻ+A>]UԻZb]W+ӥ\4~31VaL2آWHY jbۤPć3g$#Px.r9Br{T=ySև &iHFRi=- it?sInT^߭흭ݭ]~_^cz#Ґ;|K^L|;P{媋8,K_Չd$bIHP:0έ?wJ,PB~h~If.҆fCC <(Cb~7 "} ʢ[o29@O&d4gƿ3ģa%H{=|\ŋWw^%Q8à7'n),ަuZ)>r_JK)`)~`+ (ի^Oқus,eytr&-z9oƣ 2Y]N׬< )-ܧ4g&zhdqY^FCd"}?k8 rrbϖ7' 
{R8x.4$Ҡ㈖ɂ1K;qT%W.Ⱦ)]6 A;0|#֭)$cՆ+SoGX</|:QEb"aT|\esKfI&x7qΤ[ʟ9ż%w>]~gqoGWq[#>e?{Vy^*1?^HÑG&14dL:p îFkJܳc,48r!)7qff|Y!i? fI5׽@[]cox#xy䳼Tn ^Wai]JOr #HdlW)%ufX*J7$OO;~S028q#K,0W_.l2 mt@~<1E]"HDpq_`w}r-} J S'S`_ kgV:w HVTm򜐶Hz_IIk[41|zQVx lA2!qfp`$ >Mn(\a)FlݮH79a>6C٥y߱qMAQ3õIZ9֤(?=ٛL|(9Jp__J0Y)`eQz)=aUpmYSCu$}^pfɮSy,eafHNI:G(=i!/X%I[Ȫ ZvSI <êjXMY9!EGV}"Q]<e*<#La F7K}\zUF]rΫEq 7;6Ȝ+ϩ+ga{.I1n euoE Ѽ{>𪝾<{{?Ʃu4Y&Z-$CS~X7vqyga,@9q@nDZ;-˕pSDӾbvO\* [׶ IJљ2{WiGY<%XoJǟj!]{cA^S&b$i=*><2} VCKs͗~Q%́`0 BMYkSz)oڔUXp)FR]#۵C`Ќ2Ʋ5߀fu4@_u]X+oK@#VMmo.. Jxɖvq~F|?<~YKs!Pwպ͉{ bmAD43ޠ3Pwg` JY7#:S XXuL~yjv-]ďǶR#+XwtpP)H]n繷f]$8O^wqx(.Cp^1U.(Kph;C*'5uQRUrsI-\vrV\tz50S>!Mۺr\ &SpDw˫,Qr#eZҏ|T "YyXobժ c4p292VI >zoO:?~zqy! % Q8o 95Ij͜m&ŚpyUYtlP`/_iKx _Acp$DfkYxσ~8y|\>>l/mH O& zTL'@5BFuSC840;J2ފ &ڛ܍avËpnҿl&+ ʲЋ=2{Nރ.\Ac/WZŽνu &sxZv$y)'(a7p=[?Q,JCw)G<є$tk$Y( ^sL)#!T) K['=J%gI3;R ;'m%t᫐Lj5<_I%͒Q5Q`ScKMrI~FMs,~k _sgVc,ًTk4US'jxXQ/ " <?:K~ \4\렔/ \09GA.'x.pa̜/S{Vjv7%oSsjKa#X /aihF;N^-5`6YgT%=~ )b"8r1)SB?.n7ljn֎cmN`: *VtЮcSeI|$r$p_;^QѕU L[9r}@c=NQ]>κΩ+E_t@:JiuMD72 U0Ś4\QN:7$+]_-Y*c[ W#NXչe$R,Xc:J90v[i^=;=/KEe8[[-W_δ_liy}ٌnY_|F`58Ć,f(r: kۚ&M5'cyo⩰YW@1 i<&MC66 6ޞ5:S$> DmA^nzN,T['o4c] 8 A oƗ UOWT`8o vj]T~ōQܟ b[0@(r6X_t-KAHe9`/jo Um^]kսUooyeo?xWj{իm8 ]ko n[iwuc;|6Rʄw-6&lkp#/TH:!GGc R;íMUf tLtg2`яJ@> sArExBTr7I3`']~ FqS2Ub< KGJp$WS:7'3 UPͿN cؔB3 \{RqBӘ̢F8h55crŒ.9&!z]BSʠ<»`A\$c4qNŜڌ7g:h)~g RC;3fmz0ާW\L~zɈ"凑Xğ_ޮt!3Gc:*r5_rҁYp@^g{;{]V[:Wz2ߟU Q`'D3߻#AfI"j{p"IU5Ҿ_X^bH{ ( EyLRijA#H|hCiM)&)OVPO;<:;?:ؿ<:li8S:u_xc {7L9|d'CEr@'h,Ep 0ޑڝr0#}ywcO+Y?#C׮qވ1[Vf/6x|EGi!hu" HAx9WeЌ;|قVpM2$r S)y-ZZ.˔}DtN 6d uj?~i&l,Y@^7lNAi#K/d1}?hS΢[\zyGvFREmwk=oAH`a71؎cTSǼԽtPtFoiέmI0S`=+RwE':ed;sN/~ J5}ntBR=v' tueo(Z<*.)T\\/c6)͚ܗVչY!u.Q-K/?k $[/"m+dBRJ@u0 5ܴdg *jk VZ[KdeLyܺZ,]U Hjj\Q+[Ǯ|aeѥFpE͝}M{pY*/GO67(nߜVs"Gt^v~Y%'%N-zP .`,ytQ'kM?xybd2Ɓ^]vϽ ֕ПLtD=l<{(,_^m0OYF[\H5Ћ9X.5293 - Ԋ &@~? 
҉*n?Y`j&>L7kYws)Wx33Ύ6|C'LPdC;pAc/2 l0ۏƇпBAzJO%bԬ"(A7]:P#<Ӎ&7$pٌ-##E9%eܱ#3'uvc;3/9c-vCz/?,S0fܬU8(N/ĐxH4(F`"{;Gƕ;7Re57k/>P1B5F熟1_;#DG~?07>:Ԟ ?ЬvQyUV{Gnbk:*MZaO?vƿBLɉPѬI dxFbK~HElX iF,Kq xqٌs?+.R{#!ۺ*Nzpm{UޘJPd~6R!XMGG:]!?uk;*7wpJy'eD V!b"i^X4ըF^FV,L)<3_k`6ڵ"Rџxr GAJݢH y.PZT+=/~լoj%n[t-ZG+M [A#~t ;-i,YZdb@_i9~U2g&f;tiOJz.,.:k Y.2#^.I'it&Aribe9>fD?LtA$pԞnXrW7D/k'3s(2X.厸ɷɄt 3Yh{= p)y Z<OaOSW1j]fr;AzDArU[:9kFqՍV80xk>=X!I%`١Kf" WkbNԑC蠚Cl[[4DWb:69r c7=>K9ֵF]_nCy5oJh9T j6[Vp KY74GM?GpP|J~+/N)79bF4MȄBHW9;R֛-##%QhL؋֒'q.݉_7/0!{_[Ahc9{vN0aEN./HGI8W {txsX+`C9P*fO~EN~o.BsAXG2m6Q/ {9!{Y:mg~HZ>o(CÍ`ܠ~LHƩ)P,nk JhYTITYTY,E8a03gjR80RJCkwI R.XU V]F]:H`a/I mU0tY1/7!%<6 |2Z?}Pui|yY?ӪZ /CVI_|:+ 0HrƁe 7ݾKq4]ĭ+FJSK"eF%<-]V@*R0ǜw?.lZg4?(lg n8ula{_Ϧ鉤g/ls2M~~)MuVɧ!|6+q)\XCRrko_n?YDœop#dxeLaF_c0$9TZe(]=S&CmdyweQxAHAv|+xw4__]0 Zk iEzڜ]E B"ye9A7m 8,56.䉜ЁDu&Ts@\M*|dlR0Q`_{lk`47';rW({hEX30L*A&}O$L汢QJ!U{U^diE;'$N}0!A) Q֩ şaARCo:Q^ǞC944CNkE-L,/Ol(NfB dX! eSX{ \T݈"tdbj) ¶)fR(wGd6jo{l'9:q4z(l$qR j#V!#<0$a6YחrdJL|eJ(nPȚ8/냭 [.U2_=,O#2Z-RHj+T˵Hu1eZP%$[ EfEN3* S)5DN!2ȉB7ŶH5n^oy`6xV} [uΟȦڐAזwOsE+O j~^얡 g1liQuyYC~EfrNl3 -2U["ӖNMʜi+jп@bĭ>s+uѮg6éa1\Eq͈,]dHyOLMnq%N+xUW,[(O|K\~8bR+R+/-f|O {c8'y"`q42;DM+ ZsOj2,y-+hP cCE>,`4St"Y{M:nak:Z*aT%7o8>"Z( x` C.ݥre7y; h#u% ^ѡplae ФY7LـWR>J7na\Yc{\@DIV\kip+&*FY%\<.T,fڻ ?&,:i}V,o=2͂`e-Ud05VfZn58.Ŭݳ`uwfJI[hjU%^+%{ao.K: CZRb,g>EJܥܖ_({iWKU&c6bHg `U_;{mGyuBiV_6s+dz\2pj棩(g[sʂ6}S0G4 8E4GtEg+m8$ 8 u*g0dڲpaxS4YFJ;qr~$w/[6 ՟~͂2۪Ԯvz6*rꔰ4Ƃ ?PT)QxRu85>t&ɰ'9^<RM)kӅb#·JmΏ"D"Όh;o&n;bAW|2?qN$rb$)kN,+$RQ4Des5858AhMprR# X&7QTܶ ʬd-= m/w 8~!RBX1~!њaJҤ%0j;S i~Ep*XScwݗeu8cբ[So8 ?W*7[/H3N [Lt1sbHs j6H@G2/+LiT+yUtErgF _?iOͣ!g⼠ Iͥus$7 ǽц jN搾 52mJ֤AD [UOݒlwLYb 6\J:%H W&*4J+ /T^C m,鑇1y>}Vs)3S'ŹG^ӊ^l˭30%Œtw\Wr)@p=Qym9b p* 5=v*qY* x53)#Q!iثF YmTn b~|(`40^4DL^p~YuwAvB[&IQ|t8e[bIrx⮽R9 &xx1к3ӖPDޒ 4' {pRߛbGtPDؑ@UF(,SKc V* lsYL姴QZS%:%jc4(7W3rV!i&ou*J3"UלRqTn.Jjf5G󑔍a+o3stMزWu7MTXvZ̰wR2ijNƊ@uDw y.4$Y-T%q0[j 82ME 6.1(r:܋t f)Zy4!Ѡuzrԟx.2 X.+x`Fȑ+07TL"imxuf(> ݡF!b$=ʁZHZƮسBF%BSYl^쎟eXH:!7f=45XIt,vyFђ$MH=cBW 
ʝF`5Ek +ʵ" )a,-VrhA86jN`u;b{z1?zĞ>z0gK/)n)RԊ"e9Xo1f%lU`g^* -RB;Uި){ӆxyMIG+l\9z4>qOȏ s9Gf.G?!,fֵumm _ ~żSBnQ||J⺱ 2-5HA:)$ }mK =6rih\}⒏ NX1K{9t62 f$/yT?D 63m(Wx}Lt":8vg]lbS kf;0g0zT\,~h0c*Gls^?p|# `wh2Ų'XK3U Wܑ W}Y,Suӱ`RyTusE%oO?˟s3/jK7dqC!,^y#[$J#Chۆ٭ ;e@}{`]W[GMX mϢdMlmؚ7cu&d<(:Qbww`['ۓcrF$"p 2`܁^ae9On$ƈelb) --h Ռo?U'#~>q^"y=ϙ79lm K)@*'dj tr'b&K,Rȵrޝ3qM4M1_%73oԃ{]h|9Cةԙp^UN¤089W4\zo(D/N"W(B@Hř~3m>z sqfM~7ҕݓN?>Ifl_T0.|lmA;_\;^S>]hw2o8(Ժb*%wT: xY֔`Wtu#!G/BSM 1,^=4qdv'S$ON1iE+L+ee HF? rm)0fG'?GNO޺ʸђ5jP/yH[}}̼$'?pV=7h@nYă dÑD򌞿y겈%>f ۇ%1t kCÜ1o^l\P4)%4-hQ5.(RZ?1Hxfc#ebT><;}uXzx(&'pV! rkw#u˄⿯-p1VwjNT_t;uzp밋j&ə7x5vG.mʍJs4zBV>XĎTe ox PT=ޟjOlO޲'cPa֋:~b~^ij$'|4%.ICV:@6ԦwS㱔9?jGcvGbiWs|Ieszf;#뤆ckGc*\z•:/mYM_u0|s2y)U-^MڂWВx(L)fHt2Y}yfVcElbJlFq^{x1g I|%%ϓA¦M'(LV2T?ߊ?/ޝ41__]t>;]Rc;=Ǥp s̷%'TyA>[ {LK~sQE\شl;>fF7wvs롎 i~y5ldQqۙ~19L[ \dfq7+?&5VY<'}UT/,/)gkclZ>4mZŸ(Z5γi[^\+,=|lᓲ UQ)faT*ϵ]W"-sbua4nM67>֞Y4,u#]nnYP1\Pqt4y4hƋn^Ý>!mXK֯q]uqQ#Uɚ}%{ <bn\7OT@gk{+gGg&d R^, bOSNnH p}9v dDZF,P=М:TTXÂ'=R5]ۭZN멷jߗ*K:U Bw|L&g $1-;IKCaaKKk_tWU;m< >l[E*j+x:Jm +2ͽƤYl&J|Qk:UB򯪠QPD!4306xi9r$n:iYyTUW k]4kl}tV2IQW#2FDnħ,j6Nfc|s{:H"5 V : >,l2R@=_?8.mY:(',HP,oPcfHkEVthE -^>PnG7.+3d`j'b'f-^>4^G}A'G.B|UoC9; B^L?lX%] \ 'rQ[,)~*|{3Le9Hhxg`̃s;p>PC뚬7C({'ԓϢ4ٰ,Ձ?p=<$7ۆŀ3A,`Ȭ2)?ZuO90f窜r-p}f&hH i:?,#rSY"<.[!=PBlH X䮬_`j+{d5C_l*GuM[Ny*a^ڒ.;h1Hw'nN6𩏾;5#r#;)7g,U<W˲1݂O1)\Ce0`RM|L0*dCq7 ǽF uG.)ſ._1,rT8G8M|'!n4IeotN, mX] ?8Eok][g9uT@C9Λ`Ӥ0x#~?k}G9 o{+7OڋY&!X{uL{oH=qXL&,N}Ƚ#yɺ){O^w puV3<` U:u@~-cO ZG@ԍambqѰDiR PnqF@9Jƹh=gط_΃cJ,YMu|#$LZ_G՜0Qhq!&(KbDUϖ8$c{s8N,wZwSy>ղv[=]Z7 jho4{Hl!9x\dl(gXU&G2ܸvZ%KʔJ$֐d̒<E?As<1,o߆R^*W#6#qhlq0::3 WgV{unfwq3?jWU,VT+g3泫a.tx^QBRD>"ahH2<%dOn>BhbM0.C:\Hn-&IX< j8~(s`SE/S'ga(ͧx]Jѧ_ʩ/`Y#Ս;BU\W7l U$ڟLs\$vWpu:wgWov/_/֌H\L0Kz/luΚ?v0 MlA"ÁLfZ,@ 9eKs![dI-#OMcPW"*H*8m@|Zpk^2ʺU!]8Af~^M]{ufe6/|cc:Q[rxKA2 -(zTlm(gC)c1x֊L8uof{ L7덦A8F߁Lo<4̾2l 1C/Lvv@L^sjZg>fLz=,JggKt:_[1\ IJr jkԝ^Q#eW/sYX,i UXDq/:r|O gu~v9K ox4?s3/5!s@f,̀)Њ)ln5ws:o&A{+W* W> K|W>qJljl˦Zҿ!o0yBqŅۿ)cWv87^1 V9G_NϜK煳]sRsM>8k 
rAqBa1#u<ab##`kp_zeΆ;G9a{!U~iaY{ Ϻ@惚- ,@;#Xr`z 0gf8䔦x-'ɒ3 ,r)C Vhs~@O.[D?5T <0)sg?mY1,,T6|2UR]TPc2 ν,r[>}dfW,Ƒ?~?_|RbO QFq].UpRޑ[+wIy k &.Ag_i,k+l?[<ȉ<[ߟV3jW~x̪O+jZgRǨF.by5 ˆI*l'_,G8U~ @:AgY/nFO9?r6>xж́ėV,_ݮc`@8kxFQ_[wZN6\[͜0/%"S`/^\6)7㡾 m0n("6%Z ^Ida*IgoBpL(}պZo&1|wٮp~gyea  E1,bߘ AB$"jϩn),Q˳/[1g:Xޛg4o6wDQڲES/3- {}qrw_h\=%i]PܩNfꝴ.X_)'_u&KC0`&Cw$Vg>s(}׃] Rc: : T*RP4TaGO[R ocI#+p22sf9p[Ω}*;8P(;d~$vzޝAÀGYEFl)tfN`0]̘k=i[1:hIYu6Dͪi?TvwZc/Im脽T«*GGQ1ӢK͛_NO˓w'ΆsyryyL!l$b4b/x"Õ T5[pt~C3 1L eJ`R $3ocqj^_+FFG t`$Y'9?)n3AVbA\|4,2 apވ`e;> ]<=N+z8?x燹C}kN>~?ae!fS_PXTU~L:׀o>oP.OPϺJv?&q=̙}5X$c>V xNFfCOCywSRHvq+X-?k,b{]Ľ+ *XEB<,SvE]%;!H.r"ʹ{tzoEV^ϴPá 59eeɸ6 U؉n*cp' 3t>яi;ZIn;3r4.˧>%!C:Vde6}2g m UX 3^k8?l& {ƤpuJ]>gw@+ޞ-흃pp,9*%ߝ}=fk{g1yN6 pZKUG`\ZdؠӬ(o]ʿK9'qr幀˳=i9򺯹i3n2ߎ6E_ }>gb*ZVe>Ӕ{H\jf*s{{\='nPrVe,Tq7u+28u tBjcfYT?X[-څE)ͅekά[Lme٤~V2KK-?g/E}inyRBac)bp~'7 {͍.VDZpMФn>(HԺ2;U?K ]?,_oؖmX o#Nq>NNl?.h(2.gE #~u;䯌IJŖXƆLT~P-,qFAf 3V_+(~͵C] (_`ᬼc~9ʇ͎? vEXg*v?Ƴϖ9adղ-߲ _yqEzS< |1*6RC3Ej6Ac/qǹf7>(BȤ ~F#V^)Prɋq;fYS7g,W1yKs@i }%-<_5@OJG{;cg9+*)^ oa~ﴲ:1bl{-% f[,H=4[=29( weICi\`1^L*X砳)/Q-#Qp@To.XMf_BPYDyܱFJ;z߅֘qK3h:CbH %[̒>$5cx:߷zۮV'/l&~yyA תRgoSV{qCZ6ޣ[1pVW7x!2@ak( Wxȏ:׳lj4/nXVlK!s/ǠiS1<+cY &^7ۻwoiay[Fe[Fp/ck:mx `l k.m j{xnKy?6~Z'WkP+m2__tvG2T{-(OcggrSNySq^ =ASF O@wþ oyzM?K?FDGg(s;tz 7O)‘ssS_i}SI^crkowoY$5n,CB (fZQZQB6_Kp4)\hB:j 7]ԭi٫,{ʮ NP Iy fez? *tQDЌu0a")y>lBQ#rZtfzdʅpg$';Ly:HQ.O,ӂqQeey;1Ů"4_9j3,8Jۭ$ ly(P[v/]FG> +Ÿ>{'@9#RYr8ިm Ҧf GeGMXPHݣ-w%hq>)heLƝ@vhdRk@vK\a^Pn*ɕg p'˴hq *WN2uo ~=Ǵ*e+ošQ hF ?P”|}gÄp"oy {F\5%PI.;@Q<m^՛ּ*Wڳ$Sc)CQcGWP1so}/4kL̀nL} }gk{GNx}o^GNT~;Nָ(8^9 v90+d-;?4аI0rx|*4iSY w:?5zc4@\͒|WOYS^-i4&jŎóB4'ڃ7mh8:9JۖrY`/5A%)ˋatБ̟c;4˴.^gSMMs~O4 =s0Z5aWo7`+-1ebO 탷: K0lEaLˉro-p\n<>֏|b:qfÅtŜbkG,{~ k.a\EYKl)b35 3~koXwʷa*7X i_|76'C0A! 
tᐅiӑ!ut \v9_bj_N~@cF cZZ/NT/>f Oq>Y9nn _Gt}4-̮htA8v߽>w }e $X叶 Ő/h& ]C'df 6VqlV8]uY-e>bZ_e4$c,|ЄT6¥1ږϨ2jh7E7ŎmH$G#r$w+)˘NTv`FKTi$2[ 7Q0u CuͲ@ &Lb\rH3E~f-C:pLOѱ{`/Oon{l.}s`;|;4Gl: Ct7UbdA'ekRURIWkcU+_㽙) }u\H`> |rK*ҤǏ]*#UTa:,6S@Śb&ŀΤ8znk4SƢlE{F!8ezEjNd- ½: fi rWP EZ_D`6,^?K fU]U ]KZ2O%@GlUv^;d.(wOp].M6:0iUv߰pYTǤV oeowkY:˥ \y˕eQL=Q$!_&#ɯLI 9oa@KB˝ Lkȥ<å6;aKTjfT$4eeIb :64؅Gz腕D.(`_h_7E0"i28[L6qkq+%GjOUݭM>k4: (:;I[ɏ=J~H~TL聋! O|^(7uT"*Ef`_XXGz"RWhnVH$lt)5՝:>Fyz֨OY:Ts:؊HbJ"u6tp-U̘ IYJ\b=+Ť+O15@"rp)2zX R641jĻ$DE\\*楄WͲR&3vf}01wϽl?zqiNqGPBΠH]B՘? %^G&̭0 ̢YqzNy=u,ﭩK.رO.YwNd[64'̲Ѡdl?}kgw{o{w;{=ب/XeG$T.mmokV{ -÷@far_Z/YU]ýb½g2lwȵ‡ VLb(LێlhG&>1 i>7z}{Hf/'HKg{zF>(AKF/0Nm@<&aP,Jccw)J1,CC*KI & M׳#O!*WXѿVɘ^{sQIx ˼zΛv L1>.3(8֔%A λD8pO>d17b$4DP$A?"2yˮwW2iSTrLku"e9xK870Ϥ!̳MXBL=k4~/@y:nFce`@L%}t+gDT<^QܴX kI2]en/у*wmnoڛonn;VOPOr,7,d,ݜB+lXL@B֦Ne>Mvz`G)ٹN+5.CI#.>f?H_騲&p6S sK?6^1.&c$3#{%6|Y侹|g)$_O:Y'q*NfSl $@U(a)nyeL&Gdd$(3 c_&s@\LPSWGz* O^wp!yFa!˪;|mMfhR?: OE tչd+(!C?3rG`e\{?*$ nO)bw{{N/EdH=\Bqbz$SjAV+cP[l A+P]NZ wДO[";-E#. =Xcvъæ9j(J.;"&2\XǣS @A^>( &#FB3VȿAɬ{ P%%MŇRc嚷7B*Z@5߶FYV_'`?J??ƳH.mGkh hw~q|m~>^F;ѵr7+x1YY'+Tk60*g8pC&\ HͧY?c?/o |gPoe.ova!rǒ E[,S*:«V)G'T *:çz~?^1n`>,ILWacfDe X{ Lг-I㦈"eoȨ+b{d| [ք:t M,K~$"JPYi 2XeD䐅) `h.5sw 9=GN. ~4O>w6   X0(뵗\#i 6" Q ]D0(?nֿZ(/օF~8ν:t͈80^D8zn68B`7~vjYf~ע+w.tE~ň ֽO:FT547hMHGݗSD94|TE/9ؿyZ#ψ@qv9/ӏ#8s NMv3ugýbNsx*SH٣3B KD{MpJXyuѻfK+gmH9oERp+6'Vx]8 >H ,l&mY[PӁ#f#O&2NEIN;jΒG`֎w|0<7O#H:h?LE @sdl1[(zA(Y})rD|_՝ǁP"뙤* 06>8,W+R>WkO2<-Tq rOtY< +7{# 0b%? 
G.?[ yG=}KuӄlZ1jKINVY$o\n%RubQ 4lteICy✽y{".L6 -&:?y&=$Ej00B.+h?E (L>ȃ xx+Tiɯ`'&`am_Ԙbڙ,,n`\Vݢ||_(ArU#">BVԅRb~ɰ>pl k,B]zZ },z`!+ CקSXYη0Lɇ ,F\-go::k`]!Yr|%ٽnփ9d;O6< qfsdl;OA.l慎J،F GX#K,JDDʙҎTƥI7Xu ̸+ȳUhrqc)-uYyZ3M,i(kmjyĢeݩ0Z/ TEm]{|gOxOa(&?'^ȕ1;R8 FR9 jSZX"q*o``p?['A䍦1aPkjcXGO+c?oΝT\'CُW Þ숼zU;2.lmn?7%EAEq0p™R`"s9;*Џ3uV2CkLfeG̉X;<NM2tI9}vUrDQQVOA]UB7E}qL(r)jIV^MpBQ=>/ۤt29 煾`8n!YzZ'L(i* y@=c)䣌}^\>98kNtF]/~:"{9c!>$ϜF{CGp(xBxÀTRyUIpފ]p 'FÀ:7٭sJa*&sXfMs:de *]GDd*~<KXrXs)n_@ntFC`M]0HǦ۾KY&!cM\[k9 XLڱ)TccBβ 9zhi 'Ԏ *4 bfݣ8TVEFW)H'-x.Y4Q $Vda_U-m8N;͹UtF]ڄźqeIt|iV]Wӫ(LdOdW^JFo.{6GC*$2Ҁ;h :L8'eU2u*V]UJQV-%ڬC<(P n@%DA5+@'<6܆ZOPE@PJLS*IPxOaQH +ez]3no2 {,5=ۻ{sw?Ƴd'YjQNov,G_iBP>Wi^Rċ»c2Ue6aEX5pW'8:M,v7ƌGchq>|eZ4e,=g; 8h.Cz'OA܅ 6,1II"Zv@ 2d;7 4 _k}l1pRs"Pe]&_^j 8.WcP"\QR~K&z(g21MRK|Yf!EhJKk:yJEb䢘؃?_UFY咕rCK!H*] RKcYzU׸K2 ^//<|Nva"pR>ůY1s(jxKěW4/0JO1C'0 c<)\fށdv3vKg.To exfL7qeg [Hd湣)Et9؏~6˷i)^RFUtH4L711x2Oɤ|v4 Ӫ{c$|E9υR0aC,$Ofxr5W! f\-,Byߑr 88?w֚y[XYƒw&TT^X%|2fZU1mc,ٸ6TR!kcd445;d#5FyPB& ?L,(q`5*a&Rg8%GfCב/ Zt?!:+8N@aJܯрs?6$k}o$4pL3[LzpmdVBY1˚]5yXԯl(U׶r_qkݯp3ܾuaWshq]}WFfPʷeN+ڈvؘ vy%̓B:&fs GW=+'| TJY{>c߄}SZb׾3}^~rM`V=e0o!:G0yfū0bxQS+m vw.yP]O]/{ee6A&Hâ@@tz(!CV#;7;Y "5H|hbOc**ǔRM\c ,pX>ײ&T\%M }]uP\ f7K5Mat8Y>WWSӢ_UiM}6 4z6uvĿSK7U<̀+k!)5Д̭WӴD|gn6T\mZOMhW!: SgϬpe'#fVMT9 ʣq;|7gžM'VxUfѤ缤t.,ϫhHuAThR4VCj@z fg@2YUOiOKkpUmwgk{k}7jbybv) p ik̑.Q3]df 䭐rXbwG[oH U| +>Dz3J|\MYZ!B9L6s '7I(bR8 P89GRy Fj ,$f3'HرGV̞07yXcz0h̝w8_"\0O4_;nJڀ67^‹&ׯB|8e󆫿FRgcM[ŻLG_A*KY-,MV4GtΥX("H my8EIhyoUiXh,Ե+ooox>5y˟9jHO_UqScRǜA:`~xV̊LDε\^qeT@N(9UcfS K:6vG/ֹ}ܧ:νs_}XyJneͰN9׫U3rxvW8Aw.-.TmJ+q3rHd7TlRmԳ4izLgT:koKhϷp%@H~z͸iLSoWf@% e7 <[QSȢ.0%2Z:'NjSMsWKVrt\0^0QVP3(r:XA41.}4oE'B#"]CG%'{7p$Z(t,S&=)7Ϥ)S57-mR݊jo/¨><1;#LŐ*gI)woSGx oum]q]%'Q%_\-5X *e ! jSw^aY4M ,RvGެstaزb%>zW "31 ŝTg5*T.|<'*W_%rOR~ՂR#ߏ`Uy*h=tɐ_4]kV rVD;@bM5;0n6\!] 
@Vo$1V]?YI*wWkxumo:ZâOEQdwU%/4R^1@.M"o/h\IubWHsrURR7K.<+H'Ú͋'o$|7q\4AD3הY`EƧ{JsqrdudSdtX(ָi-yEě*KV.[-[1.Ҧo$Fh,/-gdk`s y<_m kuF1"s#ORr#@ִ#@2h8WL)kcQh(2Wlnk(p4 :ΛBmʆ.%_-7wvh2Q혭'σϲ@4qQp;N)H2[H^ʠ,H'SS~P@X]j[t~`;|D0aX2狑1 5nqG?Yj=gi]eWx=\ړ2樳r8[dO vTTYU+@̕;f|}-$mK]Na(<8vrN"ٸG"οlu6qgO VKJ4.۞ sѢ?8!5;{[A{gil#@_$X :{KǓ8B/x>У#h 9 Ph5Dw%Y/c Ȉ ;k02Oebaa:Gé_;OaVҀyM^~V#ϳECіwNOY?eԃ|xpbpEamFy9C_6ǩ3"N||)T*pb$QOѪJȑ,7LMEf+a,_Dj??f;GARhH +myG&C![qAxgτ˩'jޕ8W|M8;ZExķ&ac!]dNX1` æHI0:F)'4~se09̏j ڑGy gip'`\akԩ2&df~JʗkDL¹MmW+*HᤒU1Q gxt֎}~v.vI"]bq/cѼ8)J $*ƽ?kw)Vp-tYMJ1C6k%2չ:OXuA!`-߃IP*6[QtMj/, k$^3/#-0=g]Ԝ'?`<?+H<jGR`}/5׋>* zC3`%|J!<_%?bk*={OQ b0gqdW-r%C4 PRQ&8rѩLK}<+VUq|v:0><>v Ԡg1fA5/_UTpS.n>jY2VwGEu|$^LFtL: G<}82kĀy`:TԢƶ]ųBεVfx&S/'.s.Y, x0(sKܷlRK&COIS:v +9*(4FăyZgFb[[A&m@u/~J߃@n;κ0 -\`4jT>в"R "ϗ!G?  |pB v07^<A}8MBөPuAAFo&@^66ıjȼJ煍ז7~cA)h` w~4Ъf#lsw;\,j= 6bDZDRo+;7Kȕ!$Et.UYpG$) V~^%O#Ը7 F9Z]pJQG[ :H(*Xwa86ʷwe1ue)A{c"DfRP֬L},2 * X| FLS#V*iZ*V^*ibOnDY0y Zƨޥdجվ~pӂhʏ|S,ƞc4l#pu|0A^ZIVwWl֡=weuh ;YfΚj`S 9)^ݣ1컅5CC*%2]}W77䶬~ btJ2; dB:5R n :I4Nt#6d `:k:o޽u~=8<{{zr鼹pޜ=}sz_ӳ㬘iSλ2hlz\0Wd~U/BrK޻hV(Tϥ$4tt0p60Hy'{;at19(roXk2C'' dˉ_9E64aI q)X$/=䳊?FnLx;lm=q{w; Q%D˿2 NN )><4Qfޥd^wԃCxY]D±30ࢥzǓ/,ETU iUu ˨a_ -sA#0Gѝ[Ż܄rǬp AdZk?lLSjQ$쑽;b7svgH Θ RFgTD KWGFِyo`ۼqc"4_w<'7` s;:ϴY&0Ê gCL}M8RDnws>gDRSKyjbڦX @0Xsܫck".aD&U(90SϪ</p0ǣ ^K7:壯KԇQ48D؂y  \ּ LgDVbiU/MW=%Rdx")\|ڨtdD0qҐbz #Fv ,Sm ˚RƾPl+٣6򾐊ZI5N]N<zTFL _'ۘ%Ho|EkzKy?Wvy>ej-_eej]΂<4vbbWZy̓51Xjmyw3 /ͽWPRە3BIb͠k aABJfKPN+Oio$dwz ZC3ؖvuiB}Jt9/gg.n%u:Q42C a:3]oh#UT]hWR%T72=&Qa$k7m;<^ō)5V|00b"=(_!{Ӌgl))e z8m5ljw' ^%){l63,)L Ï֭GqԊl2iة1 E0*{UXnSw[<`#`EO ≺T >?>̑7>ϋs6 $j\I>RH6?~/Z7Ʈ)zᥩfv ` ҶɄYiJd.=bk?-6>2As>6&ڇSezh &u҃1}yyzl]ܲĆ=絘`;wj8Q"fqReUnBQJ[eF'HB=72㎇ͥW7vP%l:&-v;E;=w wsRQWuHH q-VfYd,y?1MɦO+›MyyZF`M6#`P]8*q>Û8O_JI̔ f9?:=յ6 2Vԧ9C&l'<"CvaPe%\9Tz;_fUbאo77{J/Z||m_7ggqa]لpQ4cl90 Ϗ>ūZ_p</deyWFs=st@m \$hy2̀T0s?>o.}/e͵T$7Tl] 2qfC.7c tQ:ޒ=2>C0 ?D8]$}uSVj1,MmVlXӡ^¸ 1A2pHoJ m-H;2&CJ/n9Wex+a YFL~j4)n|"uxl ;Mlgn ̂G1 f∏0s=?u&C?{T8fM QT8Y{'-pLyfvR:[%chLMbsob@38"S/9WeuYt[s.ޗz<eS 
?%.梕&Q/$?S<_|UYq_4RvܰW-/۷!"$ HLt8l[ԛe;0C'XAGeV܎PWttؖxJ<{*LE;h3$0-92\RA݊7 @ji "@s o$}`GyҍBv}wvv1u(z]`<ؐ_C`j^!c U]m_M8YFERw8B[xFWڪmՏ!vHJ(ũǃƏ}}3`+psZq8N$)W-=?؎q0[dߏ,:@Eζ{oi®%ϒr d[ pC3 :W~#g4P~ka5SOCmf-HXi'e=jӺ)De0M2fI@zF3C&b nex5bNrP ҰR1P^^9<3\a1KS Dy54&BT< "gS{G.?_z g6!u9.>f~^A:e<2%l0OI$U( -ZS&dodzgdJzx/"~Y13ݬi4O"cK!0P$p5M 'KqCqOØnRL> º82ش RcJ1ԧz!E kp&2W%޵pIwv+טC|e]}muգ,/ U~gg<v+o}i)yk뫑PR{Ϸ"\h̕höRFztQr`hTpN+B3[(hWmVe8A%$c Vܯ8*ܗgtFɎ:+'6?a<:6I9'<g"QNqwlH+?p!,hhO(ԥ<8ZEϹ̈fAu^ yHGѧ&{Q\84`j]#]7tۜgF,Қf,z&fԋv$̒̆07eV&?Hjq߫s|sw( Z*%5 wqM ?xŽDDiҤ{5Yy&@6ĸֹsz¾Hi꽘<͂_o6+v8<Qko V`ӑ#TV<+|LXaf)¾^5BwQKy"Y_ -l:Cf& -&KQW44Abe.,`y.^-\X2}=Ʊk"1胟JP)`Kp Ļ 7'$ˏi_.,kx.\QbV騖"U+$+'vr%}jrN3Z%fڍXh sӌXFxd\aIՔʁ96"AsDp!2@~irKSKKVT)#jKGɅR2ڐ}eEV K`_i/XofA=JT #\Fmդlea pXw:83[Ż2kVW 9S;3͇>8A-?V25@bI2//_I]gJ~w_գs d?$dĬPj/Z #)X[_) P/ f-b>x^}SrQb8 ufe^ 1)P fR + uuX/)C}hlyȻ{~cNOvZaS4 "eaR@ÈՃ2BgǼ}̭WiۄGx2O{Ge.M-uiX &8[wul5J4['QEvuDVp^>BFL~1C2u]$O#̈́e(/LW9%TpJRl3X'KS 7 ǽF05y5e2Fi*U#0|[KXJ¸?tu "WW ScԼ-T9]+[^^H-L}8nz u4T7 ͆@1a.Kٗh!ҿ'NqҩSc,5 ݭ>ֱ2.а bqft`BTp_xj-gzʁq֨T򮪖j06-'EWd ğd `餌@V3auΓR,Qv"34/+AИΊT,4_{c|ǧB៦i[o|Zp^F7̜^E'nXC\ki"7x d]:#.FRVp?feV9wIJtJކ jlhUrSKE7 d]"ܓo(VY/cV -Ѿdl/W$=w:!f^ Qc#Zj1kmb0Ԡbr6 9SA90)/3<'qy$ 4d'"(ɓ{¼ܿPZnIaz`#SA֍lAg?-b6ɨAMOV`YtOxZx"ֆ 9d' 6ْWK̼Ʃqg]4tx gJmЧ`X,z.^ֲpcO6t$um`⋱R7z4\Ru5x .l-F"KT&9[U{^jmC+PyheLg&zb\oIzs..&Ԣ]]&U>Rv8XH0B-hsݨ <3v lr0 &,TlV<!Ňj7gָ*Sڌ 2Zh蹃y8h,T‰'d]ٍ>U\PEY)DL,hQ+Hk/qQ.ltk -DO8B7!&hҔ7g50O˗ IRFm\'F j0yeA lőO*ݮ$H"Oj,$2MBfj1 !h%m ƩcH7=0Hncb, K<<rYJ(뒌p-F6LŢ}V43yCk?onj6ӣ>-R'X*yrz ׊P&N5Ф4a~r 2fw#%ABZFy+=1--}3Jg3k/ud%w4i*e9_y'`# 8&? s-kv"G}',g;8g ߵTFjnf<|zOwHǬc|t/lHjT#e0ai5>_RSj| ŤrKT.KťJ*Js+ ˧{~3'p+] Š]Q}iVmƖH[whҽ'w7UYkf[k^p~vǂ;b A1/"7Lޞonma8{74 ѠF& og{O,*(^pSAdXn\dp]0dg(&50^GH4vb G*uɄ79=ٯ~l.lbOϑşgdS!÷ JԚƋqYPYa$GM$-1R #5IyP) N1]FSDQ)~J:ſE*Xiq Ct]0jIe S҉ҷ߳4W*֖?Z1`2&JtOQ !iԭWOO%WWL 61ɷ2|'}i}evϖT#x? 
ϻ]6[o_oчyh?Ļӳ7i߽ avOi<*{K=% bjH%̉E(9Rho>/PX,oNf $*޶rQ}JdHVQJ& KEL",)jntOǝ +A *”XRJ-R*veV&ٶolnv ,T9E;Dy A8rn R ÜpoSkc?5hd0V0鯂?1?y_գfYyLvzd>Lt ~rfQB!Iv~ianw:^{wN98p a2ۈ3GSak0}LkFwV" RQÿj{+UEUZM7Qakl4!]g吮KUTPkyYJPCڭ jNOh%r9ȿ NƈNdLn*'e`-#{5Y+)[F x?bB+>0vAԮT^-Xt*E޶N@)ȩ8V΀&f^SqetP(n |͸ф(bV?=KG[c{Gm?X䫍oU*f?;,Kř&1GlfUj+B8Oi9 \6+TPE}4W!ݷLc>K+{[G=Ӝe[Q?Ⱦ>eECMYPv'V'&lMJEק0ㅯ8W(&~8>NEl<߭bzEnk}5Ĥ1/ďuTYh֒*w|Wk/:x[o'l,RbLz@Nb~PHZv| 0t#\XG\~m]v \5/kOk/rdG' Ei k/"/S/bSv*i=h-2r6q]B6D32A9Z|;Sb iCq;;_^ŏۛin Ì} Gkhy/Q&}OxOHzT;M>}V.LesR|̢ztfmO2 &骳Ĕ[Dc!MFMZ9%JS$S[ӢXjϬM9|_f࿯g}_$TxBB3#8%9s5Ӣ~THw=jxT={jʩC9e8*&_VAAV:,{D bb죻Q |MbE q7 uROd9G"CoN8RwSLJ f_[G2@Ȁ1Af6c%E/+Y`!@ a;o,^B)^cW^,y2[6 _fؙZ{ks{iaklmҀv$: ۑ+< pdz",%#Q$% z#ժ$;`<[Ri7z*_CAO\l6|rŧP6e tEsjT\C\_ ' YZt|27'bםgrs`u"EMC W82kIN櫮[&[*)ʶ|(,|xG.\d&J\CHQq2-\ZF\) De5Xa֝2]4%;O/bKmrMXalSGmx~U>Wa.Zb=P"ߵWi7rF7KFRީɵ?0,H|uttحtQsA/x-:|p9<i^GiC6 3E6[ Q0x,c(#P:^"a|j耔-iT?屩)YKEixzS9[1J@|Z%`\F (S,S452'*dCj_vENd;çI='4կ@H;4Ղ?|ܙHco=nHtY=眎?C} 8|ѳu)NdyMkr1qi%87[I肠! W8z"YY9P0< ݛ=glX3xB ]Gq&?S ܪ)LoJB vdt0Z?62)NCQL¼y^GaF lg{ F7gˌ2ukd\bӃ~ԫ@ dY)>_Q~n?Ot>0@hR@e3CѕD'n?Ȳ`,vϮ(PTֆMiyX%5Ekʒb&9م߼?ocW;قwg$@4R@N$7Mnw- oyŏ/SD3iDƍ.: sbb(e}QSi38SoQy׫IX:{N+Р[}ʋok) Ʃ: g 3>~5DZ'j ă:/}pKpw*]A /]{kdXeY֟`kH7f. 
V5noPL8Х5.e3;%,~9ci^E58wӦ 73%o߬aSHO vo~4ngl؝7';brC\ F:HL-1܈̈́Ʋ$G\`f77,#68Bl1NфFs6Dިd_l4Lpd9Q,xN%l8٩QL_ڡɀ@㌅]j1% z.Fwq}$kcނ`8]28QzLw.iS3H}ξ֝ufj"ZKd[8̫ zhɬNYcd3mn T^ S `x3I.KRqr0Fٵ& d%dj$۴9ꅈ+ &1Sdm72m[7o؄>(Y;0ᖟ6k/!t?eYE>'FT˓o\?54jn" nbKFʛw$M}z{6 }}'&uUzf0xg nBvs9U;_kH}V ~nrcH fHmxHrcEz jNź‹QZYPeQsi.6l([JW ',/h8i$p/f ~;.* 5u9Dd:@H֔,Ȝd\nevi]^9CNyx#0gӢod%)Nqvy\(@{[(J8y!PfQ41-dpX) ;Ee p:<G0 4x!,4c󊖇PD,Ltu ~'E-]dc%9#Y,63 0nS+&-{c>m #.ה+ў@U BTd2,o@p nꜾew} G^ z7ׁOz.:  &֕bmPR8ajGAsn]]mlr].41.}4 3vǩ󃞀*dpĮDh3ls:{y){ڈB -H5)y P|go{C*.pT҂+呗>&,Pqg5g^Xгsojy4>0^\}F3݁lx?E)J"|dsËzY ץD<D(}[bmP§DtFz"Q2uLѮ hNiN00 Ҙ cMXĪy$L3Np_KP}s%~`3ʃ>&9/k6"@tN(S ,`Dʷjwf(V g2'Mb,ކq:#O[xH 2c?Fc6!Ͷlqem[he9څ3Q,!GӷoR,DDiʳZXX@Zh墸 %|iX>E *lxSh$h!B!4#œZJz4 ,OP?xҊ8h ?詮Y$5 m6>m629 :.J Mq0!"ѧV%(½za&*7z@0_[?g<e[6mBt5f Zw}aB 1D/K4Q\\{,Ok n[[wٯ]_x fy*⾱+YUql*&fE~*6W!?4IUm`Sr ^~**d5,Ƃ fPu;mmcZbc`o[kdM7 4ٛVQ$"[^YQ^ 'gJݠTv2ea'^TrV/w%OB Elk^oj(ӣ-*SU ++T `-V溹| 0QˀR* n)V_Ys]b s޽v/~\ӦQ}4p y4Ȕn4 RyBk4,hV,`ۢ5݇`G\vж1ge]k*xPjCr%b$((E_[^5 { GRzw,^Pk!0;]*cIq\F ]0cp%u3:0h:HF^wdz&Dg{>7݃p{߸ E6hg~ww{}u=9m <n XÝQwpḻu0?o!5~ ĻäWy%[0FxY'(y>pO`GuT-vZcbLkRBIt롰4yqް}H}" 9@kij?gsxNli;ؐCQM~ns*~zgMZvPS:&˼&g3<6啗ODp"y4Pw6%8p]SҰTe< s<@0s;uS-<[1D*6Ӣ}WcIr 4&>r2$^6w}BOx@q R9 j,|%j UΦ=_+}teY^3[SⲤ}/zs%asW? 
A?65忀Ii e-!g.zڔx|MqT a{{yiM=+HǶL"rgTm:3$7?2ȓX.-ԓu y/[!Oe+hBeKbZy h1}kj^m 洕RE/}pi*pUn ba+ #k [֬EBl&i$cuim AC%" | Fkp#(.yebhgas]IﭭbV{^+}Kՠ-N5}B1%hy{d_EUiMG;7|k;Kv/(>+Xx\:AKۼU_K`MGyڛ8\M]xnc<(/xJ/V2l%e$C<\foM;SίdĈ4M'r0S-.gTKFgQ8 gt@U+,݃ZlB+%2(~YzlbHZTQf;Zc~lS-]L,xS%4A9[HljS/QZ䩩MǦiMuTܩ0s v@59F[\lA+YMk[Tzgrgrek:]-F9,.4f,ϲcj{ehro}V oy^R^z*f βRkMìHu\Eü{nJU}ut`+a/4%j,<ZҾ# |]κ##bێҖEij i~WQvZ\]>0bw=ӑ:y\cՅ;^~sRLLwuPxUo }sRlYʀ* &7UXRebNb_ 6j)Lç11zF4%tV;'{?7uϝCwe%U&Q$( zz]:1e±b}OzQv0L]a_wN`f =-A=ų(_N4yG,kVfkVldq G(<+1fOwX" Wlu:o\'bLj[ZƙSg?8RdA'M[vHA8яɺX~ 7Wz0+Xwq$H1\X΋sa:e5rx@aNɜ躚B-&V@R% /k A븉sߖUgb1"-)CL4]DGҼ @(lUҋϦd"-S Y_]- %hmč~ Lx]e-tKn5\s) #ۮ*;IZ.Y=>Ԥ`S-Sܝ,>WV*[$ϢXF`WXX}_mK^LJ-D:/ag _ " 1 /N([5;jM*W@hZ)|ZhRS< >7f~gsf2yt>д(Z?*Mel\鍸?ONu,SfkY|uWw}u9zVa/0N`J)&~4L;CŚA&\}s7"}7rr_䒝p߽a0wooދG38y3@ Z aE4l k/`9ޝsS 'OpRN8Ɛ-1XX ws}tVw'?7~po>UhB;6] mo?;ԛ+K{~tu~=S1z{vAϏn׋ۣOa?vŅGק7?:9-a:nN/N/oug'%9?I?.NM A^8 hs1ݞo݄>ՙdɸ'9B`!If](A].Z PEGଲ̦Z]$Stc=zԝg+[`YXb?^D. 3›? &qR8{ v37~Fтhy#a?z0qّI©-ax,lMך-wVXif0yv6L $5\ K14Gu(g׎!9PxON| f-$g?8UC̢(eb|ں86S>ϋ98w]2M03n6+0V"lC8fqga!Y> {령5+9`@1f##JoPPeƋ+t?'o;;mnz~?o9=xoV KlNd;bۃtfA?3k J%6IO$Z5xXʯrgeLcֈH0xjQfϐ~Fg}??OM.(6u&~* Foܷ?c<lJw<'mcg$hF_2 -&mŪol"&5]˯PJ ,7須b , CIgKVm5w|ntV@$'*/tG}=&_Luj^6[t&jWRW0ڌ+84Rcۄ)]RcM厡U!ОD 6W~fnD`z@$0p IOnN^M<6K8EnE~?]xFQh<6 RQƛzu5QTsT‹yHE'nT,CCְPOs 棬_SQp>&PǹFmQZGjBf ў?=;t< SXf4H+ϓ<`tpXl#Xl#XlXlCTJS ~W?|68}ԻbG]Vˑx>5>hqYB}K}"Ϧ[YƍP"4bKRYņ1]UzJu{7:cņ3na"4*BcyGS8GS7[wܱOc̟pr2ɲDCbPÈN\c&1 z#Z,oS^W+;Dx+9źΧ]{@i0~H@5mzW>[w;tDa4f`ie9^4fD_E-i(x+0FlT9[bզ՜SyU!_Ip] mRQ_⣽LCVF.ݾ\p0Hp,1^L($Ofv:}" {┒ xoyuC,|[*UXtvitq=)o?.4d+^(j%`!i4' *^K؍؝;.No(G_O 8A+!3+aSCsW"_fƺ=F?z ,)DakJw)НT藳y[n!㳓b3HƗ[Ba!Fπ Z ̉3 .4 WDJ }Q5ߢXU}ܡE (v)7"h <=`~XOy#9tc[3nF!66-Vg!1P8rЭ0뷓#^\1VΩ[*l@V=JWJ\V=ӳT|WMrPS9Wc%ik}D^^=kij?7_nb01JPe9-0#ŸxMMy q%Ϻ 6XJPxX>ѳ>%J_ +|Y?UߋbW F5٠g>0wtM) :P% _[ gityA[LN#4F$_kiF3&1 {ɍ)! 
U޻A47Y֫צPˀAayƕ.;>ES~-(t~ZgQ!qCZhE1Y7I%hEjw,%Tu+8i3"f|BS`mV(n4T6sxА&.ɠЗ!Frljp餷F-|qGSWCC9VUxtLE΁jU<UHUAt3O2 loOlr12 -%YB7 ^b`mĖ]ÔPJK* S0(KT?0d*ƧTՙ&Dǭ6au?ų޲~9qW7S]i; SyG,4-8QQ_&q8t$Wp{뇑>=ŀ#jfrz7v'n ]n]P41=Z<=!`mIs#{4zĻdw7}K9ȸG98mB*c}}rgnDe3Rԝqn@ iiAu*!bd1]d;tKo,6m!2N!R=@p3Xvw?%kwD,;USQy;v?:-O˦cЏ[< ɏvĔѷZ(!ܓ[q A"gS0uov$K^N9Qa9f?oޝO:˛iaY l1 rZtO^n["h1nxaNμ4ί3 [QV3F*#ET u83x8VhRq@gJC>Y:Ʀtvr WA;RˀTb/D},%MI=V)sgM%GB]3Ǒ$Jsf]gH-(#+\&}M*1-, ²~"U:im6zCIz?<՟?4GV ]@tU|grpEU_EGU[h ܻvӰL™P +WM %Q4uC>y=` k3?[hm綢(y+Bő(2"չIGCrClf P+7bkk$ 7?#`Vxfz,In?fh\:8awK>4Aw NSQO'&˷D&ph+ %eohhQ,؃vz֥ !sSfO,)@eA*N*^`[v"I͢9D1[aAYIꁈ8gd/Tr!Q2꠽㏩opОE~#Iږ{Vԑ'h:)$,KuD<阈Eu@{` 1%f-) Fe0?84chqkTKe7Vy\cU;#łAst-oOQSLVl/ú!?2+ctUE}' P8_ޮw!%5PRf 7QQЧB ͫ偮hm{f_ zcD`0hFCr{חv?kg9X-ka)`G*YH X`[{??3>QpM16@mIJ8{VӳYVOVXgk};ꏸ7{(@F k}̜21_7]*{o6jXpd|Nh2,1]͗5dɋ`&}s){~pg|d~њT-K)KHBB ZJc͏ճb*wFYO#W%irJDk*VP֝+']M+uY ŞL2g~9oi\NSva (.밪h1"u>9%HZʂd@1EDitbJ&f4d>n! Rgg1+ S0T@C˨uHQU?99=>==i }TD,p'{;LÌ H PMAD؃.p1ڑsn@QW#p L`"bk\CyBY;([*ԗ[Ksg<d1, !.&>o0C#+q,Ki`~EDUIpjpd12*U5s5vSblemjaXi0+Ö2lB:B-|wWɾz,SĎk0uZy[t<]{wSԐnKkŌɀ;q+cH ґKz,i}=QD)Ǫf'T1%?fx{/*˓<$+Np=pJ2;)2ңA߷빓لSX#( ʦ5p~c%KŒeACḐ"L8h-p)ٕy{3%5Ē؞K*UKTAQoQQ>Mə07M(M!Cx9<$R6Ll;TzG}qy_C=^E?ų ~+^*^{Nִ\&ɚoeEDص}Q JP@ݙ$ d^rA]&j]hec!FSvTҥ;UG/4%ʪϓ/."ia$ 2}%uzeW(%aqW 7*8>"n˭*,_7SWTurm ityya]̟*i]k^]>4>Fƅw="q#G|̾?m}|7?qkTp4)!͊UZopisqRu"_Ehx21;mR4N_j$hN \7; ,s!l/oB =. +!EK\$[nLϟ],dDž۞ tҁcO4R/|$UfS G/(1e~Χ)\% !/_nx)%V)g`oW*S5ۜ+FIlX~Ǚ/aU abX4 =E~ t XOFA889bJL2[YYzLY A1PAY_XVy,S*#ާoby|2$a6J2c(0;:؇<_jj^nD)  x% 1r8 57_ *^ŧ4s&{ڤ{\paoj{Dv]&P{G4׸*BzW~+~t;_h> Ԩisx]f/&i( =D= epRyҌ/o_;y2 ɩvŷ zO|sq;rhr+zby.\o96fA=X S>&Y#D 6{͚<:n>BVkzGo~x#h~gcE84ej3. `pvkV3SELS? 
'Uk=?t&ɂe^bbtqde/_R&~jJ$Hfws*0L@3ec]%vPbFU>j |j=jxi[+U(c}l{+Yx5WLVýLWWYʴ Eou$,9fY-i,3+REkEJ`SJ %j0!$k#Ő]w"hyk:>c2Qlfk2d@.`gȀ^*3 H5݅PK$;W"Yb~M[n%h`д$ALK l=jioUT%LH|Yt?g{俇_%}g ?Q a*Bzף* ' 8xS^IT`ETLe:ʎ'G::ٵ@ҀRÆ؎Ǔ5@:"Aypcws,&88V?#tó܆P.E1(J} ap1 dG!veVfO`i!I, 1 HI_6_􂝥E-"r-$(LT՛ؽ-=X ;Mi- ٷc ,t:l;"!MC*06P|p0$$%mvέ8F`Y8 BÖyr:0u#ۻC1 a(%N\jс?vn"ߐnk 㫘kNc_qAεΧ]~Gm8 S֬guZL˥!&P6ypXnN8f0GAŽȴm|*kU/^̻),b6hYP3ᔲ3p3 `4dгM 5Y66;aYr +ȾC!gp~:pHŋ >p#+Q+Pe{uGد&JILucäIrp=aho+;yM m`.dȿjjVs[w3j'Hau%ՎMF iOs[K0-ПQpe-9&S Ic {X{62J7l:o![8 ?&Hl?TxVJW*=Zb@#E;#SNdhu04 /[)H?.[-RM@9ax PWGl!0I5pdv*V%;"YE}LXYd"{g3ZOeП O)֕c;3A @ql2e=L$Ŕz֕ 6Q$Oq9:;&, ȜdEbcu%Š'[z1hckIIV(a}Gv`9i6>m6Zym yncN^ۢOO5Mƺa3)z`o;%'Z7斤z܎OLwNxQц\T@OCW7KuKT+IS[`0 (u]*TE4/do͵2%C6&\Ш Wg"! v/( &O<}VWG BHEGPKkWY͇>&D1jsWע1w}IE'ii5v {*P2̤?#yT,Ë ﭕ?;{nygQ--%,q+ O4< ۳_Vr'T: @kzy$$) 5 9e߆(j.>`Α0thQr + b>BT΁<%TS+6>7=sZeDi۴-uz6څ3};bo~~w?_|ۛ?dՑ2R-ץyXM<?쭹9TߞY}2Ê0\s$fBJ7/tؿYǁ1D!\Y8r8ߟ ;x:펵q kI9$.@!,[7p QH h#f!CZp1+4b HPf8xר2`Uu\T V2.kQH,P(;蠴T 21I8fM.- Na3xaZ-ů~MBS#zaFK)Љ!ZB8mSvvOh}|cfx33~ ' 1S? s\S{%myYЋ?Q.'1=ȏqG}"t'm зnlimgXq]fHO ֨Hgz8iȼxUTP1mjxx=ZbMӐ%w <g:sg^]+hy<}TT /j5?&:&^ve̷'5&33XԳ>da{LРF_u x"}(VAZ_)b6Μb"`NL%j LuX_&2E /ap*ejz踢WƓ&\R}éお̥ȵ fHIi( xAgÔoMI’@jlۢ}#eY*o˫S r\Ɂ<"}Q6z2NeŢAh$! 4p''KCLkp^9w fF{nk(:)QC/gF©Axg'7ebmEȣlh3 ךb!<ĢZڒ{S; w_RčN&s[U)f9Mk-L\4iy|ICx +ʀeދxF4_)rr֫)6[aJkȊOژ5ў{[@2YiW4iM\Ҫ(qu+3& f0p'X1rM6Sʵlϛk5k޳'lmagYkR馘®Tʿ_$yăW1yND?5J;4y; nޑ> JRg]0:$ CCG]j0][޳w? 
PLݏʟφ#}P1p 7A$ B*z|klh=3R4E-*$־Pș6X6zSm07Pvhm㝷=b#j`qQΡB{tk#=aMB8, k'ق&t1>t4eifY4!PH!)WON=N²ؑˆԡ ߩVw+|T[+|w!h3[u?8NW |cvVй.p3“ӟ^mnZ?ϖ\ ]Hu4\YB3XW)%dVjJ9A?&49ХjECUvu I\,qI@PJwɦ!7;ۡW B5 ߔGWz~\OBi^0χ&RG&m!&*a&$ej ߧp5(HsK7#Z(H`(V2GCңJecǔl,J5B CIF xdWJ=::xk$*z%x~[9^4d ꈭ u:k=[ !A벙7V@)!_~j}NSXX}4a4[)|S"ϴhUкY:;׺꜔ܨBnbDu#_t4S]?ST)Ùg{7{̓:#:%YzETmS]>W],Ux176K.)7;:qQvx\`{Gfsw"%AXO?ۼvSg7 A,bШ &g{I-ϟnz\_X.*OX '.na\N5 1u半i[|Htd÷.|B$_اHzC":I:X ::\Ԙ`f@%Uv@x-#R8^\0q=e`q7h$СFb^k[*OjAC +|]k_t&"oSCjǾm:è;d"n{7V`λCoչQ]&artsM%QB2Q ^fsvwMTJ#%s*j|)r(`6ܴ8PG66vXxu9w\Dic *kQ h wݦR*{.`rPLBł]T >5RjfXF;fLb1  o8R5Mmw$!lu$䚡ӟIJG1)ѶvwR8g òBEJ,AZ/+EEIx|ݸݪGV4>Ui8q82`Е9VcM+N?,o=@tKOEfLf^azGyR,(KUCF2ᚌuNspC%d|E&#awa*ЪEo2]G?zE IE+%TKdo.2\#څ+>]v$ɼ4ڄQF]TOD$M̚(qZ. 1Nt1S@J@ j%/1a(- P%΢grNW܆ec ΃A[:d2j2hN[wf <@<$(i@v(Fݾ0Da_2)%7T){Z4Bn/, ^ύ&Ooy=v4̉pk"'h5G8Ml DBh5r4D+\k?=H7G\\ĢRl#և:UXW0}<*SaG9$yIFW ͤ  V dN܁tC#͑]'AlMV=桠wbo{;bhO_]n^J8]dYKb@z3{=ݺn &-t0qr85eƶm a00F\ Z_}=4ˑR=NJʲ)j)_da!|Z{dEWػG.OP4Y˅X |o>P@rhoTi/vS"rTBVHέ{oY$jI_v̑Eo.o$*g܋睶ȧڔD:mN5y‡U)bBXL`*ɰ(za5|q|ΨWKŽYTMT /s KICHy2Mmǽ{Gd[yzƀDf9VqtcvOeJ WbQ4s:" 9,etUۚyeɜ˼]̚by`wJzurڽu19dzۋw) Q6,:7Oo7ݣ7][͝:]ـc9>°nϮ.Υ&uBۚ 7@N/N|`2;}UKV;ڼņ7~ yp0e>tYSVY0MD1e?2hVpc /]cQz^S= 7_7au>=8w ~P8HA8`́`"ox636|-zj4oV +8f/:% 6|-q^iTU =? aAp^X0\V04g ]8&IVQxb3L9͜"hB7ֲ,XW'We!Ф~:v+>œUo [iZMTZ*rDX`rbz ȝ9u+KP8V=bTAI À KIH'04! ( 6Ϭgeja^|u Zی;7V{ Nw\ulb_ KPx-I-BQQed`x3che kzom'&l5g />6j۟ vtޕCs?KLdz;ƭz ~}e74JϢT]u8Fc TJ\k, ]+ qMVjJQt!|P>ʮ)헩 M;e>"Sf 2`Jf DLcLE W$` w0(g& [2g-v*sc W2 X`3_[SK2 WDG;ԕ-SKUw[=" % b=FқEX^Pw>YKCEXGn>85~z(JŨMq`ƙL.jML;T5,=3+*Xw8t0eƸ"-(DZc -4RegL9fncpȾi76mMQz5풳M. 
Q#ۅ/Q7&>)C>(eyHXOgs?xQR})F}ɱBREhs5 Q J(` ;- m}57_ q 4aR/8 nZ645YZCءF5X0'Tc YQ/*$9zHN^m3z֮7#$muaٱ]Lj]oI)UO>`j5g{ lm+<>o㻒񹑦ORUhE[$ؗXaL [cmLt`=OT!k* Ljt'oxYsSL]Iu6m!299$M+lMQh.e54c `\Ӵ>̈;N77huv󝰶[]'a&yRܡv d_K8rϰKbz8!𞀈Nb$TocP<MX_~0c*2E\;rf{ncNM=ވ+|4PR7,E]7FYGC)mӻl<QҼ.NepT7m_*1-[nW07ie9PYb6Z0؉=$>*u@* Lj&B y _p&B12?SrN".-ӺɈRƳa[K iL ˮ:YN?C+@:TLД-;+&n}_#B6 T*Q*"MLB5}:c%`ISPҀbɲa5(4N}Г/{gХI ~o+% R d#-\U,K?PЪux3ɑZ{pw=ųO};@.([Y[<8)?hn#6c f/"Ϩ k;B17xg[2tasR$ mhgSC'$3&ސA4A#?xH$ZNIO, Mױokj;sc&VɎ^%ZTϗe'n07gHQ#56in&`S}:蜙fp=(C†-+v8pn#ntwuձ(X1Kod/&>fQom\)p&`:a6tʃ#Cg*DC9<0tJ J#BzC ?ㄕ8}?Rn/#d.}ڴvET{ع @ʞ륋"8Յk^ L+#)QʸFimRSr`stZ`&rtb`&7_)5)_%L]݋T!. 2vSRŌд π6ڳ[pA]~'/uyV־ga_4Ai?z ZMMS=ɳ|wT }Gyg]%GDU,^PΛAYpB|5f}\'NK$ 8EeQ!`65Sr9e4k*MT>_s o>kU<aލJ RI@w̷ُ9iR)g#ZK嗣ll9^m¤# rX +ơrj3=P99 - bt 0^ ?m4O4Q nWtˇÎNRP!N5l%t!&UGQ4 0}_S,L!FvXv B)ۧ ([=WA{C@4ëk<޳5n76:}-8 5N˖ŵEVch0tmCU9O}gJQ15EӄnXAȦ},;}߰ ?e*1"4F clߣ}8̸zpS8` ato@A"eD0T{Q:lZ?u:[۷Wnnn.oN;Սu|uyrv{vu .ɖ帔4e˝sq ǶwG ;7<2)qbY@! R;A56a( Ż-bb^J>s#@jThQh ʄslP,d욚ӤXn m%x㋻I$,` Z$Vgp⁧!\o|mlg-v,l턅fW.CQlޏb 'v8v0X~$Y}@9:κk3{CAe ;yf`eKl51ehJ{0 )쓡S6UJtT I++8Cp c!6E uo򋻱F>S1ܝ2uRj"3VnJo0֓.&^ t-zjOمt]llͭ[Z$໠;xErme.|UUC2'[9L)Nr3 B~'_[AivRJ$Ϛ/+UѮ_ 2T) -24 ):١G[XdsVmB\/ D n%-j=(3X9^q;]VZ,+sǯrp4 Xs@~ G8`EWh})LDP}B`ͼt C NZo;;& t#Y=eKb#w3=I>Gz!Cˇt,5Pk7 gRu2fbz)҃.l6d΀~})Z379zy\S"]4R"ל.;Z_8XbK O]fjîL(tFNO]vw7S1Љ@za/Rإ9t+VTՓ,n5.YjÃJϢiJWVA:.<:B-@fz9!nbLćMqޮYs wteCzY(DRCÂY><=V~8p鬋ʖQݸ+SϕbJ2Jz,=_>Hy뚣ѬG1^H;5DC(]p+0[(9!N`I4ú?4 @ob`)dώ|İ%w2o?.zUs_K#$m -tN@YFO -VBEH5y V笠5٦7Uzg0YH\f-jbl}nD} UbJDY83Dn/2&us;sԀhyv i\nr꺃<*aX .A9cbmQwHVͼ-]]w^0Ԙzzm蕱/x!cX+W2=쎲ٿe/m/oa'gogﰒ?ų|!\فaQpi U_8qz; pclt" ;C09IcaS+jQf)Oh~˪9<?oޝOMyA ('#FXE#w04,_M ^؇eL8"+D-?vnF$U}C<41(H]7Rvm8 c}2׬I+gh18X}xb& M~J?Qk NwN'-ZM5XM-q߻8b%aJl8f@c 9# uCO?CMFk(DDjePzq=<&@ jTmf޻8BpT%&b031CduR㪵tliy$J"ӇI!/-iTq8*eX(lde./i?J(x8Q۞Et[QrZkw"mb| gáC܅[+j0p=h BbY}c_Ն?_f4Mqh"q-0 ^@1J`h|( bM-͒2:I^qljFɗ_M]F@ |hڇ]N, V՗ed22/+#eFM玷é&**abth6|Hm(De'I[ { wT^'S;:s4=S̄Nߞ<4ް1櫌 ˩oM΃&h9S8u}YQ1'͠!z[(L$5+*>l)Uت<$ f6őeTJ[Y~vz,rs 폸%6蚶 ru 
/']OrSma\\L0V:9=O%{Ywj%PsDL71]K]i0B]6P@L'< \).my)gҒ/30n@ǁ^R^yR4 q|%'gԔXor WrH̝t0`g;@9vfCvBgߕi38'+dpGg_sƓN1z _O&IzdOwo:_U@ 닦I5Tb 0|燀`@ Z{)~;~ ~15Bs;[4"Nv_gnPEu| \jPL &OYZM5i!wJ̈́lO3c+3:#M{ukN#4-Ϧ"6;bsV6JP̯=哝p;G3",> .e&bT0vkd5PizU¤^YotT -(DPQxa{꥾nTSt ϓ8s׹olqZͿ 9KA#5<d"r$J ,dXiPPNtlҋғPϠ9rs_zC]CQijMQrZn\Xcri, gs6+S<\PT"Jij`Ey$göE9B"v>MCN@?fCNQdљ'wF>97#x}\##GaaQ'ӃP=(p1\Qb f백53[Wْml{ooWj D^2cjE*cfShjZpxl[>igFkHK~bNMTK[,jA[sjjfXE!0Yvl3cȍ0*VcGj @) N*2B"-)A`|$e BPj8v{%%li 9:YK~i Ӷf@޳@y$i?>=H`x t4p禰"m;)TU9pcJ7$˨lbXJff)e6TNe^? * biYrTN9w2 [mgeR3K)ڥ\ }qwg^1e@e75=:^8Q%3[XO]|?,ogioN";"ޮܭO.Ϯ[gҦ$fkd]zraaz|BBa/!<3}3aag8.Qh/$!ՎB4 `^yɘ0 2f ;Bnt 1DLDπ+n7W?c ^K|vƿ+xfwιO^R;ݓ @&^QkM(>(ѫE3RI_9"ϱnYOWe$fPZ.ƾ/2YfC/q?1sLKVֽZu lCI ,'J߳L!+)J4/Y%uy"P|ګYgMWVE?G{*EmP{]G&h(<#V^N/~tZMY8omXX8>dYDіSx&HxQS@qOf')2JYhwꔖ^%~VӨBc&aeV:#E#!ܦlm?QblY4*aRh鿽J$ 9N6Be MPTa 5e[hA[lHmƼbҗG;gJ$/$ ӝ$$V0 6k]L}{aQF 9}WnEYKZ D,AtĶ.K%aMRN\ګmmb;ߝ_>/aؔ5cML~},'ćN'ND+4mk6%< )?u|a&+ p@?:p(m LGh c^sX,I-%VlXbwoz'8%7yTg*#S#M$Gb@IJXIn6 ONKڙbȝLF)@Et5`Ia>E!4Phc]kKdrw ph}o'і5MoxMS?0kN*.LJ:&cBdu5𛵕$aLm|vӽ>yӽ>9ƓY4V ymt xrs½c^pbقiHx`qR0Kj|ClYnD.i'Hňw7g5Eq(p%8cgn3 [8K?3XmӨ[/Wԭ7ȒkۑRS{o7P?"K`6C2mhyM[ Qxy S,\mC=ˍF&.HPѱ#vNyH84Ȳc;r4CP[}d(L[=ebBLkLIx;,`F]{Ȗ#2KOp [ \JMEw# i443Au7U[挨oL܃cΚL1{%n YzHm8v&`/Ya#}xKRZ(-4o9>2!"2MV$\:^+oR{Z#`s$g_NN89gt]E I$Fb[DA]o7v]Fc  J r`8>+Jp)fۗ)Iץv[yBTvZ-Y;)? r^F&eQEXU\[$7b7(E^. mu̢ 8sPP}64$VMw"B["S0 ź)+e.PpK܁ f,U[$TCKdl2{Y$T]zSL2tvK+N )ܕHdt2LT\#O ZqVt?mQ{ݩ=^אŬ.7*@UUmQ[EGLb2Y%;Cy6Q[.Ç +d%AJ S&4E~e +TgSAi1JjSCb`3fdd/I /M˲ 1Ƥ)ï S4 Xv<C+U~hKj8JFGvƧ_Ńؒ_b_%~s?p$ϊ?*W0|*WlHp~ݖt/6{}Ϫ H&bSe,8uyߜeS\Rʔl.!j0lx?FM0C7m;Y.N6+*Yܭ{SZ XXsWvO%l#TL`VL+7gr3[J8XjqD8fL!P%ዬՒ—HzpҔ`%YrM fAYsh\ok-WN/HWr緘8i"k3ԌV=f9ך]Q,Vz-uX_Z}=)} O;$ϊ&H<P1/^% IPNAo`j\.c]ya P+D]˵,WB\fZg VHMtvn ƔJe`GGX6}B" L"SdM:X1JEܓeTBBea(a•֚i3wv[~gxU3#XΆJW*_I$UHX)Pu'YIEb'6"ܡ84uyu{ W 5R"|x* $I ODPFB.ŝ;wQ7MK[X.L9oF|)]X%?X;,;-Y0 /{o$!imƣa94T~wH~8䪌IȠ+\ؕ]h`!ƘF!@w1xMC QUMl+ᤩ͜)Nf^ 0ىWf:Kqt?pE4_L}t)RZw*dP,is}?[L! 
CZT`>]73Cl"A^7y eet +c:"kmw"ہ༣H)#{`Xq#I: a&Cl6?@dL" P#g Ex""&[RYp %ݚx.ԛu9ѽ"ޤrMV7ja+KF9VVŽxB]좰 2waL۞;rNK2*X(1J4O 7єeӊDf4!5Mu1+n\HÑ󰜡X-,]Wnh'n]ڇPNԾ>dQF;Ő8j8 "|n9ʗ,'n#.W͝*<+{}JWVz7M61P G0[5.=9:9%w'GV{ IC8M(Á}Wn˦ sT/ږU_un\|ts"~0llv1E(9 tIhH$ʭh]~铫ܲV[6/l^v?f.M u,:̛`0#u a8@xRڵY>T=_ Ug=QqʷrTAx<Uvv@$˶ "ȨRWzF㺜LLmb*aAG5_IQZ'>Ey횻ک_,w;]c%H݉#4;W;E~"n1"3w\O4")AZ%(D0,v0wOs9ޢ}Sza,ɐP*UJuH)LS DwЍuN?{(1WU5 qKCy6[ȹGLP7l[aU$y;[tAҜ2),v^QCV1j5ekiM⮯פ5&(Gɖɱ*'wV(4QfDu @JrCΉ0/џ?~d]e*;l4݃*ߓ<\<*;Σ΃w y!|*Os/$a@z4"ŧlcZґE#-* 3n”kHFy(TkSz؄aD[U{ X6jy ]qL,lcj8ܸxAa6<*5[%5L4h j8 3)P1 4͸`oLPb QM3kLce+T+9iqIӲ&?2!4G\kF@T1 !>9!CC….YZ 2(uV{gu)lFBbT JAX) 'NovWWwSe~ ?A<:rd2~LfsWghRFgqQ ?oϛw:Ns@>/G?DDv_b3CܩPx_ xNi>Uwe}7B pb%˭hn^ύҲ7Oeum4SPȾ964霷w`1nK1^V)$ܡ hs. l!:Ktqw]6d?84ds][Vd{wL5 Aq`zsUaJO0[ˀ`lF:W``_<0#Cf}ب ^Q)DGȩLVB,vwS(}Ӣ o~4jX&*BF#D [7Ri)m0ŲY/}vG,Gccpf6<H/ƿo+-YSŋ>XrA*8߰򸥹SA!os, +sS+5qtĚ~eVe ̀Dp"D_ Yf$2!q j$?8, 3@F0 `fIRDۜb"eP3͋6+e !$6TjϹeՋ&YnciuLE@Q zL(-qcv+>`$M8 +dstEi!ҳlmnw!י<$9L'7l *O&d,9zIQLSY'N墊@EA{,]EtvĠkj.s&2@-sL@-mQqOYWPTRER=E3i$a#&q4^b7!u'҄^72bi$3${WpL7_ysW:hifnFgV_Oa+JcUE]&ӑgN^lNi2 9,ìw(r?Y+e?PKLO^1~zHlG!F48LD'b1o= G8ʅ*%1 줩shwhXz0HVQM=~) t8 N)JqI(Jea7-_ C&kg1}|F~eÐs7z"D>fC&QQ#:gVUksg7} Q]1F'b j{ϜjJФb*-ks3&ySTnP#HVT%\uS;$lP\E]"f?ԥug 4$^ݽ]rJ„8LaI*-rlI. (Ɗu͖"d<!_!f'sb d2EK@eNs/=yNdi։:DKϮ!Y0T9%rX U:Ѷ;ufG< Gs\Npux"q KM$?QZ%nD̢A71{a$eʑd(IQBRp`WŁǗQʯ$Ҍ:U723BK9CUѦ01K]"Ik' yzo v qPk[4HI}s=G|VϼK2uUs_ U`T 9hVe4?F?Ǫ?KZ~ѝ &tP/|ٞ5X#Fnp14>gΖ9;Ln[ Ҟy`>Byc >'%TBt&I&7`H;ef]ti 0oNxi󖵃CHH&ԥaeʚg ٕMˇ'rשd;_,ѩVZ: <⋖$R677i iu8J͝L2h̀+7nhTU`}s[w(ue4'lU>Sq~HʔкS{o7A I(*z!{;x"Fq'B[)9nh}5tQ[ ֌`}.~PcXK?rȦ}8; DmT`*15Z&]0Ǝ`u?$kijkiH{ Px'aygc<'fρr8)Qnz9'~1z6pV F=g܆AQgU}B*KKИ-gp׏x&>݆wyϹG8c}dCR<3kd;p`eҷWWЖ5E@uXhLNh~q]&l )/uc)"V(Ĝ8y|܆#g ,, N LΚ܂:»~.r[wQ?{t9j{o}3MC#Ed'OT'F}k.7PzELeYpC4Ef"g$pg40Ž=t(_Dih݅Y!eNZW7uNE{}I^q'x.UKM\+ˊ*fqyVteuU<ųNl^T& _ on [X0P3H(nZn~@gӢt p1: A%_۷GL[h4)0-EBޅ0 ! 
d/AO7's'`!PR΋ܰ%Lc0I'p(`6M/Z۲jC@s>= ICLs2|1@5sO$FqZ''#G+MnyEz"J3U,EB5;yB#GP˞^ʄpcC()S\ ɰoCPfdXGklČu"MvXpXm1!'Ye6Ut7(X?=zSY4@1f*e*3L2KhԞ6谄 ݶo`pԠV\ h$@B5Z;klZurYD}3 J!6eѳQ_S?Q~w ILfH?p`K_ȸ(N885^k I,k rn`^窋5_!H Ռw5ZKf8*(i ӈwխ~J * $Wyk5? rƺp2sGRi餽RN7TnH;t(hlg`d+ـT0DWv- v>ebEsdzE=`Bf.1IhNxHyڪpbhř.\m{l)20y)&xt9aH8Z0,rtɦ֎HGmO5v~DQ]є5g <tM*S&^ Q*ZVwzToAe2ּ(JQYU?S~CT2 zAh}$s1~ 5a!)BuyT,yt:KLy5!v?d2yt6 k\4p;UMu6XIp&~p =qhK2IPQ GeE9fr健6N,˱*8Qȓ`^ hvL1"?p4.jXghpˁ!餳-8giG &`B!̨ GHn-K- շ.OB-<)ŏ;ML+I[*͇_ݜբ9aP}p֌g[5#wgW'ր.;ɠ6:"ƕ"(AE5Mi/)wo{xn)Ea9#wɬl^&īNڔLeImob4:IQ3:Cvm̴Ixau4-JN@J fARYФ632 =.&̦;AkFXwO貗rH oӓ6Pb)G8U0>#= {@)}w{؍f*Ȃ1mc 9Œ{.Lyj1Q\Z@'0p^Fx@oo9BضŞ:6 :, \63>[ 2eJe/VI&5J=OHt2ss|-XnyM';+vt.Wo&!Nɬ͌+o3}TU虨 LV_ D?Nyt "}ES2mfAdVj].{Q$<#kzk"+AĪj[VMݪm?[hY߶ZѠ׾½GUP2X6y @; ?+E[ڷZة7j(lȿ9V{m /@}T:)#nj3^`uw嚇ai\_rDB=\e>'cgw[;S<߬`.mK0Xxk`rIFqHPy* C #n%.&HB`9e9`pOV>J2n8rÐ8jC mQ>rNANɦ?M a簠q'ovYjG@d7xsyNA~jt _l:"a?t v3/sY[`/:?gٸ ^FӢV!4&W0Q8)(ؿ~qFCQ1 M F G#Ĝo R}KLiAty$wLGG¤=E_p_U'zM_ oIGx̤_'=F#A`Hsb/gto~<.\kGo`$6Qzz^ '$y>$tTr.A, nzN%mFrQYQ= `)k迃aE=ųnJpüdWL"'G#PǤ*:*:޿(-Ai_sY*lüo Hn~hꨀJRQBe =?)u߯#?P; |JS]]ŨA,جx}?0+|C.c M)f RB)1,P]5 -umY:7IiL"ɘU Woqqʽ餜\]ż58Q40X2pt8*Y#L/]wu3f*|[䦲|UO_h(o;f(Q닏o񛱾;]CEWDTG`+%|NݐRK$/a/,+{e$*_ctptt1:]!x5Kԩ,遠gѾӂ"fz;sJL5~:-S8Y{kw9rrypY?{`_8"E}͸Y;?w/݊{g)GF#_1\ &@=V%vQDY(K.|]>8 Se}qD8M_Xʐ_ =^8I^F)"Lb/N6Wq~TGxO<JU_Y7}a Y#YIA}0֧ZJȟ[J%_Y{|aoV?Y7}ah蟙Oy GņJ%Q(gw#H](b*fb*fb*f?gXHGEakGw*'yM_ka l(g@@JY`:fkhG6#{A J%j]Q/js3񋦢bP*sי;pŔEn:=ɳn;&zNѿZ";X‰DzzQ~twW)Ca4Pe(^ցR*C$UW)_Y~2|\dSY7y|ÔMϣyy:> vo(@8|{g@wl~ x .qonfq>Fu82?[p  @ xb^3Pkzon ;:ګxM_4<s[p1 J, WNA=AeQH]SPXXXxibC-Ck4Ϻk<ů>N4vb2((K63 $LpF8ˤ+ *Z_?BVf3eSY7}||5sّ冖E&o΀Qm=wYа~95 1jP4*_>׹]^a*X*OSqD,,}0T C0T CG =Niu&Y?"|?ꎊyy+V1P1Oi_5vwRaIu /6L1<&/m#?١Xj|ɶ>P2?cN-ٲ.08 ` g^0컘cytM"yV =B?g=re=3kb?p#˶O cG[V8,_j|ǶԶz E7xƣjJr/{:cdYA{;Gm0C՛\ #Ѕfv[T,@X6`?X6,-+~0+YOC('v؝9&.P(VVl{.7V#Yߔ+@Rn P*8מZ2Kb{^xl\AAgҷfA1W2מ_̱E?9Y9瀉K;?BG" F 'Hѡ-mɨ_JorKv)(۳pZBپ,]8oˍ.ة*o0,XRo.E+LRy;,*05ˉE).y=LF^?/A_>FsxxX֕WzV#lBB#*W/]0ɏe*|Eż~@n^ύ.^PO(vX˼$0-:eYR*wS 
2:*|Ǚ;K:v1ujE{EҿgaxLq++ @?SY~=Omk)lR}(o0@.u OKs(Qa zG !yGAH=ES:g&xt}J+e񴜲V鲋c$eS k벂4j^FRU+uYϬ.ua ʪj+^( ?EK5mTO[ c-a@pȢqL g{,2l4<^IUTIUTIUI-F_?{;)OY7}a&p/$qAiMSY<+N`uN`m]؅]؅]؅bz;fQP:Y7}ldϣeX>槟_8S+eڿM֗~JZP8"݃C-nkY7}-m9(UQx9esA;P{dB=SLTΘ_S#/OT'y}? qx?/H9' DG7^Q N%F@ ynO=%8#)݊g B?c 1ťߖsD# ,, ,Cx%l5|cDC+~z֨zs_r{ۯ=ɳn* x4F!eE>C#.aib.*E%_g֞;aL@L*C~ܚKw*ߓ<lkJ2G\!H[d7zDO85T/'94*]ɡ"=tr=cjwY.!C QbߌJ.Peqf<I^-[ Y9=:8m24P>iýVE=ŃZ=>#{B߿rQ p% ۞s 93ܰFMÂH?m`\acc.'>+CZ3pccm$dc:@l>,93l'%Ʃͧ.YV8g}vwZ7~PwQ x Φ< R!'DVzaƎ i -n G3M@QtILᆺl3/f|*n, o;p3)x э`?f ZT,mNn N 8"*pc ؂Nϲ`8$e8jOECwW`( !ׇ1AfE9{pΑ"fwmĞ`G>CNPݪWn 'a7ރ@}iO6kEL3憩F@â8CNȸ߫roO#az__<'+Fσ mBMIr(!enwÏ˺ <6|zN ٳR76~斗7PHdb*;rNBĀ4<$ 9R /X3>Ɲ!vQx NK'& {z41XuC}-l,1ۀQn1]h@  IК1CEXڰ8e]y> 4S© o7fq1h˪CSY[瞡¼bv>\ۛl%XT HCgaVӤ0L} _={$Hv[#Ȧ H;oWd::?%c+:2W!uH8`x8 ݱ2W uי JK]'t}XvDX~/wȲwP6uL xk> ,~`Fu_٧)MudG<ݔ5XQZ+orG(2s..&-&G&)kJJBYĊc$'9(YOv Ȭ(63Hz2I"Z3,Hr Ne]] 3=lw_fYE]SJіf\WY@VifJk4 ̲,xO)̘fjDR,L'"RR,|@^G`uN6 bX;炁XR< ybI/ݎT{5˫ӶutRD"vqcѓ5M./$bq3;mU94hJcx4)bqQ$`82ւ!18(Kg N58* *z=.7S?4j77`V]O!g"9e1s[0T*OIXO^eϺqaol\Wys{ /;ly#SKoyo_kY7}%d,N%:Ha!#Y%CDj<7,b/-0Qbiee6edX #~ ʬ 8wh^p0;-,fB:˝z@"L<^<\Wryo¨>-K89>6/KCreD$KsXJGzd:0eqZ*BE{8*l{*UY"ˣx.De;d&w})YZCTN)' I,#Ȭ}6! lF)@VQ@R,悅+zx6)8@I}RQCL;/:~*5|p6ܫ>ɳnfrE8aqم _y<مblo]!i3طq-@FF[RB,íc,()f2dJ& ^0kP~;@- @ն`ݿp 9nY?B_j|GږU]}؈W?n6Z*0/YĐr>*|$S|LKFz+b ƒJZ1EJJѠ*ȅvXp! [)yxY&nU2[T]4KIQ-Z)ѥ?Z|ALwBe:v\APoXaՇX`🔀8,` Q.pVWk}c==vۣMKo_M>/ׯ0Aimq{AM@;o=͉oy~dYci18DCQ㟞u[>֔N/Oi-mPl0C囫n !bˢ0V޳?;w*oI4Y{ӌ=T[?`^d3d/^7 4Ĩ-lN2iAU$B͸*6 ,{W'ťzҡ-`: X[z +QK;O'^M_,D͊{GP`m , -Q煔%&KdB>]" ςXo1o$ ],N nTcm77>x &V=Z߲?n76|wqwm.-Ac\+W 760R^|Ht1c8txzxq ?D0>lsۡFҲٸN)iaQ&lIS.D:l`p[$~ =mݽݽÔ*YȚk{9w79S/[{Kj5vWX`|":Fx'tEL@_m=JhXC=*1`6T墖m|þ3i{{aS~p=fi.ư:sfbOg}fپ z\X[V{ C!,R&O&@.@yԱ:֏GΖ۫w G77Gg:<9=XGedr\N1p*vciΠauxGYVaBX{fhzϝËx7%7;T+k͸E lvm?W& \(!QuM_Ad,}Ӥ3L.? mEme-R,[!i^K]i؃A}xUO13DU7(Fs;{}txFx=0`!B}*F\g܆`x:k XoNNORwoO;ݓۣ ;}n6 ,V5w_8.OoJ#ᰗKrh=triٞFo:bC^ ?Ii@rC"AlIY4Eu@mw{rvSW[1&> wc{kW9vCƱCuqd?P"a屿fC< XN!Ý4^3 ӤPqqOn% ވ&,kIW,^_k9xO}s& ")Z15b'VF92p-ÒOUܶ֬?) 
FGJGp(Ւ+l 8p*n_R[^5_ uYDZQi@B0t|]9rJ)ﰁdFr(ӛW(,2J7P)Z8Ci"yź}׍aOHF{wM'd_ZإW3Ь<'|x/A[^H."B; WH%Z2,&[ikm-JDZ)!"D"˯lh8N8x&C?t(^ҤrbU+pmMv”«d +47pp-k ٭bP\/Km&ᅳ<ܣpo?*=ƺ=}5S'u:C1`x:ZUDB0!?|<hK5{Wns|߀404ICb@;M$g6Y#kW?ˊ-ٖ#Y9ѱ=Q,Ŷ>89$'Q9։Ol9v^_@pvW&|Vխ[nݺњL_xŗB|۸/b)2EHtIv4:`ғ&䪽b2ZP2 }x]㶝/kXDPת# Ze,[(cS.H8 jy9+m8MZY BD^ ⦗|ErN5qd2֋w cڠZ2p]&.arx۸ '$0ghD Ӣ .$A95bj 6+Ֆ@@"1@ vm&r̒  ♇IUr]jy^yW.>]0{n5O[CT_Z-oс9 _Ƣ]>bFw㋺IODlO;cchE)Γ+ɕ>^g'%/.O:x6_-]\!> dqCXO5/Z'j]@hDqZ8b-)%4SbdQ"([˰BD_!>vntb2y9!M"gNXU NNm0&(8A斦a%V`Wr!peg"ۦYb۸S' J\l"dUn]o> Z1ItB`[Ƶsд=]dUadG[_Bq&$5ioP<NSVLO?Tꕊ]'5<$C% ĝe%m^v,_Eo}Aѵ4ibp&BؠF7f4%/T?!Arg4~{疃 $$I UQFV#(~,e?-Vq'TW 9#;2ri|BN2IMʚS_p4"\4'seesQ|B(X"ݩ\vr OΎL-.NNоm慁Ufˆ9< j.aI{"o?2\L>Wm>}KMj7R*zɹ[Tvrfjvb2;j.b2ڦPDCQI5 [,؞FUJ_C.M΋%G]|CMP.u踉(zT}ى F6ΤREeš 룳.VrQPa6_ 7+:> {6G``,p;mTP qp!!ǃh3D80@DD8/R ٩yre` Y]VALy7/<~S)Xp=@{2KZ=;63%ldOO6SyW>l[a3/xnZjvmM\;pSO[Ԅ_O'&Og%OE*ōT4hMp?bH}llFت\?]Jz@,qʰ)Σ+Aƥ-BRȣ|nH&!ǮQ3K_͛H^ %sKK'۸/D_jy˱cQ ]Ҙ鮑[ٲp*6ǀ[[Us^*SmFĥ y)BmPkEԀ&ᩨ.n E4ԠL=p7ixM X1в5MO<S韙ґSQ@G S{<#Ӥ:9QCoB}xzQ{+ۗ`Z[U7;Wߵ#7:ߧ;R7wxgߛřyENIY$`+t_ ͅR{'ZUqQ\d,O\^įFR.ޮ 5+ DL&cC$^&z 3`DA@ :yXJnE}be<+|.< qO Z ba)sǮSꬪEUxs G"7R ]$Y*5џhQvJ# H*Πq٠&LXSl>-؄S@ %%PP NNO'sGQ>㢂qXB闽aÁ(z0`(-ӭNU>U"pNZ/ ]Z]$0,5)M]~ݎ>$ qՖ ] AHS`b dcC:bc ڍU.L?m@g $Y z!K)0Ԧ D=/PGV*t} zZ|Txo7m~ilf&`n[9PTEМ4ObޭJ,s+[e&hUfE :7>S"ye`(Oe?b",P|)|5! No$FbUy@߾i^ Ck 3n<\71ܩd>J!!| lD6Qn>z5:|e)$)(]tF77]4bi߰!hhUZkr?BG`<-CFf:\Gd xbgh 8Пyk4[dDtWa7=Z@o`0;Fm_CY땁+1bv]Ƞ/52#X^;lLHh4GCKPvvUp}\ҽ0rd-M%w^AIe^*d=I9M|[ .j:V@| @SI@we? zG Er@́ChD, JI/<$=N+t-kŽj/sb4+"~H7jxC߭f&Gͅ2ƨ\U]*:>']( )g*ChVDv&qh{>Q߀!E(,Wo@rU[͕`/g9ڀkWh*i'p*D~hM$p %\ګn@*WIBE^.y?YcbX68v[?5ޤo4,ea!ckg| 4EcTMB|`D~5n[7S/`U% XK*' (挗f*y2챟h# ƿnT$ 61=-P ;e1bhfl~~j6QF]R CEzb:tpCDwr+# }BnX7̤ɢ#ˋHBUÞ./廦V)JIrlP]C&c# Jka}! 
e"q -<@A)q[xlnat]- ԦC$Cߋ{llL~~pޜWn;s^ G.2vM9+N6([65d- BA\\w%Ӿs !n%Wݝ`S A4 /`MD)6,L-TB̯ ̮ fe-z+S'"存 ,:2jI}rE,tč3jٯJWs]K0K;vQĐ:Z`Zk 56x6hI]olM*E>BM- }DIS{$D$W=R@?NQ>E+Kj?ww\$K :SRM-xvz]Q %ޭ9T8gR[`J]i.55i/=;d%eu2@ oԇJ[VW*WzAǥ>Ez>_Gw'+Y)G;t`t,:_↾gw`O Mu6Q` K֗.n\.޹8sq!@]թWrg~ooU}Tgv re[BI CQ70,k`;˱)q6-.%mVgϩ؋͇JKf?eFEd cCRx<0R1ln d]wOv]C{mӮ3%qLJWU#@-AMDq|cEƆQ!*i?RaMC!őFK,K[o6t>? Ϗ6<Ց{7^j\v|:OЄ`ϕ\}wO:$qBX[)7*]wk2E,,iɞ/]qe_UVFC xM)Il9\ҵv&_A;ȝ v]v]Vub:[.ڳp=u;"K G;NMeLy] @hΨl]t[)M#diˮn d9n]?64j[A2&5SKlϺkZlWZnP-3)h<}46;,_Õ2Jn7Dp*핇^nnķV/RsStę55='">{ʘGn86[gҁ#Hxo.TTvr"717d—zAy\xdWsJ& {-o9׮ኁ`Ee Շzelq Ux6H-\A;X\[UJV0 j{-(abQl ^TC&(+`ABKtj#@|^#by9_l%*k|CQR^VY,zQ 7jc?Bv &;*r\qW0^Xk'Jd^Zk7560dnql >}TԄM!zm!$Z*&3LO.@^5u?vh:Vfډ !9 F6(]˯1=}mlؽoVj=;3l:w6,}Њc-33uV0 *C{Je0HS+F,- bǣgIyP53vgӜSuQʻ^4E2 b1PIXvlfrhb,mrO]8Q5G ~)(Nҭ]ɗQXܱ2VT:*6)SG8ֈx{O24A96-ڥS8LĠWht) 74!"G.oo-v%EF|+2xt86*"CJo "tk0b>r'GfhO[1ag.F c_1KS (=;+E}\Qdm\/Y̭ 3K;i ogMnxX9iwnmRw1_<ϖ-{d;E%ze ncjO!駳Oߺ R*~bÄZNaw_{ `7Q <d񨡳"ڋ[hx8sEA6kaY鈖h/Xt!$T bV`R[YT͏LN ?i08PAz02OܴZ$yzpM4ܭ:FD{~%>0@ǐ]i(yB L 2Md O "PhjBe)-HWPv>e` *r,V}AQ B ^s kZXklԽ驉Iyn2w=5!Q|^bH^r5<ieЂ&sCTۦUe{n566%v}"߰]7'ta*xhY҃8^fbr~zK3xdnK 8ɌrP39dڷɅh(Q(+-TuKZOϵHsRK)<1;;;6'Ƿyǵ>5Ĺ8?rn#:%My WpR>  x]{wkUQpbuř1p`Bk5\پDrAn%s'ƭ}{o?TiER 0`AO)-Ϥ?uN?rra${GO D0'jH\~tUIL VFw+W`$B^7TYɛ7<%l.9%2i"oeڴl Zݸ= $팤pZ+8.BiN.4RɞW._ܔ9NB:9eX7"(-L16اAKm*e2nhꎜfguu ˡ1N`ʄ G K|MV6|EbD.adg'uȁ. \a?@ AkWIٌB[AEQ4kG$)FO,r)808҉ZGWKt}g|*@ ֚V;T=nUL@}~D g4C4 ͒6~ w "yXD1ZB'H ߜVoZômϯBTVO҈ENP{;}C7|1 Mem`9l+Szrabꁁ05*l8nH~G0 pQL[GV$Il| YϥldI'޻XDMtDJDpeO! ,-{L­؏dLNB-Qb;!JT}mH,n.+}pV&8]6.z9'ȵa"j$bqMpP~#![vc7sc+Dȕzº֌[Y&חi-pG1/Ӳ6~-ZPɮ̖q,sYJb'm 2m@S|'Xz\uЮGSD'~9oṗKC!B˲jy.R)\%|M_W|ytUlj+ȠiUu@1/޴xӪlGM-$ Ylb썛~V1W? 
@hR-/ :*bM v)nlq Surp0PWPeA@!Q-_4@*-cٮ8YX1~-k#{>$dϏAK˜(F8\ig3d%SCJٰƭ?5"H6BXڢjx4D>ZjjPzG_,H&*52< ۸21+7:F|N%- T[+Ez[P,/AJ +-;%+gRK$&94y?"_#^Wt"B!yxcD I)Nw`׮8M@-ZW.=nF5P` xWٕٚYY@IA|͋6i  ӸIPx XWIp=b07Jl8% >$gҵhʡ8A!{'>set"-R#>NP3P'SU5AQw%Ο![#Za}L!@t o,xs *wS>x]~LzIC !xálgSyxCRLQn(:C7YRwdr)T(l<>T ++NeBjM'n^W*wHT*6LoDO:o}l)}0dohoq>6NKWUۨuY( 9(uaH33) (m(vb i( E,T3'rŋ#.H*6%;nVGYm% ]|9րAOȻdDqX}ьnA}P 硊}S8ΆQkX-+c"LJ;}hcs/9 IjVd:1ΎFy֮Pc@\ȼD T1,I⣤"&hr1!('H V`tL'Ÿ#c暂z"ޫ:t%|WRH| O;-lKQ􉟨Ilcf0~ڙ>AӀ;ׅJtKYӔx Z/DoUɊDB]}y:V޸$D| u1Y"Y x37NƏ/f:KS(ꁳ qTQxTb.ZWGH\$fY]]R0?$3Œ¤`H|j!?*d22FHpI3))@#>nVAa#k^p Z , f/gTbȮca.P*16Bo7@i lvp4 hl* Qڰ)PNuJMG\|3 L&_#C2-FAZ_.ko 7m41Gxrx_ldA?Oh]#E T%4 A$[318;RBK!k@ ` 0KPrƫ]r=ۦ|Gw0r^* ֠\q\S\>(D:v" ?5`0} v3D[1|wPu`Y<" 6WP42ڏ8I\u9LSdTf%K o|-Z= ?ndzӄD*t^;h`\Gly{nJ甋6ݶCU !p'mhלo_9wP.ȿ+4-w𼿮+ Nr,Z%d-wWgݾcP[ iq_\PRhD u#ku[J-x"eAO2P4L޺֘潩yZ0ڦĂ&E'i2iq֡if1#v$xHy*/8'Gxgi+ZJQy/X6EIPnNcuiR箬seTyu^:L*"3ji^tqc_Qv"!Rm9:㍊ⴐ@2Zqx5]XsnܞMZ썛f;`<5\ň-1T Q3^:-2S.{I0ThPu=sMxY4Y^xE Y~4C{&U@6¦o a>+ͪmx@+cNDAlZ-a{金i߾܉H4+l Szj(5ŃiG3;hHsf`Bol%nG`^6U*Eگg)wjs}KMhA%)It ?j SpZcO!0R[j3jp-2E6a"/FVk\0;/♐?׼2fő%aJ*{.mkM`J[_tKn4 I} q42ZySz}.voth{>X7󐁶z%ѳ;S:20'ւ`gމI7d ה(EcƵkR*+MVi {zj?N_t?Nv_͜TP(| CA'*-LQfH;l 2FE-nq4U:g_Mw|/8 j$&6 D|=|&v}KWxȩ/lzfmz @Jih2|6 z[*3 ٥|>LY|3cl䅩!9>Ia^bd8$y@jQʒ0N.^F)8UWUu=JHr(bx@$1@mpAqFA-D{rVv}:*\l] ] NhUbc^7{2C+P)W9.,o*s l[_Hx 0*=" LCb Ol|.Q!^IpjM,:rL0e}4=j6'~d_s\#A@ CxF UٗCIYT5Þc+nrvb~njv173655=ߘɉ[fMځD\vl`7# D+W9VAW52v!ADug*5"5U,b+xh2ńp~5IHq9 LM "+ I ќKCy &??PccD}jsfyZK7Vф(_~/\r;G[!V"Zʋ68:xl F7G^QTZIt=KA程cC]j8%TX5[L8~-pΘCk\77|&Cɕ2 FhU\C 2:P(gUUZvTd& - X|)7OBn2ֆi(,9`آ1 Gѩ󦲀h%mvD:irJf^72?oΚ2 蠞5<)W\wo?}$iDųO rIDt&H/N|ќ9#@XMmdձlyD,%ohFt {bMmocٱlnbFgM~4>}w4ўODӞAխ.D ئ)NOݞNNUy*_ex p9nP8QukCȢώѳAWm(c{_9YӔϥ&_GMkuoo aFԉAAy%陸`Ѵ_j:q {_`JI}fke 8eQGǚ)xNv J{V ԭ`ԋoTwf'v0e<j|ZhXau8wUOjTv q>ׯ]#ƕ._|굫呫׮giSG40P|ҿM?]XW~_p0,0@=' 4$sK>w!!c<1줱kOAʰwCuG bAau ՝0Ca)>j؇9 y~X?CgO QI/R9z~ɨ%S23>2%ӰOjoeLÇW2ȅ^@3[H"j!7%?Pe<\OO Lэ/G)Y#HJKMYH+@_ pc\qR^BWx[͏-!rCu͢)eqGO@t!HB^ñUVݣT%4Q%9;7=[s߂U:[~Yq:dݟ5TׅN.ޟ~ >,p 
?)'3Sժuz6Y/qxK ߌR kZ_|2l9UrSYğ|‚w_ҵH2K Eئ7??AZƭQ&UdWY"88ȗk{S ?"85,E&픅XT7"TV+WjaT^+A:}DO7o N>,`%z PSrg!WFIgoye.5iK,88ίB%Cf_5> J$F19g'g$7|' p\?H#4^FBg??y:`clalv<aig"  3(!!ȿ R9Uz";g<9g{dJnbe8;RˀWP/8EmHw=b|Uk* Ujk&D HQï^y])] wÞB5llkƓSƣ^+ݰxw].^*/Z_=ͻ8#w~,ebDZ!G?B xYq9ߗ牥3i\aW<'3RlҫԤ9#_SP%C$N$ oX~琎%TplP4p辇ש%{ r6!ݠ>ᮿ꺿C 5Ͽ7?q~flΓvWpqMw#Mư 풬A 6,ZeCVTIǠq'sz]ܥTƋ%pyb.+]=2CTBJ$?̡\ #~kqoxߵP +Gtf/uKc?6GvrW(u_QIFPG`4HHF1>P1Q+%8pE^ `RvDJdE`pڲd4#dQK5gEcJx7Ѫ]k"Nq4.Ғ%io&t sE@R Ǵ8.zF*"/L, )@a\ #U,9x[ onK"|l8b\JK*$> RaRP1W }"Z؏zRrS;7ڝ{P EoN$g]Aoz+1؋j3sOw ؋HE;9=H$}M7ǺS]Xk$9  /2{>0TdRPdJ'WnX⺌Wrl܍ TS$Iq+%*GVrRap,.~q % {q&w~ S YEZ~MHN__@φ3tD᭜ $珰dߡa}UU*| d*9Jz(}U);h 8kMTtDT]&q/~o,*8}]R:wdd10[ǯ/Ij>2wTIM~,;._j o;;$_;jWK}?ɗ/pD)[n?zy0N#n"say:׃_c)|nܿa+_+K,Ut$tiRN=|-qdITVߛ}-Jn\ҺTE7=߹z5 ]6r#}?/?w^A|>l<8,}9 ED@q%!@: a1J8b&J3t,^%9@Or4 (z @l(iGH^}d~ /'~c1ă7 XO&qu Y~OS&’om~XB2*yqC ;eb e`H3o=:lTGigWA isèvF ׯ1:vQuxf:rש(tQ4\hӵYe#^w6X; szٟ 7hA¾`دS0܀q='nwYxWPS\`F!%PY&CB;4v)%MG=-tAsOlO'X}Kr[xKg "P;%e.# j4\vѤ]wM2w (i[<ߣ7d7 8Q#&G /*G 189G%]ؕP>,UPSUg-oPhBmo[bZRF` d8¶jB6]ϱ#Q9'hQ٣)$L0lS.h{N&u9 :nl3^c`_7 ͟?yZĢu=J_`,ҍ`~c=&|K>3V7mK;alOxHVZ1T4&+] C7{& ed [' XS 9J/|"TA-ޕ:8:Ş=3%Zp&\lK3f$ԲI[[∻@笺PAIPTx!XL}3{qUZ7yLpg# O[6q%ߒ![P *HELժ-IҬ_JD'v/'_NbKJԜQo\t|nEA4d'aJTrDPGq֐G<ō;g..H@3A(enx)8O `Ni/7^*-t+$e*}@X¯BL7X?K7ֺ"xv#hH>lU/ZZ0h&z J -җ0b p"ql*旷,W.DPf^:BK!aB- Y&\͎_@O^?WyUiR=RO^Ú"L}1=EŦcNWp>$8Yne[PwT?>Ry~?TO%eU<9L0lXH ˆw b\Vxy $@Z7GU4 **VŰ6tP`=lڦ;ҾqW7Z_ezG0=s;fw̲àa}B0Ѿ`~ zD==ƾN {a}=\1+x0b#h*=\C+D`l3;&_x9cUcb%K7 ]R9׳o&zR`<3oX?`ct!xX7Br7 }(a'aaߢ?z|4-0oQ ?10f?GP[`t/,>L c"h_4{QEܳygp߼N} =~c#ZnaLAs8L xuQ hNc4ڻ~bɡj@nhP3=cZRIo }ǰ~=L;cVL?F1ܻ\P]F'zYsH^$7fqV{xChRr=˪п"ճHJ!{q=ZG {s!> %cE =03/͗I x1!ic='zOzy7^f87i_FAOw8齃I]tnnׇ} zi2;t@h8Ic} zSCr|! ֵUAYޠJd]ZΖ,f&ZÕrvGVIz%.S"Q-fWm-CG;GtHזS v5ʲ]Vip9+o&i CGkxT#Šq9kҾ)0obTTJiJZk+QŸՉRK2~q˧}AHq ahP^TNt,/qby9_ Aű*WW |{E|*j @XvXPRC3 8h#-l %8U]%M/;Et+k%K0J+q6D.0)+K G\.U'^sfV7T^ɥׇ3MT S%֊נ{t8+|fȌũ ݠ03l/قeuY;85~U.v^wVQCZX1{*r+U,VM+j? |ƽ@>ZxB%kJ^S0ΉԧgG>wcc{ e[5_jjģp `qqͱp xBכќtRwqlٲJLc&MW"? 
%w4šNQp #r)4YCWʰ~Nj5Q@ 6?_2&cAEYjܨ@3 HM,}pz|. j sּْER2E.N[v,W˪/VxTIGR؎'Nr(]22i2kψ"OU'xϓ M*ˋX1*0t:PzdP%"w’O+svfd9D%[zAdEKlv$E,͉ۀ:+s"gSʈ.Q ߄xC&mFnX%覰W~~B}==R3s Cg,l~ـlCCښ yX\%y2H}bf2@(1Ntz=T GT:T>DHH@xwkHL_7n aP̢:,;SY 9WE$wB}Hů}/$Vp+jG#q)1SsHPŃ ,I OcM=S,HK8&JK*?]DB)-[ro%e\8R?  Ј;hhO?c?'eV{bq4o:y3z2]TGCʤ'Wb\V..%%v"[8ĭ9qP x |f"vB+ L q'၉ov#g>f''偓*IU$W*EŶM0JFtq-߇ 6}da0fc"iO4Փg`r9/.V( K3v2c5]gVMKA5ͼM`eWmy Y"~슽q,e~exSAmM 3-TIzif~G`n-͎Fs"U&OND`C3 0#͹i>OEr 8ۓO} ̹:Ѯ`21k9KXK2M$PLd2h=NRUo!Lۜd]ɿ0tW+6v%"KO0W&mk/\[C'da<Њ٠0e-#afT&񣖘vviU?܈i)FL{f =Rٛwp_97X_|[s6/В =Eg-C|FC uДP6X5aXud )RH LcfhL.7ǗT7+{KmҤf2ۧ]>Դe2O`6*G$TT $Q,M"f@3\ԎwH ljbY ԕ8%G -D^L/Bu"fduiiT~\#:cȇD \sz]cE /;.ٽ>N'}MIHTFR4QS%%ŰiG >ާQJp6+^3f!AO^* -/2jyho&ͨ\QݜM6 38ii~|mF<}q6 nww&\)X9_ǯ!*Քc+}PThe#хd{9Hxۀ!uB}v a;aPW$Oex+^NiǒrKm3ą{t!:?1 0e|yGIM*&l! *UعCؤ~ {\c|AMSxyzJJd)/%6 XUIa"H-1S?ٻZ,G>0OqD0(SXl2-|qKߛAƿ7)aZZZF`6=ʠs.#u0,(IKYC cKT b~cʿ 5eISD (OiVԝ,7'̮Y&T0ZPw]B^mz2?oWg QQx@]Cc %M1|oa5Bǀo-;%+1zdyې,Z,KדHCq_'ws46%ɪ`TL}cp`3 ,<9@Kf$\+N9R:)!%O$NwM oB E/ly`{υǰi1u_W_^ijCI(֍3`]+4=|!c!Cж_'׻NHxiu5]OIu^lYm RZZ\Q@.{DX; oz -sO%<,@Z~;_|gpF!j5JyI&`uy/j[ކ6᏾Mr<9+t܀ z6 :LrN\NA;Yϲ_iB>Gݕ\_C[O$iAuËAlKLH5`?38T%|n*ی4(MQ4Huh9:D@GGE$S~"![<ThTݓMLTzPzU A]*+2C c7I|6Ic ЕItwH]] 3rHvQe%%tލ~gn!JKSJFS"'fOGE LVI$ 5U{c i'a GQkaNaAZ.U*ʼ7֑pkaܷ(+fHz&iDI9Zz486=GP?U2AH5ZnP I +$1lď-"d#c] oq *;Jَ6I ޟV;OJQ[p("mw9Ck7yZGkKSŗsLB.鄾lz,JүPimxMx J|CxK.uotar뮝kobi;F4OԿeG,2$F;qsDT6dZ/3fq\ +-g⥿Nqfܯ%\m emD]ƜZA WױlYxEuY+փqB7|JbM[kedSiA](x'5Kl":ʮ^%DwU1Cl>28{`ы0 bFp\L#>'_HXr\CJ=-@Fwj# %۶\qیYf(@JԢU/||$}&p8LBNZNC(F7t0ޝB&ũ |UfǷcl.<60dnql/^N= Y{3󓋹[S=(oF5[1Ėe}: |[Jx36)g+EmuvGޞ H{1A @S)(yYW]l+K<'KԨsP&K<%6 h )F8ʙYΔLZE6k[xL s+_j*gfbZk5Q|\JZoP3͒c=Ҧ\{=iug t I vo(GF=(+]^cl_d3Z }~DGTd΃blx+!OvC`+R&`B*L%ڙiFFxM֓I4)//_Hi 1OX8魢 XaCL3@$=D1^zGES"PP8zCO|*bW\]z{8~*)cO$P;)N=JvnX;ZQ8.}, Ic%k.=JiܢX<6~CXҊ#AO֋Cz),a|IBsjpX MO 2F8&kQ􄎎d"dL_=@au;Z0eZZ'1˶~,D^㥒R.NL-l[ȃܙK 5+iQ^RZ4LN7/șP_SPɵ[a&k{)toQ%A o^ H 12MH)zUw q 3<gWB~%C/kvBZ[m04(42DPG.tԪm ;<}_~xJ8ᴹ-wb=sSWcKgcSyooެ=gpEQhUQ%#J,KE ֽJaOcHN\uUk]1Y;_\t6-ÝCT#  
'(AIIi]`[#2tJ_tl!'Oմ+]U:.ŵ^BS2BxזgojEO5KjQl73@G"-7{uǮEQ>j~%L1+mt>:ڹ76=518)OUA&&Ҟ!&\DIB) Bm2~~KH ~>JC[2{їy ]vMl܆J2%b.;0w7;>[ D4ޤp%ZF8bO@.{9! tJ޾{ќc'RN-BEܰe ږw%^N!}TWW>խOtixG$17KIH%ψtw4qʜNMݮxsԊT5*~.AHzޝBo:\E;.^6KH?ͽ%[ݤkR6FLanv56y%PbsF쓠^ώjlPfrCGBqyРKڎs/=9n :8=KS '$a8|V(nW]wcy9⃴s[4Ә}&G2VAwqƠߖ!N v)->q@`Aَ3KX+pdEժ7M=\GZKHuRRVKUTy]҂$). +CdލKsY%) :<5qqs6`M旝Sۥԗ~Fzgﴀx:-9i+pwvvlmn/RJ'+mIG.uS{l4;] 7y?x.ۅM`Veiw6 Y rft_.cgG. e8';VFKFV (.Y |DSH8 R,`ia}'"+ 5r 4E G[Vl^HA'_5J2 cW3W<~.w)_K {JʘUi cvE(G_Z y8MEObX.q+zB0N7C`Bma3+o_t$#X\nDv9c`EZ,/|J&M"-\Eh|uUJnw8Z IUϤ#S[HmͮPEBKW1ʢ▿AddK&őd'VyyYbTC;NDPa OR,?!Of |$F,N؆i'Dk&Ǐn,$b.VԞcs:% rb~m-mIOLc*\f0cy|WP bp:X-=ԯHҽ$ $zYHc'2mҖz:TJaVҐXvvj(g&@knY#D6ї Qgu ['>wmVY'6⡩|iC. `8\IVDx}:ܡ/yUN8kغFˀ)zϊҮ8wusΗy1! kr:ZUJn0 ld9v1V2 / o}aD6}5ajYB)* !`XM5۳s a4>g(KHK3i"}wpaHC;X~}f2}_߉_!UDvEv${>tEcЭ,á)n݄fR)$Ip1 mpG?,nY㛖 K4AņR \-wn =hNFsHJ0)N pA92a)W/n 6!+5rRR(rx(>E152#|lz~'qsOT/qGWHhs4٣q^W9zN%S]K;>%Ӓǒ1E9.C$וuAM^]'}O_Ѿn>w|k}{~\_xߕ~%xsXBï'(X mm%I9AxI1؋PaOѕ2/$uP|>'q.1w j] XF&;}Isv ׮ OW^|m׮;ye+4o iwM|ҿM?MQٯ-!m}poNk!ȕׯ;ϢOg|8\*[qZ~v;?KNtk)v*R/}e] aO7ͩD+ GFW.Tśy᭳b>|h^ ͛7>G>چ\W+|M(J5ëyZaBY6++Y6\6 73/Y%tdj4MV6iO[Ŷl+/2vՒy~Rb T1PHdхї+mb^GS+Ty svp66maW<5o3[ ۶I,4 vdߕ@A"čy݂"_E /Y+#%' [!MƤPATbt.aSR,oUhX!QXޭo\z|ѻ0]^c^ S2(A=U[6nx_D XU]!K9zCG'qLTzUjxrIzQ 20팯G0JR*E4_(cL`sV%w`YszV~D*I*mKr"õ%b<ɿΛ]8;.:_.#e ef= a}t;X`P"fXwC]ѫ&W5 "Lru]" dTu,r=T6kyWA%Ni=o`=f75||Mh6chp\~`Q%vz9[7bo:@ q.vuF~AֆS-@ bxlZ!SDaqr+HP8(/aw!@^Q>5 MD33}P5O0ydl $$X!Rɜ V7g+2̪ҷ1Maeqֿ B&8Y*L+a9-]i `&iDCH *4 G[ۮCgk.N/1ƋjV m@0%F_ $ϒ" aS*M{JHh ^e 3/@#UWRtlknrٌiحԋT` E/ /rMԖ0qZ{k*ClۇSj}%r}c^+AZՊoyR!jQ j{⁄"h/BjBB.  
#6*0zLi9A뢂X756!8l9'beo/Fl6ZVQAֆ>jV5d-W,oXH$هpƔ #FUI"BmqSIh>fU6%:VTJ{,h7ݤgtzjm{QAr{AAI=_$~@ꂬ:DiČ5(*lZJmZU=FL_m҄6"/K.GץV 6D&h7jyG3frMZ &F.رbu̶RuD˚QOkc `5';y e\u`*>Q$IDgOy A$Rn&\es$R HĘdyoE[#@ڷ zDD/p1YO s{Lg `navupjC(PLoV>7V nM60t;j]yݝo6__˖N䠕^- ][\hePj/쌇]r_ f`O߰\z{#*%O7Ӻ?郊i+//;Rf4hE,ʛV 6ݨU݈]J\dm8zE)a}V)"١M7Uv%_tTZQ@b Bd|[*VKQ0[fm ]Fw-).mRE4U1ٰK{ a-GhqZU!`10 ZfpJVP+G I-|Mh޴T U}S 7`YgQՆ_l՛tśy?_]UnYHtzAsnV7j-߆3P : Tp\icek-rqCB +36T2Ŗ-BduU-@bY(RI$+L|?^WlGH`J 2Kb[vؾڱ FHvnC(N["S%Gv[y.ֆ^tr3 WDfJ~ b현"rzյZ[#jcA^Kn]#ϻm ؃f KȭSV䗟cmqv Pə6Ż((M-/L=%-f'g枦) )#) /E{jne२] w'ڝ]" echo " Just run PEP8 and HACKING compliance check on files changed since HEAD~1 (or )" echo " -P, --no-pep8 Don't run static code checks" echo " -c, --coverage Generate coverage report" echo " -d, --debug Run tests with testtools instead of testr. This allows you to use the debugger." echo " -h, --help Print this usage message" echo " --virtual-env-path Location of the virtualenv directory" echo " Default: \$(pwd)" echo " --virtual-env-name Name of the virtualenv directory" echo " Default: .venv" echo " --tools-path Location of the tools directory" echo " Default: \$(pwd)" echo "" echo "Note: with no options specified, the script will try to run the tests in a virtual environment," echo " If no virtualenv is found, the script will ask if you would like to create one. If you " echo " prefer to run tests NOT in a virtual environment, simply pass the -N option." 
exit } function process_options { i=1 while [ $i -le $# ]; do case "${!i}" in -h|--help) usage;; -V|--virtual-env) always_venv=1; never_venv=0;; -N|--no-virtual-env) always_venv=0; never_venv=1;; -s|--no-site-packages) no_site_packages=1;; -r|--recreate-db) recreate_db=1;; -n|--no-recreate-db) recreate_db=0;; -f|--force) force=1;; -u|--update) update=1;; -p|--pep8) just_pep8=1;; -8|--pep8-only-changed) just_pep8_changed=1;; -P|--no-pep8) no_pep8=1;; -c|--coverage) coverage=1;; -d|--debug) debug=1;; --virtual-env-path) (( i++ )) venv_path=${!i} ;; --virtual-env-name) (( i++ )) venv_dir=${!i} ;; --tools-path) (( i++ )) tools_path=${!i} ;; -*) testopts="$testopts ${!i}";; *) testargs="$testargs ${!i}" esac (( i++ )) done } tool_path=${tools_path:-$(pwd)} venv_path=${venv_path:-$(pwd)} venv_dir=${venv_name:-.venv} with_venv=tools/with_venv.sh always_venv=0 never_venv=0 force=0 no_site_packages=0 installvenvopts= testargs= testopts= wrapper="" just_pep8=0 just_pep8_changed=0 no_pep8=0 coverage=0 debug=0 recreate_db=1 update=0 LANG=en_US.UTF-8 LANGUAGE=en_US:en LC_ALL=C process_options $@ # Make our paths available to other scripts we call export venv_path export venv_dir export venv_name export tools_dir export venv=${venv_path}/${venv_dir} if [ $no_site_packages -eq 1 ]; then installvenvopts="--no-site-packages" fi function run_tests { # Cleanup *pyc ${wrapper} find . -type f -name "*.pyc" -delete if [ $debug -eq 1 ]; then if [ "$testopts" = "" ] && [ "$testargs" = "" ]; then # Default to running all tests if specific test is not # provided. testargs="discover ./vmware_nsx/tests" fi ${wrapper} python -m testtools.run $testopts $testargs # Short circuit because all of the testr and coverage stuff # below does not make sense when running testtools.run for # debugging purposes. return $? 
fi if [ $coverage -eq 1 ]; then TESTRTESTS="$TESTRTESTS --coverage" else TESTRTESTS="$TESTRTESTS --slowest" fi # Just run the test suites in current environment set +e testargs=`echo "$testargs" | sed -e's/^\s*\(.*\)\s*$/\1/'` TESTRTESTS="$TESTRTESTS --testr-args='--subunit $testopts $testargs'" OS_TEST_PATH=`echo $testargs|grep -o 'vmware_nsx\neutron\.tests[^[:space:]:]\+'|tr . /` if [ -n "$OS_TEST_PATH" ]; then os_test_dir=$(dirname "$OS_TEST_PATH") else os_test_dir='' fi if [ -d "$OS_TEST_PATH" ]; then wrapper="OS_TEST_PATH=$OS_TEST_PATH $wrapper" elif [ -d "$os_test_dir" ]; then wrapper="OS_TEST_PATH=$os_test_dir $wrapper" fi echo "Running \`${wrapper} $TESTRTESTS\`" bash -c "${wrapper} $TESTRTESTS | ${wrapper} subunit2pyunit" RESULT=$? set -e copy_subunit_log if [ $coverage -eq 1 ]; then echo "Generating coverage report in covhtml/" # Don't compute coverage for common code, which is tested elsewhere ${wrapper} coverage combine ${wrapper} coverage html --include='neutron/*' --omit='neutron/openstack/common/*' -d covhtml -i fi return $RESULT } function copy_subunit_log { LOGNAME=`cat .testrepository/next-stream` LOGNAME=$(($LOGNAME - 1)) LOGNAME=".testrepository/${LOGNAME}" cp $LOGNAME subunit.log } function warn_on_flake8_without_venv { if [ $never_venv -eq 1 ]; then echo "**WARNING**:" echo "Running flake8 without virtual env may miss OpenStack HACKING detection" fi } function run_pep8 { echo "Running flake8 ..." warn_on_flake8_without_venv ${wrapper} flake8 } function run_pep8_changed { # NOTE(gilliard) We want use flake8 to check the entirety of every file that has # a change in it. Unfortunately the --filenames argument to flake8 only accepts # file *names* and there are no files named (eg) "nova/compute/manager.py". The # --diff argument behaves surprisingly as well, because although you feed it a # diff, it actually checks the file on disk anyway. 
local target=${testargs:-HEAD~1} local files=$(git diff --name-only $target | tr '\n' ' ') echo "Running flake8 on ${files}" warn_on_flake8_without_venv diff -u --from-file /dev/null ${files} | ${wrapper} flake8 --diff } TESTRTESTS="python setup.py testr" if [ $never_venv -eq 0 ] then # Remove the virtual environment if --force used if [ $force -eq 1 ]; then echo "Cleaning virtualenv..." rm -rf ${venv} fi if [ $update -eq 1 ]; then echo "Updating virtualenv..." python tools/install_venv.py $installvenvopts fi if [ -e ${venv} ]; then wrapper="${with_venv}" else if [ $always_venv -eq 1 ]; then # Automatically install the virtualenv python tools/install_venv.py $installvenvopts wrapper="${with_venv}" else echo -e "No virtual environment found...create one? (Y/n) \c" read use_ve if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then # Install the virtualenv and run the test suite in it python tools/install_venv.py $installvenvopts wrapper=${with_venv} fi fi fi fi # Delete old coverage data from previous runs if [ $coverage -eq 1 ]; then ${wrapper} coverage erase fi if [ $just_pep8 -eq 1 ]; then run_pep8 exit fi if [ $just_pep8_changed -eq 1 ]; then run_pep8_changed exit fi if [ $recreate_db -eq 1 ]; then rm -f tests.sqlite fi run_tests # NOTE(sirp): we only want to run pep8 when we're running the full-test suite, # not when we're running tests individually. To handle this, we need to # distinguish between options (testopts), which begin with a '-', and # arguments (testargs). 
if [ -z "$testargs" ]; then if [ $no_pep8 -eq 0 ]; then run_pep8 fi fi ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.250255 vmware-nsx-15.0.1.dev143/setup.cfg0000644000175000017500000001325000000000000017021 0ustar00coreycorey00000000000000[metadata] name = vmware-nsx summary = VMware NSX library for OpenStack projects description-file = README.rst author = OpenStack author-email = openstack-discuss@lists.openstack.org home-page = https://launchpad.net/vmware-nsx python-requires = >=3.6 classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python :: 3 Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 [files] packages = vmware_nsx [entry_points] console_scripts = neutron-check-nsx-config = vmware_nsx.check_nsx_config:main nsxadmin = vmware_nsx.shell.nsxadmin:main nsx-migration = vmware_nsx.api_replay.cli:main neutron.db.alembic_migrations = vmware-nsx = vmware_nsx.db.migration:alembic_migrations neutron.core_plugins = vmware_nsx = vmware_nsx.plugin:NsxPlugin vmware_nsxv = vmware_nsx.plugin:NsxVPlugin vmware_nsxv3 = vmware_nsx.plugin:NsxV3Plugin vmware_nsxp = vmware_nsx.plugin:NsxPolicyPlugin vmware_dvs = vmware_nsx.plugin:NsxDvsPlugin vmware_nsxtvd = vmware_nsx.plugin:NsxTVDPlugin firewall_drivers = vmware_nsxp_edge_v2 = vmware_nsx.services.fwaas.nsx_p.edge_fwaas_driver_v2:EdgeFwaasPDriverV2 vmware_nsxv_edge_v2 = vmware_nsx.services.fwaas.nsx_v.edge_fwaas_driver_v2:EdgeFwaasVDriverV2 vmware_nsxv3_edge_v2 = vmware_nsx.services.fwaas.nsx_v3.edge_fwaas_driver_v2:EdgeFwaasV3DriverV2 vmware_nsxtvd_edge_v2 = vmware_nsx.services.fwaas.nsx_tv.edge_fwaas_driver_v2:EdgeFwaasTVDriverV2 neutron.service_plugins = vmware_nsxv_qos = vmware_nsx.services.qos.nsx_v.plugin:NsxVQosPlugin vmware_nsxtvd_l2gw = 
vmware_nsx.services.l2gateway.nsx_tvd.plugin:L2GatewayPlugin vmware_nsxtvd_qos = vmware_nsx.services.qos.nsx_tvd.plugin:QoSPlugin vmware_nsxtvd_vpnaas = vmware_nsx.services.vpnaas.nsx_tvd.plugin:VPNPlugin vmware_nsx_vpnaas = vmware_nsx.services.vpnaas.nsx_plugin:NsxVPNPlugin neutron.qos.notification_drivers = vmware_nsxv3_message_queue = vmware_nsx.services.qos.nsx_v3.message_queue:NsxV3QosNotificationDriver neutron.ipam_drivers = vmware_nsxv_ipam = vmware_nsx.services.ipam.nsx_v.driver:NsxvIpamDriver vmware_nsxv3_ipam = vmware_nsx.services.ipam.nsx_v3.driver:Nsxv3IpamDriver vmware_nsxtvd_ipam = vmware_nsx.services.ipam.nsx_tvd.driver:NsxTvdIpamDriver vmware_nsx.extension_drivers = vmware_nsxv_dns = vmware_nsx.extension_drivers.dns_integration:DNSExtensionDriverNSXv vmware_nsxv3_dns = vmware_nsx.extension_drivers.dns_integration:DNSExtensionDriverNSXv3 vmware_nsxp_dns = vmware_nsx.extension_drivers.dns_integration:DNSExtensionDriverNSXp vmware_dvs_dns = vmware_nsx.extension_drivers.dns_integration:DNSExtensionDriverDVS vmware_nsx.neutron.nsxv.router_type_drivers = shared = vmware_nsx.plugins.nsx_v.drivers.shared_router_driver:RouterSharedDriver distributed = vmware_nsx.plugins.nsx_v.drivers.distributed_router_driver:RouterDistributedDriver exclusive = vmware_nsx.plugins.nsx_v.drivers.exclusive_router_driver:RouterExclusiveDriver oslo.config.opts = nsx = vmware_nsx.opts:list_opts oslo.policy.policies = vmware-nsx = vmware_nsx.policies:list_rules neutron.policies = vmware-nsx = vmware_nsx.policies:list_rules networking_sfc.flowclassifier.drivers = vmware-nsxv-sfc = vmware_nsx.services.flowclassifier.nsx_v.driver:NsxvFlowClassifierDriver openstack.cli.extension = nsxclient = vmware_nsx.osc.plugin openstack.nsxclient.v2 = port_create = vmware_nsx.osc.v2.port:NsxCreatePort port_set = vmware_nsx.osc.v2.port:NsxSetPort router_create = vmware_nsx.osc.v2.router:NsxCreateRouter router_set = vmware_nsx.osc.v2.router:NsxSetRouter security_group_create = 
vmware_nsx.osc.v2.security_group:NsxCreateSecurityGroup security_group_set = vmware_nsx.osc.v2.security_group:NsxSetSecurityGroup subnet_create = vmware_nsx.osc.v2.subnet:NsxCreateSubnet subnet_set = vmware_nsx.osc.v2.subnet:NsxSetSubnet project_plugin_create = vmware_nsx.osc.v2.project_plugin_map:CreateProjectPluginMap project_plugin_show = vmware_nsx.osc.v2.project_plugin_map:ShowProjectPluginMap project_plugin_list = vmware_nsx.osc.v2.project_plugin_map:ListProjectPluginMap vmware_nsx.neutron.nsxv.housekeeper.jobs = error_dhcp_edge = vmware_nsx.plugins.nsx_v.housekeeper.error_dhcp_edge:ErrorDhcpEdgeJob error_backup_edge = vmware_nsx.plugins.nsx_v.housekeeper.error_backup_edge:ErrorBackupEdgeJob vmware_nsx.neutron.nsxv3.housekeeper.jobs = orphaned_dhcp_server = vmware_nsx.plugins.nsx_v3.housekeeper.orphaned_dhcp_server:OrphanedDhcpServerJob orphaned_logical_switch = vmware_nsx.plugins.nsx_v3.housekeeper.orphaned_logical_switch:OrphanedLogicalSwitchJob orphaned_logical_router = vmware_nsx.plugins.nsx_v3.housekeeper.orphaned_logical_router:OrphanedLogicalRouterJob orphaned_firewall_section = vmware_nsx.plugins.nsx_v3.housekeeper.orphaned_firewall_section:OrphanedFirewallSectionJob mismatch_logical_port = vmware_nsx.plugins.nsx_v3.housekeeper.mismatch_logical_port:MismatchLogicalportJob octavia.api.drivers = vmwareedge = vmware_nsx.services.lbaas.octavia.octavia_driver:NSXOctaviaDriver octavia.driver_agent.provider_agents = vmwareagent = vmware_nsx.services.lbaas.octavia.octavia_driver:vmware_nsx_provider_agent [build_sphinx] source-dir = doc/source build-dir = doc/build all_files = 1 [upload_sphinx] upload-dir = doc/build/html [compile_catalog] directory = vmware_nsx/locale domain = vmware_nsx [update_catalog] domain = vmware_nsx output_dir = vmware_nsx/locale input_file = vmware_nsx/locale/vmware_nsx.pot [extract_messages] keywords = _ gettext ngettext l_ lazy_gettext mapping_file = babel.cfg output_file = vmware_nsx/locale/vmware_nsx.pot [pbr] 
autodoc_index_modules = 1 [egg_info] tag_build = tag_date = 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/setup.py0000644000175000017500000000200600000000000016707 0ustar00coreycorey00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools # In python < 2.7.4, a lazy loading of package `pbr` will break # setuptools if some other modules registered functions in `atexit`. # solution from: http://bugs.python.org/issue15881#msg170215 try: import multiprocessing # noqa except ImportError: pass setuptools.setup( setup_requires=['pbr>=2.0.0'], pbr=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/test-requirements.txt0000644000175000017500000000106100000000000021436 0ustar00coreycorey00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
hacking>=1.1.0 # Apache-2.0 bandit!=1.6.0,>=1.1.0 # Apache-2.0 coverage!=4.4,>=4.0 # Apache-2.0 fixtures>=3.0.0 # Apache-2.0/BSD flake8>=2.6.0 flake8-import-order==0.12 # LGPLv3 mock>=2.0.0 # BSD psycopg2>=2.7 # LGPL/ZPL PyMySQL>=0.7.6 # MIT License oslotest>=3.2.0 # Apache-2.0 stestr>=1.0.0 # Apache-2.0 testtools>=2.2.0 # MIT pylint==1.7.6 # GPLv2 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1782532 vmware-nsx-15.0.1.dev143/tools/0000755000175000017500000000000000000000000016337 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/tools/__init__.py0000644000175000017500000000000000000000000020436 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/tools/clean.sh0000755000175000017500000000027400000000000017763 0ustar00coreycorey00000000000000#!/bin/bash rm -rf ./*.deb ./*.tar.gz ./*.dsc ./*.changes rm -rf */*.deb rm -rf ./plugins/**/build/ ./plugins/**/dist rm -rf ./plugins/**/lib/neutron_*_plugin.egg-info ./plugins/neutron-* ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/tools/coding-checks.sh0000755000175000017500000000243600000000000021404 0ustar00coreycorey00000000000000#!/bin/sh set -eu usage () { echo "Usage: $0 [OPTION]..." echo "Run vmware-nsx's coding check(s)" echo "" echo " -Y, --pylint [] Run pylint check on the entire vmware-nsx module or just files changed in basecommit (e.g. 
HEAD~1)" echo " -h, --help Print this usage message" echo exit 0 } process_options () { i=1 while [ $i -le $# ]; do eval opt=\$$i case $opt in -h|--help) usage;; -Y|--pylint) pylint=1;; *) scriptargs="$scriptargs $opt" esac i=$((i+1)) done } run_pylint () { local target="${scriptargs:-all}" if [ "$target" = "all" ]; then files="vmware_nsx" else case "$target" in *HEAD~[0-9]*) files=$(git diff --diff-filter=AM --name-only $target -- "*.py");; *) echo "$target is an unrecognized basecommit"; exit 1;; esac fi echo "Running pylint..." echo "You can speed this up by running it on 'HEAD~[0-9]' (e.g. HEAD~1, this change only)..." if [ -n "${files}" ]; then pylint --rcfile=.pylintrc --output-format=colorized ${files} else echo "No python changes in this commit, pylint check not required." exit 0 fi } scriptargs= pylint=1 process_options $@ if [ $pylint -eq 1 ]; then run_pylint exit 0 fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/tools/generate_config_file_samples.sh0000755000175000017500000000144000000000000024537 0ustar00coreycorey00000000000000#!/bin/sh # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. set -e GEN_CMD=oslo-config-generator if ! type "$GEN_CMD" > /dev/null; then echo "ERROR: $GEN_CMD not installed on the system." 
exit 1 fi for file in `ls etc/oslo-config-generator/*`; do $GEN_CMD --config-file=$file done set -x ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/tools/install_venv.py0000644000175000017500000000465400000000000021426 0ustar00coreycorey00000000000000#!/usr/bin/env python # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2010 OpenStack Foundation. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Installation script for Neutron's development virtualenv """ from __future__ import print_function import os import sys import install_venv_common as install_venv def print_help(): help = """ Neutron development environment setup is complete. Neutron development uses virtualenv to track and manage Python dependencies while in development and testing. To activate the Neutron virtualenv for the extent of your current shell session you can run: $ . .venv/bin/activate Or, if you prefer, you can run commands in the virtualenv on a case by case basis by running: $ tools/with_venv.sh Also, make test will automatically use the virtualenv. 
""" print(help) def main(argv): if 'tools_path' in os.environ: root = os.environ['tools_path'] else: root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) if 'venv' in os.environ: venv = os.environ['venv'] else: venv = os.path.join(root, '.venv') pip_requires = os.path.join(root, 'requirements.txt') test_requires = os.path.join(root, 'test-requirements.txt') py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) project = 'Neutron' install = install_venv.InstallVenv(root, venv, pip_requires, test_requires, py_version, project) options = install.parse_args(argv) install.check_python_version() install.check_dependencies() install.create_virtualenv(no_site_packages=options.no_site_packages) install.install_dependencies() print_help() if __name__ == '__main__': sys.exit(main(sys.argv)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/tools/install_venv_common.py0000644000175000017500000001350700000000000022773 0ustar00coreycorey00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Provides methods needed by installation script for OpenStack development virtual environments. Since this script is used to bootstrap a virtualenv from the system's Python environment, it should be kept strictly compatible with Python 2.6. 
Synced in from openstack-common """ from __future__ import print_function import optparse import os import subprocess import sys class InstallVenv(object): def __init__(self, root, venv, requirements, test_requirements, py_version, project): self.root = root self.venv = venv self.requirements = requirements self.test_requirements = test_requirements self.py_version = py_version self.project = project def die(self, message, *args): print(message % args, file=sys.stderr) sys.exit(1) def check_python_version(self): if sys.version_info < (2, 6): self.die("Need Python Version >= 2.6") def run_command_with_code(self, cmd, redirect_output=True, check_exit_code=True): """Runs a command in an out-of-process shell. Returns the output of that command. Working directory is self.root. """ if redirect_output: stdout = subprocess.PIPE else: stdout = None proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout) output = proc.communicate()[0] if check_exit_code and proc.returncode != 0: self.die('Command "%s" failed.\n%s', ' '.join(cmd), output) return (output, proc.returncode) def run_command(self, cmd, redirect_output=True, check_exit_code=True): return self.run_command_with_code(cmd, redirect_output, check_exit_code)[0] def get_distro(self): if (os.path.exists('/etc/fedora-release') or os.path.exists('/etc/redhat-release')): return Fedora( self.root, self.venv, self.requirements, self.test_requirements, self.py_version, self.project) else: return Distro( self.root, self.venv, self.requirements, self.test_requirements, self.py_version, self.project) def check_dependencies(self): self.get_distro().install_virtualenv() def create_virtualenv(self, no_site_packages=True): """Creates the virtual environment and installs PIP. Creates the virtual environment and installs PIP only into the virtual environment. 
""" if not os.path.isdir(self.venv): print('Creating venv...', end=' ') if no_site_packages: self.run_command(['virtualenv', '-q', '--no-site-packages', self.venv]) else: self.run_command(['virtualenv', '-q', self.venv]) print('done.') else: print("venv already exists...") pass def pip_install(self, *args): self.run_command(['tools/with_venv.sh', 'pip', 'install', '--upgrade'] + list(args), redirect_output=False) def install_dependencies(self): print('Installing dependencies with pip (this can take a while)...') # First things first, make sure our venv has the latest pip and # setuptools and pbr self.pip_install('pip>=1.4') self.pip_install('setuptools') self.pip_install('pbr') self.pip_install('-r', self.requirements, '-r', self.test_requirements) def parse_args(self, argv): """Parses command-line arguments.""" parser = optparse.OptionParser() parser.add_option('-n', '--no-site-packages', action='store_true', help="Do not inherit packages from global Python " "install.") return parser.parse_args(argv[1:])[0] class Distro(InstallVenv): def check_cmd(self, cmd): return bool(self.run_command(['which', cmd], check_exit_code=False).strip()) def install_virtualenv(self): if self.check_cmd('virtualenv'): return if self.check_cmd('easy_install'): print('Installing virtualenv via easy_install...', end=' ') if self.run_command(['easy_install', 'virtualenv']): print('Succeeded') return else: print('Failed') self.die('ERROR: virtualenv not found.\n\n%s development' ' requires virtualenv, please install it using your' ' favorite package management tool' % self.project) class Fedora(Distro): """This covers all Fedora-based distributions. 
Includes: Fedora, RHEL, CentOS, Scientific Linux """ def check_pkg(self, pkg): return self.run_command_with_code(['rpm', '-q', pkg], check_exit_code=False)[1] == 0 def install_virtualenv(self): if self.check_cmd('virtualenv'): return if not self.check_pkg('python-virtualenv'): self.die("Please install 'python-virtualenv'.") super(Fedora, self).install_virtualenv() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/tools/migrate_v_conf.sh0000644000175000017500000001436200000000000021663 0ustar00coreycorey00000000000000#!/bin/bash set -eu # This script will generate a set of neutron config files for the NSX policy # plugin given the NSX-V config files usage () { >&2 echo " Usage: $0 [OPTION]... Generate neutron NSX-P config files --v-neutron-conf-path Path for the original NSX-V neutron.conf (mandatory) --v-nsx-ini-path Path for the original NSX-V nsx.ini (mandatory) --p-neutron-conf-path Path for the generated NSX-P neutron.conf (optional) --p-nsx-ini-path Path for the generated NSX-P nsx.ini (optional) --nsx-api-manager IP of the NSX manager (mandatory) --nsx-api-user User for the NSX manager authentication (defaults to admin) --nsx-api-password Password for the NSX manager authentication (defaults to Admin!23) --metadata-proxy NSX metadata proxy name or UUID (mandatory) --dhcp-profile NSX DHCP profile name or UUID (mandatory) --default-overlay-tz NSX overlay transport zone name or UUID (mandatory) --default-vlan-tz NSX VLAN transport zone name or UUID (optional) --default-tier0-router NSX tier0 router name or UUID (mandatory) -h, --help Print this usage message" exit 0 } function process_options { i=1 while [ $i -le $# ]; do case "${!i}" in -h|--help) usage;; --v-neutron-conf-path) (( i++ )) v_neutron_conf=${!i} ;; --p-neutron-conf-path) (( i++ )) p_neutron_conf=${!i} ;; --v-nsx-ini-path) (( i++ )) v_nsx_ini=${!i} ;; --nsx-api-manager) (( i++ )) nsx_api_manager=${!i} ;; 
--nsx-api-password) (( i++ )) nsx_api_password=${!i} ;; --nsx-api-user) (( i++ )) nsx_api_user=${!i} ;; --metadata-proxy) (( i++ )) metadata_proxy=${!i} ;; --dhcp-profile) (( i++ )) dhcp_profile=${!i} ;; --default-overlay-tz) (( i++ )) default_overlay_tz=${!i} ;; --default-vlan-tz) (( i++ )) default_vlan_tz=${!i} ;; --default-tier0-router) (( i++ )) default_tier0_router=${!i} ;; -*) testopts="$testopts ${!i}";; *) testargs="$testargs ${!i}" esac (( i++ )) done # verify existence of mandatory args if [ -z $v_neutron_conf ] || [ -z $v_nsx_ini ] || [ -z $nsx_api_manager ]; then >&2 echo "Missing mandatory arguments" usage fi if [ -z $metadata_proxy ] || [ -z $dhcp_profile ] || [ -z $default_overlay_tz ] || [ -z $default_tier0_router ]; then >&2 echo "Missing mandatory arguments" usage fi # Verify config files exists if [[ ! -f "$v_neutron_conf" ]]; then >&2 echo "$v_neutron_conf File not found" usage fi if [[ ! -f "$v_nsx_ini" ]]; then >&2 echo "$v_nsx_ini File not found" usage fi } function create_neutron_conf { # Copy the nsx-v conf file cp $v_neutron_conf $p_neutron_conf # Change the core plugin sed -i 's/^core_plugin = vmware_nsxv/core_plugin = vmware_nsxp/' $p_neutron_conf # Remove unsupported services sed -i 's/neutron_dynamic_routing.services.bgp.bgp_plugin.BgpPlugin//' $p_neutron_conf sed -i 's/networking_l2gw.services.l2gateway.plugin.L2GatewayPlugin//' $p_neutron_conf # Replace service plugins sed -i 's/vmware_nsxv_qos/neutron.services.qos.qos_plugin.QoSPlugin/' $p_neutron_conf # Replace nsx-v FWaaS driver sed -i 's/vmware_nsxv_edge_v2/vmware_nsxp_edge_v2/' $p_neutron_conf sed -i 's/vmware_nsxv_edge/vmware_nsxp_edge_v2/' $p_neutron_conf # Replace the FWaaS service provider temporarily to allow the migration sed -i 's/neutron_fwaas.services.firewall.service_drivers.agents.agents.FirewallAgentDriver/vmware_nsx.services.fwaas.common.api_replay_driver.ApiReplayFirewallAgentDriver/' $p_neutron_conf echo "Created $p_neutron_conf for policy plugin neutron.conf" } 
function create_nsx_ini { cp $v_nsx_ini $p_nsx_ini # Replace nsx-v drivers sed -i 's/vmware_nsxv_dns/vmware_nsxp_dns/' $p_nsx_ini # Add the nsxp section echo "" >> $p_nsx_ini echo "[nsx_p]" >> $p_nsx_ini echo "nsx_api_managers = $nsx_api_manager" >> $p_nsx_ini echo "nsx_api_password = $nsx_api_password" >> $p_nsx_ini echo "nsx_api_user = $nsx_api_user" >> $p_nsx_ini echo "metadata_proxy = $metadata_proxy" >> $p_nsx_ini echo "dhcp_profile = $dhcp_profile" >> $p_nsx_ini echo "default_overlay_tz = $default_overlay_tz" >> $p_nsx_ini if [ -n "$default_vlan_tz" ]; then echo "default_vlan_tz = $default_vlan_tz" >> $p_nsx_ini fi echo "default_tier0_router = $default_tier0_router" >> $p_nsx_ini # Add the nat-firewall flag to match the existing nsx-v status echo "firewall_match_internal_addr = false" >> $p_nsx_ini grep "availability_zones" $v_nsx_ini >> $p_nsx_ini echo "" >> $p_nsx_ini # Add the api_replay flag under the DEFAULT section so that the migration can start sed -i '/\[DEFAULT\]/a api_replay_mode = true' $p_nsx_ini # Add comment to manually update the AZs later sed -i '/^\[az\:.*\]/a # Please add mandatory availability zone config here' $p_nsx_ini echo "Created $p_nsx_ini for policy plugin nsx.ini" if grep -q "Please add mandatory" "$p_nsx_ini"; then echo "Please add mandatory configuration fields to availability zones in nsx.ini" fi } testargs= testopts= v_neutron_conf="" p_neutron_conf=${p_neutron_conf:-$(pwd)/neutron.conf.p} v_nsx_ini="" p_nsx_ini=${p_nsx_ini:-$(pwd)/nsx.ini.p} nsx_api_manager="" nsx_api_password=${nsx_api_password:-"Admin!23Admin"} nsx_api_user=${nsx_api_user:-"admin"} metadata_proxy="" dhcp_profile="" default_overlay_tz="" default_vlan_tz="" default_tier0_router="" process_options $@ create_neutron_conf create_nsx_ini ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/tools/misc-sanity-checks.sh0000755000175000017500000000461000000000000022375 
0ustar00coreycorey00000000000000#! /bin/sh # Copyright (C) 2014 VA Linux Systems Japan K.K. # Copyright (C) 2014 YAMAMOTO Takashi # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. TMPDIR=`mktemp -d /tmp/${0##*/}.XXXXXX` || exit 1 export TMPDIR trap "rm -rf $TMPDIR" EXIT FAILURES=$TMPDIR/failures check_no_symlinks_allowed () { # Symlinks break the package build process, so ensure that they # do not slip in, except hidden symlinks. if [ $(find . -type l ! -path '*/.*' | wc -l) -ge 1 ]; then echo "Symlinks are not allowed!" >>$FAILURES fi } check_pot_files_errors () { # The job vmware-nsx-propose-translation-update does not update from # transifex since our po files contain duplicate entries where # obsolete entries duplicate normal entries. Prevent obsolete # entries to slip in if [ $(find vmware_nsx -type f -regex '.*\.pot?' | wc -l) -ge 1 ]; then find vmware_nsx -type f -regex '.*\.pot?' \ -print0|xargs -0 -n 1 msgfmt --check-format \ -o /dev/null if [ "$?" -ne 0 ]; then echo "PO files syntax is not correct!" >>$FAILURES fi fi } check_identical_policy_files () { # For unit tests, we maintain their own policy.json file to make test suite # independent of whether it's executed from the vmware-nsx source tree or from # site-packages installation path. We don't want two copies of the same # file to diverge, so checking that they are identical diff etc/policy.json vmware-nsx/tests/etc/policy.json 2>&1 > /dev/null if [ "$?" 
-ne 0 ]; then echo "policy.json files must be identical!" >>$FAILURES fi } # Add your checks here... check_no_symlinks_allowed check_pot_files_errors #check_identical_policy_files # Fail, if there are emitted failures if [ -f $FAILURES ]; then cat $FAILURES exit 1 fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/tools/test-setup.sh0000755000175000017500000000350400000000000021015 0ustar00coreycorey00000000000000#!/bin/bash -xe # This script will be run by OpenStack CI before unit tests are run, # it sets up the test system as needed. # Developers should setup their test systems in a similar way. # This setup needs to be run as a user that can run sudo. # The root password for the MySQL database; pass it in via # MYSQL_ROOT_PW. DB_ROOT_PW=${MYSQL_ROOT_PW:-insecure_slave} # This user and its password are used by the tests, if you change it, # your tests might fail. DB_USER=openstack_citest DB_PW=openstack_citest sudo -H mysqladmin -u root password $DB_ROOT_PW # It's best practice to remove anonymous users from the database. If # an anonymous user exists, then it matches first for connections and # other connections from that host will not work. sudo -H mysql -u root -p$DB_ROOT_PW -h localhost -e " DELETE FROM mysql.user WHERE User=''; FLUSH PRIVILEGES; GRANT ALL PRIVILEGES ON *.* TO '$DB_USER'@'%' identified by '$DB_PW' WITH GRANT OPTION;" # Now create our database. 
mysql -u $DB_USER -p$DB_PW -h 127.0.0.1 -e " SET default_storage_engine=MYISAM; DROP DATABASE IF EXISTS openstack_citest; CREATE DATABASE openstack_citest CHARACTER SET utf8;" # Same for PostgreSQL # Setup user root_roles=$(sudo -H -u postgres psql -t -c " SELECT 'HERE' from pg_roles where rolname='$DB_USER'") if [[ ${root_roles} == *HERE ]];then sudo -H -u postgres psql -c "ALTER ROLE $DB_USER WITH SUPERUSER LOGIN PASSWORD '$DB_PW'" else sudo -H -u postgres psql -c "CREATE ROLE $DB_USER WITH SUPERUSER LOGIN PASSWORD '$DB_PW'" fi # Store password for tests cat << EOF > $HOME/.pgpass *:*:*:$DB_USER:$DB_PW EOF chmod 0600 $HOME/.pgpass # Now create our database psql -h 127.0.0.1 -U $DB_USER -d template1 -c "DROP DATABASE IF EXISTS openstack_citest" createdb -h 127.0.0.1 -U $DB_USER -l C -T template0 -E utf8 openstack_citest ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/tools/with_venv.sh0000755000175000017500000000152500000000000020712 0ustar00coreycorey00000000000000#!/bin/bash # Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
tools_path=${tools_path:-$(dirname $0)} venv_path=${venv_path:-${tools_path}} venv_dir=${venv_name:-/../.venv} TOOLS=${tools_path} VENV=${venv:-${venv_path}/${venv_dir}} source $VENV/bin/activate && "$@" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/tox.ini0000644000175000017500000001530300000000000016514 0ustar00coreycorey00000000000000[tox] envlist = py37,pep8 minversion = 2.0 skipsdist = True [testenv] basepython = python3 setenv = VIRTUAL_ENV={envdir} PYTHONWARNINGS=default::DeprecationWarning passenv = TRACE_FAILONLY GENERATE_HASHES http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY usedevelop = True deps = -c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/branch/master/upper-constraints.txt} -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt whitelist_externals = sh stestr commands = stestr run {posargs} stestr slowest [testenv:common] # Fake job to define environment variables shared between dsvm/non-dsvm jobs setenv = OS_TEST_TIMEOUT=180 commands = false [testenv:dev] # run locally (not in the gate) using editable mode # https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs # note that order is important to ensure dependencies don't override commands = pip install -q -e "git+https://opendev.org/openstack/networking-l2gw#egg=networking_l2gw" pip install -q -e "git+https://opendev.org/openstack/networking-sfc#egg=networking_sfc" pip install -q -e "git+https://opendev.org/openstack/neutron-fwaas#egg=neutron_fwaas" pip install -q -e "git+https://opendev.org/openstack/neutron-dynamic-routing#egg=neutron_dynamic_routing" pip install -q -e "git+https://opendev.org/openstack/neutron-vpnaas#egg=neutron_vpnaas" pip install -q -e "git+https://opendev.org/openstack/octavia#egg=octavia" pip install -q -e "git+https://opendev.org/openstack/vmware-nsxlib#egg=vmware_nsxlib" pip install -q -e 
"git+https://opendev.org/openstack/neutron#egg=neutron" [testenv:functional] setenv = {[testenv]setenv} {[testenv:common]setenv} OS_TEST_PATH=./vmware_nsx/tests/functional OS_LOG_PATH={env:OS_LOG_PATH:/opt/stack/logs} deps = {[testenv]deps} -r{toxinidir}/vmware_nsx/tests/functional/requirements.txt [testenv:dsvm-functional] setenv = OS_SUDO_TESTING=1 OS_FAIL_ON_MISSING_DEPS=1 OS_TEST_TIMEOUT=180 sitepackages=True deps = {[testenv:functional]deps} commands = stestr run {posargs} stestr slowest [tox:jenkins] sitepackages = True [testenv:releasenotes] deps = -c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/branch/master/upper-constraints.txt} -r{toxinidir}/doc/requirements.txt commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html [testenv:py36] basepython = python3.6 setenv = OS_FAIL_ON_MISSING_DEPS=1 [testenv:py37] basepython = python3.7 setenv = OS_FAIL_ON_MISSING_DEPS=1 [testenv:py38] basepython = python3.8 setenv = OS_FAIL_ON_MISSING_DEPS=1 [testenv:py3-dev] commands = {[testenv:dev]commands} pip freeze stestr run {posargs} whitelist_externals = stestr [testenv:py36-dev] basepython = python3.6 commands = {[testenv:dev]commands} pip freeze stestr run {posargs} whitelist_externals = stestr [testenv:pep8] commands = # If it is easier to add a check via a shell script, consider adding it in this file sh ./tools/misc-sanity-checks.sh # Checks for coding and style guidelines flake8 sh ./tools/coding-checks.sh --pylint '{posargs}' neutron-db-manage --subproject vmware-nsx check_migration {[testenv:genconfig]commands} {[testenv:genpolicy]commands} whitelist_externals = sh bash [testenv:pep8-dev] commands = {[testenv:dev]commands} pip freeze # If it is easier to add a check via a shell script, consider adding it in this file sh ./tools/misc-sanity-checks.sh # Checks for coding and style guidelines flake8 sh ./tools/coding-checks.sh --pylint '{posargs}' neutron-db-manage --subproject 
vmware-nsx check_migration {[testenv:genconfig]commands} {[testenv:genpolicy]commands} whitelist_externals = sh [testenv:bandit] commands = bandit -r vmware_nsx -n 5 -ll [testenv:cover] commands = python setup.py testr --coverage --testr-args='{posargs}' coverage report [testenv:docs] deps = -c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/branch/master/upper-constraints.txt} -r{toxinidir}/doc/requirements.txt commands = sphinx-build -b html doc/source doc/build/html [flake8] # H106: Don't put vim configuration in source files # H203: Use assertIs(Not)None to check for None # H204: Use assert(Not)Equal to check for equality # H205: Use assert(Greater|Less)(Equal) for comparison enable-extensions=H106,H203,H204,H205 # E125 continuation line does not distinguish itself from next logical line # E126 continuation line over-indented for hanging indent # E128 continuation line under-indented for visual indent # E129 visually indented line with same indent as next logical line # E265 block comment should start with ‘# ‘ # H305 imports not grouped correctly # H307 like imports should be grouped together # H404 multi line docstring should start with a summary # H405 multi line docstring summary not separated with an empty line # H904 Wrap long lines in parentheses instead of a backslash # TODO(dougwig) -- uncomment this to test for remaining linkages # N530 direct neutron imports not allowed # N531 translations hints # W504 line break after binary operator ignore = E125,E126,E128,E129,E265,H305,H307,H404,H405,H904,N530,N531,W504 show-source = true builtins = _ exclude = .venv,.git,.tox,dist,doc,*lib/python*,*egg,build,.ropeproject import-order-style = pep8 [hacking] import_exceptions = vmware_nsx._i18n, local-check-factory = neutron_lib.hacking.checks.factory [testenv:genconfig] commands = {toxinidir}/tools/generate_config_file_samples.sh [testenv:genpolicy] commands = oslopolicy-sample-generator 
--config-file=etc/oslo-policy-generator/policy.conf [testenv:uuidgen] commands = check-uuid --fix [testenv:lower-constraints] deps = -c{toxinidir}/lower-constraints.txt -r{toxinidir}/test-requirements.txt -r{toxinidir}/doc/requirements.txt -r{toxinidir}/requirements.txt [testenv:lower-constraints-dev] commands = {[testenv:dev]commands} pip freeze stestr run {posargs} deps = -c{toxinidir}/lower-constraints.txt -r{toxinidir}/test-requirements.txt -r{toxinidir}/doc/requirements.txt -r{toxinidir}/requirements.txt [testenv:venv] commands = {posargs} [testenv:requirements-check-dev] commands = pip install -q -e "git+https://opendev.org/openstack/requirements#egg=requirements" pip freeze # must have openstack/requirements on latest src/master in ../requirements {toxinidir}/../requirements/playbooks/files/project-requirements-change.py --reqs={toxinidir}/../requirements {toxinidir} deps = -c{toxinidir}/lower-constraints.txt -r{toxinidir}/test-requirements.txt -r{toxinidir}/doc/requirements.txt -r{toxinidir}/requirements.txt ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1822534 vmware-nsx-15.0.1.dev143/vmware_nsx/0000755000175000017500000000000000000000000017370 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/__init__.py0000644000175000017500000000131500000000000021501 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from neutron.common import eventlet_utils eventlet_utils.monkey_patch() import os # noqa NSX_EXT_PATH = os.path.join(os.path.dirname(__file__), 'extensions') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/_i18n.py0000644000175000017500000000203400000000000020657 0ustar00coreycorey00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import oslo_i18n DOMAIN = "vmware_nsx" _translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) # The primary translation function using the well-known name "_" _ = _translators.primary # The contextual translation function using the name "_C" _C = _translators.contextual_form # The plural translation function using the name "_P" _P = _translators.plural_form def get_available_languages(): return oslo_i18n.get_available_languages(DOMAIN) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1822534 vmware-nsx-15.0.1.dev143/vmware_nsx/api_client/0000755000175000017500000000000000000000000021477 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/api_client/__init__.py0000644000175000017500000000205600000000000023613 0ustar00coreycorey00000000000000# Copyright 2012 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from six.moves import http_client as httplib from vmware_nsx._i18n import _ def ctrl_conn_to_str(conn): """Returns a string representing a connection URL to the controller.""" if isinstance(conn, httplib.HTTPSConnection): proto = "https://" elif isinstance(conn, httplib.HTTPConnection): proto = "http://" else: raise TypeError(_('Invalid connection type: %s') % type(conn)) return "%s%s:%s" % (proto, conn.host, conn.port) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/api_client/base.py0000644000175000017500000002324200000000000022766 0ustar00coreycorey00000000000000# Copyright 2012 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc import time from oslo_config import cfg from oslo_log import log as logging import six from six.moves import http_client as httplib from vmware_nsx import api_client LOG = logging.getLogger(__name__) GENERATION_ID_TIMEOUT = -1 DEFAULT_CONCURRENT_CONNECTIONS = 3 DEFAULT_CONNECT_TIMEOUT = 5 @six.add_metaclass(abc.ABCMeta) class ApiClientBase(object): """An abstract baseclass for all API client implementations.""" def _create_connection(self, host, port, is_ssl): if is_ssl: return httplib.HTTPSConnection(host, port, timeout=self._connect_timeout) return httplib.HTTPConnection(host, port, timeout=self._connect_timeout) @staticmethod def _conn_params(http_conn): is_ssl = isinstance(http_conn, httplib.HTTPSConnection) return (http_conn.host, http_conn.port, is_ssl) @property def user(self): return self._user @property def password(self): return self._password @property def config_gen(self): # If NSX_gen_timeout is not -1 then: # Maintain a timestamp along with the generation ID. Hold onto the # ID long enough to be useful and block on sequential requests but # not long enough to persist when Onix db is cleared, which resets # the generation ID, causing the DAL to block indefinitely with some # number that's higher than the cluster's value. if self._gen_timeout != -1: ts = self._config_gen_ts if ts is not None: if (time.time() - ts) > self._gen_timeout: return None return self._config_gen @config_gen.setter def config_gen(self, value): if self._config_gen != value: if self._gen_timeout != -1: self._config_gen_ts = time.time() self._config_gen = value def auth_cookie(self, conn): cookie = None data = self._get_provider_data(conn) if data: cookie = data[1] return cookie def set_auth_cookie(self, conn, cookie): data = self._get_provider_data(conn) if data: self._set_provider_data(conn, (data[0], cookie)) def acquire_connection(self, auto_login=True, headers=None, rid=-1): '''Check out an available HTTPConnection instance. Blocks until a connection is available. 
:auto_login: automatically logins before returning conn :headers: header to pass on to login attempt :param rid: request id passed in from request eventlet. :returns: An available HTTPConnection instance or None if no api_providers are configured. ''' if not self._api_providers: LOG.warning("[%d] no API providers currently available.", rid) return None if self._conn_pool.empty(): LOG.debug("[%d] Waiting to acquire API client connection.", rid) priority, conn = self._conn_pool.get() now = time.time() if getattr(conn, 'last_used', now) < now - cfg.CONF.conn_idle_timeout: LOG.info("[%(rid)d] Connection %(conn)s idle for %(sec)0.2f " "seconds; reconnecting.", {'rid': rid, 'conn': api_client.ctrl_conn_to_str(conn), 'sec': now - conn.last_used}) conn = self._create_connection(*self._conn_params(conn)) conn.last_used = now conn.priority = priority # stash current priority for release qsize = self._conn_pool.qsize() LOG.debug("[%(rid)d] Acquired connection %(conn)s. %(qsize)d " "connection(s) available.", {'rid': rid, 'conn': api_client.ctrl_conn_to_str(conn), 'qsize': qsize}) if auto_login and self.auth_cookie(conn) is None: self._wait_for_login(conn, headers) return conn def release_connection(self, http_conn, bad_state=False, service_unavail=False, rid=-1): '''Mark HTTPConnection instance as available for check-out. :param http_conn: An HTTPConnection instance obtained from this instance. :param bad_state: True if http_conn is known to be in a bad state (e.g. connection fault.) :service_unavail: True if http_conn returned 503 response. :param rid: request id passed in from request eventlet. ''' conn_params = self._conn_params(http_conn) if self._conn_params(http_conn) not in self._api_providers: LOG.debug("[%(rid)d] Released connection %(conn)s is not an " "API provider for the cluster", {'rid': rid, 'conn': api_client.ctrl_conn_to_str(http_conn)}) return elif hasattr(http_conn, "no_release"): return priority = http_conn.priority if bad_state: # Reconnect to provider. 
LOG.warning("[%(rid)d] Connection returned in bad state, " "reconnecting to %(conn)s", {'rid': rid, 'conn': api_client.ctrl_conn_to_str(http_conn)}) http_conn = self._create_connection(*self._conn_params(http_conn)) elif service_unavail: # http_conn returned a service unavailable response, put other # connections to the same controller at end of priority queue, conns = [] while not self._conn_pool.empty(): priority, conn = self._conn_pool.get() if self._conn_params(conn) == conn_params: priority = self._next_conn_priority self._next_conn_priority += 1 conns.append((priority, conn)) for priority, conn in conns: self._conn_pool.put((priority, conn)) # put http_conn at end of queue also priority = self._next_conn_priority self._next_conn_priority += 1 self._conn_pool.put((priority, http_conn)) LOG.debug("[%(rid)d] Released connection %(conn)s. %(qsize)d " "connection(s) available.", {'rid': rid, 'conn': api_client.ctrl_conn_to_str(http_conn), 'qsize': self._conn_pool.qsize()}) def _wait_for_login(self, conn, headers=None): '''Block until a login has occurred for the current API provider.''' data = self._get_provider_data(conn) if data is None: LOG.error("Login request for an invalid connection: '%s'", api_client.ctrl_conn_to_str(conn)) return provider_sem = data[0] if provider_sem.acquire(blocking=False): try: cookie = self._login(conn, headers) self.set_auth_cookie(conn, cookie) finally: provider_sem.release() else: LOG.debug("Waiting for auth to complete") # Wait until we can acquire then release provider_sem.acquire(blocking=True) provider_sem.release() def _get_provider_data(self, conn_or_conn_params, default=None): """Get data for specified API provider. Args: conn_or_conn_params: either a HTTP(S)Connection object or the resolved conn_params tuple returned by self._conn_params(). 
default: conn_params if ones passed aren't known Returns: Data associated with specified provider """ conn_params = self._normalize_conn_params(conn_or_conn_params) return self._api_provider_data.get(conn_params, default) def _set_provider_data(self, conn_or_conn_params, data): """Set data for specified API provider. Args: conn_or_conn_params: either a HTTP(S)Connection object or the resolved conn_params tuple returned by self._conn_params(). data: data to associate with API provider """ conn_params = self._normalize_conn_params(conn_or_conn_params) if data is None: del self._api_provider_data[conn_params] else: self._api_provider_data[conn_params] = data def _normalize_conn_params(self, conn_or_conn_params): """Normalize conn_param tuple. Args: conn_or_conn_params: either a HTTP(S)Connection object or the resolved conn_params tuple returned by self._conn_params(). Returns: Normalized conn_param tuple """ if (not isinstance(conn_or_conn_params, tuple) and not isinstance(conn_or_conn_params, httplib.HTTPConnection)): LOG.debug("Invalid conn_params value: '%s'", str(conn_or_conn_params)) return conn_or_conn_params if isinstance(conn_or_conn_params, httplib.HTTPConnection): conn_params = self._conn_params(conn_or_conn_params) else: conn_params = conn_or_conn_params host, port, is_ssl = conn_params if port is None: port = 443 if is_ssl else 80 return (host, port, is_ssl) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/api_client/client.py0000644000175000017500000001312400000000000023330 0ustar00coreycorey00000000000000# Copyright 2012 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_log import log as logging from six.moves import http_client as httplib from vmware_nsx.api_client import base from vmware_nsx.api_client import eventlet_client from vmware_nsx.api_client import eventlet_request from vmware_nsx.api_client import exception from vmware_nsx.api_client import version LOG = logging.getLogger(__name__) class NsxApiClient(eventlet_client.EventletApiClient): """The Nsx API Client.""" def __init__(self, api_providers, user, password, concurrent_connections=base.DEFAULT_CONCURRENT_CONNECTIONS, gen_timeout=base.GENERATION_ID_TIMEOUT, use_https=True, connect_timeout=base.DEFAULT_CONNECT_TIMEOUT, http_timeout=75, retries=2, redirects=2): '''Constructor. Adds the following: :param http_timeout: how long to wait before aborting an unresponsive controller (and allow for retries to another controller in the cluster) :param retries: the number of concurrent connections. :param redirects: the number of concurrent connections. ''' super(NsxApiClient, self).__init__( api_providers, user, password, concurrent_connections=concurrent_connections, gen_timeout=gen_timeout, use_https=use_https, connect_timeout=connect_timeout) self._request_timeout = http_timeout * retries self._http_timeout = http_timeout self._retries = retries self._redirects = redirects self._version = None # NOTE(salvatore-orlando): This method is not used anymore. Login is now # performed automatically inside the request eventlet if necessary. def login(self, user=None, password=None): '''Login to NSX controller. Assumes same password is used for all controllers. 
:param user: controller user (usually admin). Provided for backwards compatibility. In the normal mode of operation this should be None. :param password: controller password. Provided for backwards compatibility. In the normal mode of operation this should be None. ''' if user: self._user = user if password: self._password = password return self._login() def request(self, method, url, body="", content_type="application/json"): '''Issues request to controller.''' g = eventlet_request.GenericRequestEventlet( self, method, url, body, content_type, auto_login=True, http_timeout=self._http_timeout, retries=self._retries, redirects=self._redirects) g.start() response = g.join() LOG.debug('Request returns "%s"', response) # response is a modified HTTPResponse object or None. # response.read() will not work on response as the underlying library # request_eventlet.ApiRequestEventlet has already called this # method in order to extract the body and headers for processing. # ApiRequestEventlet derived classes call .read() and # .getheaders() on the HTTPResponse objects and store the results in # the response object's .body and .headers data members for future # access. if response is None: # Timeout. LOG.error('Request timed out: %(method)s to %(url)s', {'method': method, 'url': url}) raise exception.RequestTimeout() status = response.status if status == httplib.UNAUTHORIZED: raise exception.UnAuthorizedRequest() # Fail-fast: Check for exception conditions and raise the # appropriate exceptions for known error codes. if status in exception.ERROR_MAPPINGS: LOG.error("Received error code: %s", status) LOG.error("Server Error Message: %s", response.body) exception.ERROR_MAPPINGS[status](response) # Continue processing for non-error condition. 
if (status != httplib.OK and status != httplib.CREATED and status != httplib.NO_CONTENT): LOG.error("%(method)s to %(url)s, unexpected response code: " "%(status)d (content = '%(body)s')", {'method': method, 'url': url, 'status': response.status, 'body': response.body}) return None if not self._version: self._version = version.find_version(response.headers) return response.body def get_version(self): if not self._version: # Determine the controller version by querying the # cluster nodes. Currently, the version will be the # one of the server that responds. self.request('GET', '/ws.v1/control-cluster/node') if not self._version: LOG.error('Unable to determine NSX version. ' 'Plugin might not work as expected.') return self._version ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/api_client/eventlet_client.py0000644000175000017500000001462600000000000025246 0ustar00coreycorey00000000000000# Copyright 2012 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import time from oslo_log import log as logging from vmware_nsx.api_client import base from vmware_nsx.api_client import eventlet_request import eventlet LOG = logging.getLogger(__name__) class EventletApiClient(base.ApiClientBase): """Eventlet-based implementation of NSX ApiClient ABC.""" def __init__(self, api_providers, user, password, concurrent_connections=base.DEFAULT_CONCURRENT_CONNECTIONS, gen_timeout=base.GENERATION_ID_TIMEOUT, use_https=True, connect_timeout=base.DEFAULT_CONNECT_TIMEOUT): '''Constructor :param api_providers: a list of tuples of the form: (host, port, is_ssl). :param user: login username. :param password: login password. :param concurrent_connections: total number of concurrent connections. :param use_https: whether or not to use https for requests. :param connect_timeout: connection timeout in seconds. :param gen_timeout controls how long the generation id is kept if set to -1 the generation id is never timed out ''' if not api_providers: api_providers = [] self._api_providers = set([tuple(p) for p in api_providers]) self._api_provider_data = {} # tuple(semaphore, session_cookie) for p in self._api_providers: self._set_provider_data(p, (eventlet.semaphore.Semaphore(1), None)) self._user = user self._password = password self._concurrent_connections = concurrent_connections self._use_https = use_https self._connect_timeout = connect_timeout self._config_gen = None self._config_gen_ts = None self._gen_timeout = gen_timeout # Connection pool is a list of queues. self._conn_pool = eventlet.queue.PriorityQueue() self._next_conn_priority = 1 for __ in range(concurrent_connections): for host, port, is_ssl in api_providers: conn = self._create_connection(host, port, is_ssl) self._conn_pool.put((self._next_conn_priority, conn)) self._next_conn_priority += 1 def acquire_redirect_connection(self, conn_params, auto_login=True, headers=None): """Check out or create connection to redirected NSX API server. 
Args: conn_params: tuple specifying target of redirect, see self._conn_params() auto_login: returned connection should have valid session cookie headers: headers to pass on if auto_login Returns: An available HTTPConnection instance corresponding to the specified conn_params. If a connection did not previously exist, new connections are created with the highest priority in the connection pool and one of these new connections returned. """ result_conn = None data = self._get_provider_data(conn_params) if data: # redirect target already exists in provider data and connections # to the provider have been added to the connection pool. Try to # obtain a connection from the pool, note that it's possible that # all connection to the provider are currently in use. conns = [] while not self._conn_pool.empty(): priority, conn = self._conn_pool.get_nowait() if not result_conn and self._conn_params(conn) == conn_params: conn.priority = priority result_conn = conn else: conns.append((priority, conn)) for priority, conn in conns: self._conn_pool.put((priority, conn)) # hack: if no free connections available, create new connection # and stash "no_release" attribute (so that we only exceed # self._concurrent_connections temporarily) if not result_conn: conn = self._create_connection(*conn_params) conn.priority = 0 # redirect connections have highest priority conn.no_release = True result_conn = conn else: #redirect target not already known, setup provider lists self._api_providers.update([conn_params]) self._set_provider_data(conn_params, (eventlet.semaphore.Semaphore(1), None)) # redirects occur during cluster upgrades, i.e. 
results to old # redirects to new, so give redirect targets highest priority priority = 0 for i in range(self._concurrent_connections): conn = self._create_connection(*conn_params) conn.priority = priority if i == self._concurrent_connections - 1: break self._conn_pool.put((priority, conn)) result_conn = conn if result_conn: result_conn.last_used = time.time() if auto_login and self.auth_cookie(conn) is None: self._wait_for_login(result_conn, headers) return result_conn def _login(self, conn=None, headers=None): '''Issue login request and update authentication cookie.''' cookie = None g = eventlet_request.LoginRequestEventlet( self, self._user, self._password, conn, headers) g.start() ret = g.join() if ret: if isinstance(ret, Exception): LOG.error('Login error "%s"', ret) raise ret cookie = ret.getheader("Set-Cookie") if cookie: LOG.debug("Saving new authentication cookie '%s'", cookie) return cookie # Register as subclass. base.ApiClientBase.register(EventletApiClient) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/api_client/eventlet_request.py0000644000175000017500000002147200000000000025455 0ustar00coreycorey00000000000000# Copyright 2012 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import eventlet from oslo_log import log as logging from oslo_serialization import jsonutils from six.moves import http_client as httplib from six.moves import urllib from vmware_nsx._i18n import _ from vmware_nsx.api_client import request LOG = logging.getLogger(__name__) USER_AGENT = "Neutron eventlet client/2.0" class EventletApiRequest(request.ApiRequest): '''Eventlet-based ApiRequest class. This class will form the basis for eventlet-based ApiRequest classes ''' # Maximum number of green threads present in the system at one time. API_REQUEST_POOL_SIZE = request.DEFAULT_API_REQUEST_POOL_SIZE # Pool of green threads. One green thread is allocated per incoming # request. Incoming requests will block when the pool is empty. API_REQUEST_POOL = eventlet.GreenPool(API_REQUEST_POOL_SIZE) # A unique id is assigned to each incoming request. When the current # request id reaches MAXIMUM_REQUEST_ID it wraps around back to 0. MAXIMUM_REQUEST_ID = request.DEFAULT_MAXIMUM_REQUEST_ID # The request id for the next incoming request. CURRENT_REQUEST_ID = 0 def __init__(self, client_obj, url, method="GET", body=None, headers=None, retries=request.DEFAULT_RETRIES, auto_login=True, redirects=request.DEFAULT_REDIRECTS, http_timeout=request.DEFAULT_HTTP_TIMEOUT, client_conn=None): '''Constructor.''' self._api_client = client_obj self._url = url self._method = method self._body = body self._headers = headers or {} self._request_timeout = http_timeout * retries self._retries = retries self._auto_login = auto_login self._redirects = redirects self._http_timeout = http_timeout self._client_conn = client_conn self._request_error = None if "User-Agent" not in self._headers: self._headers["User-Agent"] = USER_AGENT self._green_thread = None # Retrieve and store this instance's unique request id. self._request_id = EventletApiRequest.CURRENT_REQUEST_ID # Update the class variable that tracks request id. 
# Request IDs wrap around at MAXIMUM_REQUEST_ID next_request_id = self._request_id + 1 next_request_id %= self.MAXIMUM_REQUEST_ID EventletApiRequest.CURRENT_REQUEST_ID = next_request_id @classmethod def _spawn(cls, func, *args, **kwargs): '''Allocate a green thread from the class pool.''' return cls.API_REQUEST_POOL.spawn(func, *args, **kwargs) def spawn(self, func, *args, **kwargs): '''Spawn a new green thread with the supplied function and args.''' return self.__class__._spawn(func, *args, **kwargs) @classmethod def joinall(cls): '''Wait for all outstanding requests to complete.''' return cls.API_REQUEST_POOL.waitall() def join(self): '''Wait for instance green thread to complete.''' if self._green_thread is not None: return self._green_thread.wait() return Exception(_('Joining an invalid green thread')) def start(self): '''Start request processing.''' self._green_thread = self.spawn(self._run) def copy(self): '''Return a copy of this request instance.''' return EventletApiRequest( self._api_client, self._url, self._method, self._body, self._headers, self._retries, self._auto_login, self._redirects, self._http_timeout) def _run(self): '''Method executed within green thread.''' if self._request_timeout: # No timeout exception escapes the with block. with eventlet.timeout.Timeout(self._request_timeout, False): return self._handle_request() LOG.info('[%d] Request timeout.', self._rid()) self._request_error = Exception(_('Request timeout')) return None else: return self._handle_request() def _handle_request(self): '''First level request handling.''' attempt = 0 timeout = 0 response = None while response is None and attempt <= self._retries: eventlet.greenthread.sleep(timeout) attempt += 1 req = self._issue_request() # automatically raises any exceptions returned. 
if isinstance(req, httplib.HTTPResponse): timeout = 0 if attempt <= self._retries: if req.status in (httplib.UNAUTHORIZED, httplib.FORBIDDEN): continue elif req.status == httplib.SERVICE_UNAVAILABLE: timeout = 0.5 continue # else fall through to return the error code LOG.debug("[%(rid)d] Completed request '%(method)s %(url)s'" ": %(status)s", {'rid': self._rid(), 'method': self._method, 'url': self._url, 'status': req.status}) self._request_error = None response = req else: LOG.info('[%(rid)d] Error while handling request: ' '%(req)s', {'rid': self._rid(), 'req': req}) self._request_error = req response = None return response class LoginRequestEventlet(EventletApiRequest): '''Process a login request.''' def __init__(self, client_obj, user, password, client_conn=None, headers=None): if headers is None: headers = {} headers.update({"Content-Type": "application/x-www-form-urlencoded"}) body = urllib.parse.urlencode({"username": user, "password": password}) super(LoginRequestEventlet, self).__init__( client_obj, "/ws.v1/login", "POST", body, headers, auto_login=False, client_conn=client_conn) def session_cookie(self): if self.successful(): return self.value.getheader("Set-Cookie") return None class GetApiProvidersRequestEventlet(EventletApiRequest): '''Get a list of API providers.''' def __init__(self, client_obj): url = "/ws.v1/control-cluster/node?fields=roles" super(GetApiProvidersRequestEventlet, self).__init__( client_obj, url, "GET", auto_login=True) def api_providers(self): """Parse api_providers from response. Returns: api_providers in [(host, port, is_ssl), ...] 
format """ def _provider_from_listen_addr(addr): # (pssl|ptcp):: => (host, port, is_ssl) parts = addr.split(':') return (parts[1], int(parts[2]), parts[0] == 'pssl') try: if self.successful(): ret = [] body = jsonutils.loads(self.value.body) for node in body.get('results', []): for role in node.get('roles', []): if role.get('role') == 'api_provider': addr = role.get('listen_addr') if addr: ret.append(_provider_from_listen_addr(addr)) return ret except Exception as e: LOG.warning("[%(rid)d] Failed to parse API provider: %(e)s", {'rid': self._rid(), 'e': e}) # intentionally fall through return None class GenericRequestEventlet(EventletApiRequest): '''Handle a generic request.''' def __init__(self, client_obj, method, url, body, content_type, auto_login=False, http_timeout=request.DEFAULT_HTTP_TIMEOUT, retries=request.DEFAULT_RETRIES, redirects=request.DEFAULT_REDIRECTS): headers = {"Content-Type": content_type} super(GenericRequestEventlet, self).__init__( client_obj, url, method, body, headers, retries=retries, auto_login=auto_login, redirects=redirects, http_timeout=http_timeout) def session_cookie(self): if self.successful(): return self.value.getheader("Set-Cookie") return None request.ApiRequest.register(EventletApiRequest) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/api_client/exception.py0000644000175000017500000000614300000000000024053 0ustar00coreycorey00000000000000# Copyright 2014 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # from vmware_nsx._i18n import _ class NsxApiException(Exception): """Base NSX API Client Exception. To correctly use this class, inherit from it and define a 'message' property. That message will get printf'd with the keyword arguments provided to the constructor. """ message = _("An unknown exception occurred.") def __init__(self, **kwargs): try: self._error_string = self.message % kwargs except Exception: # at least get the core message out if something happened self._error_string = self.message def __str__(self): return self._error_string class UnAuthorizedRequest(NsxApiException): message = _("Server denied session's authentication credentials.") class ResourceNotFound(NsxApiException): message = _("An entity referenced in the request was not found.") class Conflict(NsxApiException): message = _("Request conflicts with configuration on a different " "entity.") class ServiceUnavailable(NsxApiException): message = _("Request could not completed because the associated " "resource could not be reached.") class Forbidden(NsxApiException): message = _("The request is forbidden from accessing the " "referenced resource.") class ReadOnlyMode(Forbidden): message = _("Create/Update actions are forbidden when in read-only mode.") class RequestTimeout(NsxApiException): message = _("The request has timed out.") class BadRequest(NsxApiException): message = _("The server is unable to fulfill the request due " "to a bad syntax") class InvalidSecurityCertificate(BadRequest): message = _("The backend received an invalid security certificate.") def fourZeroZero(response=None): if response and "Invalid SecurityCertificate" in response.body: raise InvalidSecurityCertificate() raise BadRequest() def fourZeroFour(response=None): raise ResourceNotFound() def fourZeroNine(response=None): raise Conflict() def fiveZeroThree(response=None): raise ServiceUnavailable() def 
fourZeroThree(response=None): if 'read-only' in response.body: raise ReadOnlyMode() else: raise Forbidden() def zero(self, response=None): raise NsxApiException() ERROR_MAPPINGS = { 400: fourZeroZero, 404: fourZeroFour, 405: zero, 409: fourZeroNine, 503: fiveZeroThree, 403: fourZeroThree, 301: zero, 307: zero, 500: zero, 501: zero } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/api_client/request.py0000644000175000017500000002673000000000000023551 0ustar00coreycorey00000000000000# Copyright 2012 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import abc import copy import socket import time import eventlet from oslo_log import log as logging from oslo_utils import excutils import six from six.moves import http_client as httplib from six.moves import urllib from vmware_nsx._i18n import _ from vmware_nsx import api_client LOG = logging.getLogger(__name__) DEFAULT_HTTP_TIMEOUT = 30 DEFAULT_RETRIES = 2 DEFAULT_REDIRECTS = 2 DEFAULT_API_REQUEST_POOL_SIZE = 1000 DEFAULT_MAXIMUM_REQUEST_ID = 4294967295 DOWNLOAD_TIMEOUT = 180 @six.add_metaclass(abc.ABCMeta) class ApiRequest(object): '''An abstract baseclass for all ApiRequest implementations. This defines the interface and property structure for both eventlet and gevent-based ApiRequest classes. ''' # List of allowed status codes. 
ALLOWED_STATUS_CODES = [ httplib.OK, httplib.CREATED, httplib.NO_CONTENT, httplib.MOVED_PERMANENTLY, httplib.TEMPORARY_REDIRECT, httplib.BAD_REQUEST, httplib.UNAUTHORIZED, httplib.FORBIDDEN, httplib.NOT_FOUND, httplib.CONFLICT, httplib.INTERNAL_SERVER_ERROR, httplib.SERVICE_UNAVAILABLE ] @abc.abstractmethod def start(self): pass @abc.abstractmethod def join(self): pass @abc.abstractmethod def copy(self): pass def _issue_request(self): '''Issue a request to a provider.''' conn = (self._client_conn or self._api_client.acquire_connection(True, copy.copy(self._headers), rid=self._rid())) if conn is None: error = Exception(_("No API connections available")) self._request_error = error return error url = self._url LOG.debug("[%(rid)d] Issuing - request url: %(conn)s " "body: %(body)s", {'rid': self._rid(), 'conn': self._request_str(conn, url), 'body': self._body}) issued_time = time.time() is_conn_error = False is_conn_service_unavail = False response = None try: redirects = 0 while (redirects <= self._redirects): # Update connection with user specified request timeout, # the connect timeout is usually smaller so we only set # the request timeout after a connection is established if conn.sock is None: conn.connect() conn.sock.settimeout(self._http_timeout) elif conn.sock.gettimeout() != self._http_timeout: conn.sock.settimeout(self._http_timeout) headers = copy.copy(self._headers) cookie = self._api_client.auth_cookie(conn) if cookie: headers["Cookie"] = cookie gen = self._api_client.config_gen if gen: headers["X-Nvp-Wait-For-Config-Generation"] = gen LOG.debug("Setting X-Nvp-Wait-For-Config-Generation " "request header: '%s'", gen) try: conn.request(self._method, url, self._body, headers) except Exception as e: with excutils.save_and_reraise_exception(): LOG.warning("[%(rid)d] Exception issuing request: " "%(e)s", {'rid': self._rid(), 'e': e}) response = conn.getresponse() response.body = response.read() response.headers = response.getheaders() elapsed_time = 
time.time() - issued_time LOG.debug("[%(rid)d] Completed request '%(conn)s': " "%(status)s (%(elapsed)s seconds)", {'rid': self._rid(), 'conn': self._request_str(conn, url), 'status': response.status, 'elapsed': elapsed_time}) new_gen = response.getheader('X-Nvp-Config-Generation', None) if new_gen: LOG.debug("Reading X-Nvp-config-Generation response " "header: '%s'", new_gen) if (self._api_client.config_gen is None or self._api_client.config_gen < int(new_gen)): self._api_client.config_gen = int(new_gen) if response.status == httplib.UNAUTHORIZED: # If request is unauthorized, clear the session cookie # for the current provider so that subsequent requests # to the same provider triggers re-authentication. self._api_client.set_auth_cookie(conn, None) elif response.status == httplib.SERVICE_UNAVAILABLE: is_conn_service_unavail = True if response.status not in [httplib.MOVED_PERMANENTLY, httplib.TEMPORARY_REDIRECT]: break elif redirects >= self._redirects: LOG.info("[%d] Maximum redirects exceeded, aborting " "request", self._rid()) break redirects += 1 conn, url = self._redirect_params(conn, response.headers, self._client_conn is None) if url is None: response.status = httplib.INTERNAL_SERVER_ERROR break LOG.info("[%(rid)d] Redirecting request to: %(conn)s", {'rid': self._rid(), 'conn': self._request_str(conn, url)}) # yield here, just in case we are not out of the loop yet eventlet.greenthread.sleep(0) # If we receive any of these responses, then # our server did not process our request and may be in an # errored state. Raise an exception, which will cause the # conn to be released with is_conn_error == True # which puts the conn on the back of the client's priority # queue. 
if (response.status == httplib.INTERNAL_SERVER_ERROR and response.status > httplib.NOT_IMPLEMENTED): LOG.warning("[%(rid)d] Request '%(method)s %(url)s' " "received: %(status)s", {'rid': self._rid(), 'method': self._method, 'url': self._url, 'status': response.status}) raise Exception(_('Server error return: %s'), response.status) return response except socket.error: is_conn_service_unavail = True except Exception as e: if isinstance(e, httplib.BadStatusLine): msg = (_("Invalid server response")) else: msg = str(e) if response is None: elapsed_time = time.time() - issued_time LOG.warning("[%(rid)d] Failed request '%(conn)s': '%(msg)s' " "(%(elapsed)s seconds)", {'rid': self._rid(), 'conn': self._request_str(conn, url), 'msg': msg, 'elapsed': elapsed_time}) self._request_error = e is_conn_error = True return e finally: # Make sure we release the original connection provided by the # acquire_connection() call above. if self._client_conn is None: self._api_client.release_connection(conn, is_conn_error, is_conn_service_unavail, rid=self._rid()) def _redirect_params(self, conn, headers, allow_release_conn=False): """Process redirect response, create new connection if necessary. Args: conn: connection that returned the redirect response headers: response headers of the redirect response allow_release_conn: if redirecting to a different server, release existing connection back to connection pool. Returns: Return tuple(conn, url) where conn is a connection object to the redirect target and url is the path of the API request """ url = None for name, value in headers: if name.lower() == "location": url = value break if not url: LOG.warning("[%d] Received redirect status without location " "header field", self._rid()) return (conn, None) # Accept location with the following format: # 1. /path, redirect to same node # 2. scheme://hostname:[port]/path where scheme is https or http # Reject others # 3. e.g. 
relative paths, unsupported scheme, unspecified host result = urllib.parse.urlparse(url) if not result.scheme and not result.hostname and result.path: if result.path[0] == "/": if result.query: url = "%s?%s" % (result.path, result.query) else: url = result.path return (conn, url) # case 1 else: LOG.warning("[%(rid)d] Received invalid redirect " "location: '%(url)s'", {'rid': self._rid(), 'url': url}) return (conn, None) # case 3 elif result.scheme not in ["http", "https"] or not result.hostname: LOG.warning("[%(rid)d] Received malformed redirect " "location: %(url)s", {'rid': self._rid(), 'url': url}) return (conn, None) # case 3 # case 2, redirect location includes a scheme # so setup a new connection and authenticate if allow_release_conn: self._api_client.release_connection(conn) conn_params = (result.hostname, result.port, result.scheme == "https") conn = self._api_client.acquire_redirect_connection(conn_params, True, self._headers) if result.query: url = "%s?%s" % (result.path, result.query) else: url = result.path return (conn, url) def _rid(self): '''Return current request id.''' return self._request_id @property def request_error(self): '''Return any errors associated with this instance.''' return self._request_error def _request_str(self, conn, url): '''Return string representation of connection.''' return "%s %s/%s" % (self._method, api_client.ctrl_conn_to_str(conn), url) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/api_client/version.py0000644000175000017500000000263400000000000023543 0ustar00coreycorey00000000000000# Copyright 2012 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_log import log as logging LOG = logging.getLogger(__name__) def find_version(headers): """Retrieve NSX controller version from response headers.""" for (header_name, header_value) in (headers or ()): try: if header_name == 'server': return Version(header_value.split('/')[1]) except IndexError: LOG.warning("Unable to fetch NSX version from response " "headers :%s", headers) class Version(object): """Abstracts NSX version by exposing major and minor.""" def __init__(self, version): self.full_version = version.split('.') self.major = int(self.full_version[0]) self.minor = int(self.full_version[1]) def __str__(self): return '.'.join(self.full_version) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1822534 vmware-nsx-15.0.1.dev143/vmware_nsx/api_replay/0000755000175000017500000000000000000000000021515 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/api_replay/__init__.py0000644000175000017500000000000000000000000023614 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/api_replay/cli.py0000644000175000017500000001560100000000000022641 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import argparse from vmware_nsx.api_replay import client DEFAULT_DOMAIN_ID = 'default' DEFAULT_LOGFILE = 'nsx_migration.log' class ApiReplayCli(object): def __init__(self): args = self._setup_argparse() client.ApiReplayClient( source_os_tenant_name=args.source_os_project_name, source_os_tenant_domain_id=args.source_os_project_domain_id, source_os_username=args.source_os_username, source_os_user_domain_id=args.source_os_user_domain_id, source_os_password=args.source_os_password, source_os_auth_url=args.source_os_auth_url, dest_os_tenant_name=args.dest_os_project_name, dest_os_tenant_domain_id=args.dest_os_project_domain_id, dest_os_username=args.dest_os_username, dest_os_user_domain_id=args.dest_os_user_domain_id, dest_os_password=args.dest_os_password, dest_os_auth_url=args.dest_os_auth_url, dest_plugin=args.dest_plugin, use_old_keystone=args.use_old_keystone, octavia_os_tenant_name=args.octavia_os_project_name, octavia_os_tenant_domain_id=args.octavia_os_project_domain_id, octavia_os_username=args.octavia_os_username, octavia_os_user_domain_id=args.octavia_os_user_domain_id, octavia_os_password=args.octavia_os_password, octavia_os_auth_url=args.octavia_os_auth_url, neutron_conf=args.neutron_conf, ext_net_map=args.external_networks_map, logfile=args.logfile, max_retry=args.max_retry) def _setup_argparse(self): parser = argparse.ArgumentParser() # Arguments required to connect to source # neutron which we will fetch all of the data from. 
parser.add_argument( "--source-os-username", required=True, help="The source os-username to use to " "gather neutron resources with.") parser.add_argument( "--source-os-user-domain-id", default=DEFAULT_DOMAIN_ID, help="The source os-user-domain-id to use to " "gather neutron resources with.") parser.add_argument( "--source-os-project-name", required=True, help="The source os-project-name to use to " "gather neutron resource with.") parser.add_argument( "--source-os-project-domain-id", default=DEFAULT_DOMAIN_ID, help="The source os-project-domain-id to use to " "gather neutron resource with.") parser.add_argument( "--source-os-password", required=True, help="The password for this user.") parser.add_argument( "--source-os-auth-url", required=True, help="They keystone api endpoint for this user.") # Arguments required to connect to the dest neutron which # we will recreate all of these resources over. parser.add_argument( "--dest-os-username", required=True, help="The dest os-username to use to" "gather neutron resources with.") parser.add_argument( "--dest-os-user-domain-id", default=DEFAULT_DOMAIN_ID, help="The dest os-user-domain-id to use to" "gather neutron resources with.") parser.add_argument( "--dest-os-project-name", required=True, help="The dest os-project-name to use to " "gather neutron resource with.") parser.add_argument( "--dest-os-project-domain-id", default=DEFAULT_DOMAIN_ID, help="The dest os-project-domain-id to use to " "gather neutron resource with.") parser.add_argument( "--dest-os-password", required=True, help="The password for this user.") parser.add_argument( "--dest-os-auth-url", required=True, help="The keystone api endpoint for this user.") parser.add_argument( "--dest-plugin", default='nsx-p', help="The core plugin of the destination nsx-t/nsx-p.") parser.add_argument( "--use-old-keystone", default=False, action='store_true', help="Use old keystone client for source authentication.") # Arguments required to connect to the octavia client 
(read only) parser.add_argument( "--octavia-os-username", help="The octavia os-username to use to " "gather loadbalancers resources with.") parser.add_argument( "--octavia-os-user-domain-id", default=DEFAULT_DOMAIN_ID, help="The octavia os-user-domain-id to use to " "gather loadbalancers resources with.") parser.add_argument( "--octavia-os-project-name", help="The octavia os-project-name to use to " "gather loadbalancers resource with.") parser.add_argument( "--octavia-os-project-domain-id", default=DEFAULT_DOMAIN_ID, help="The octavia os-project-domain-id to use to " "gather loadbalancers resource with.") parser.add_argument( "--octavia-os-password", help="The password for this octavia user.") parser.add_argument( "--octavia-os-auth-url", help="They keystone api endpoint for this octavia user.") parser.add_argument( "--logfile", default=DEFAULT_LOGFILE, help="Output logfile.") parser.add_argument( "--neutron_conf", default='/etc/neutron/neutron.conf', help="neutron config file path.") parser.add_argument( "--external-networks-map", help="Path to a json file mapping external network neutron ID " "to tier0 ID.") parser.add_argument( "--max-retry", default=10, help="Maximum number of retrying different operations.") # NOTE: this will return an error message if any of the # require options are missing. return parser.parse_args() def main(): ApiReplayCli() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/api_replay/client.py0000644000175000017500000012530500000000000023353 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import logging import socket import six from keystoneauth1 import identity from keystoneauth1 import session from neutronclient.common import exceptions as n_exc from neutronclient.v2_0 import client from octaviaclient.api.v2 import octavia from oslo_config import cfg import oslo_messaging as messaging from oslo_messaging.rpc import dispatcher from oslo_serialization import jsonutils from oslo_utils import excutils from neutron.common import config as neutron_config from octavia_lib.api.drivers import driver_lib from vmware_nsx.api_replay import utils from vmware_nsx.common import nsxv_constants from vmware_nsx.services.lbaas.octavia import constants as d_const LOG = logging.getLogger(__name__) LOG.setLevel(logging.INFO) # For internal testing only use_old_keystone_on_dest = False class ApiReplayClient(utils.PrepareObjectForMigration): def __init__(self, source_os_username, source_os_user_domain_id, source_os_tenant_name, source_os_tenant_domain_id, source_os_password, source_os_auth_url, dest_os_username, dest_os_user_domain_id, dest_os_tenant_name, dest_os_tenant_domain_id, dest_os_password, dest_os_auth_url, dest_plugin, use_old_keystone, octavia_os_username, octavia_os_user_domain_id, octavia_os_tenant_name, octavia_os_tenant_domain_id, octavia_os_password, octavia_os_auth_url, neutron_conf, ext_net_map, logfile, max_retry): # Init config and logging if neutron_conf: neutron_config.init(args=['--config-file', neutron_conf]) if logfile: f_handler = logging.FileHandler(logfile) f_formatter = logging.Formatter( '%(asctime)s %(levelname)s %(message)s') 
f_handler.setFormatter(f_formatter) LOG.addHandler(f_handler) self.max_retry = max_retry # connect to both clients if use_old_keystone: LOG.info("Using old keystone for source neutron") # Since we are not sure what keystone version will be used on the # source setup, we add an option to use the v2 client self.source_neutron = client.Client( username=source_os_username, tenant_name=source_os_tenant_name, password=source_os_password, auth_url=source_os_auth_url) else: self.source_neutron = self.connect_to_client( username=source_os_username, user_domain_id=source_os_user_domain_id, tenant_name=source_os_tenant_name, tenant_domain_id=source_os_tenant_domain_id, password=source_os_password, auth_url=source_os_auth_url) if use_old_keystone_on_dest: LOG.info("Using old keystone for destination neutron") self.dest_neutron = client.Client( username=dest_os_username, tenant_name=dest_os_tenant_name, password=dest_os_password, auth_url=dest_os_auth_url) else: self.dest_neutron = self.connect_to_client( username=dest_os_username, user_domain_id=dest_os_user_domain_id, tenant_name=dest_os_tenant_name, tenant_domain_id=dest_os_tenant_domain_id, password=dest_os_password, auth_url=dest_os_auth_url) if octavia_os_auth_url: self.octavia = self.connect_to_octavia( username=octavia_os_username, user_domain_id=octavia_os_user_domain_id, tenant_name=octavia_os_tenant_name, tenant_domain_id=octavia_os_tenant_domain_id, password=octavia_os_password, auth_url=octavia_os_auth_url) else: self.octavia = None self.dest_plugin = dest_plugin if ext_net_map: with open(ext_net_map, 'r') as myfile: data = myfile.read() self.ext_net_map = jsonutils.loads(data) else: self.ext_net_map = None LOG.info("Starting NSX migration to %s.", self.dest_plugin) # Migrate all the objects self.migrate_security_groups() self.migrate_qos_policies() routers_routes, routers_gw_info = self.migrate_routers() self.migrate_networks_subnets_ports(routers_gw_info) self.migrate_floatingips() 
self.migrate_routers_routes(routers_routes) self.migrate_fwaas() if self.octavia: self.migrate_octavia() LOG.info("NSX migration is Done.") def _get_session(self, username, user_domain_id, tenant_name, tenant_domain_id, password, auth_url): auth = identity.Password(username=username, user_domain_id=user_domain_id, password=password, project_name=tenant_name, project_domain_id=tenant_domain_id, auth_url=auth_url) return session.Session(auth=auth) def connect_to_client(self, username, user_domain_id, tenant_name, tenant_domain_id, password, auth_url): sess = self._get_session(username, user_domain_id, tenant_name, tenant_domain_id, password, auth_url) neutron = client.Client(session=sess) return neutron def connect_to_octavia(self, username, user_domain_id, tenant_name, tenant_domain_id, password, auth_url): sess = self._get_session(username, user_domain_id, tenant_name, tenant_domain_id, password, auth_url) endpoint = sess.get_endpoint(service_type='load-balancer') client = octavia.OctaviaAPI( session=sess, service_type='load-balancer', endpoint=endpoint, ) return client def find_subnet_by_id(self, subnet_id, subnets): for subnet in subnets: if subnet['id'] == subnet_id: return subnet def get_ports_on_network(self, network_id, ports): """Returns all the ports on a given network_id.""" ports_on_network = [] for port in ports: if port['network_id'] == network_id: ports_on_network.append(port) return ports_on_network def have_id(self, id, groups): """If the sg_id is in groups return true else false.""" for group in groups: if id == group['id']: return group return False def migrate_qos_rule(self, dest_policy, source_rule): """Add the QoS rule from the source to the QoS policy If there is already a rule of that type, skip it since the QoS policy can have only one rule of each type """ #TODO(asarfaty) also take rule direction into account once #ingress support is upstream rule_type = source_rule.get('type') dest_rules = dest_policy.get('rules') if dest_rules: for 
dest_rule in dest_rules: if dest_rule['type'] == rule_type: return pol_id = dest_policy['id'] body = self.prepare_qos_rule(source_rule) try: if rule_type == 'bandwidth_limit': rule = self.dest_neutron.create_bandwidth_limit_rule( pol_id, body={'bandwidth_limit_rule': body}) elif rule_type == 'dscp_marking': rule = self.dest_neutron.create_dscp_marking_rule( pol_id, body={'dscp_marking_rule': body}) else: LOG.info("QoS rule type %(rule)s is not supported for policy " "%(pol)s", {'rule': rule_type, 'pol': pol_id}) LOG.info("created QoS policy %s rule %s", pol_id, rule) except Exception as e: LOG.error("Failed to create QoS rule for policy %(pol)s: %(e)s", {'pol': pol_id, 'e': e}) def migrate_qos_policies(self): """Migrates QoS policies from source to dest neutron.""" # first fetch the QoS policies from both the # source and destination neutron server try: dest_qos_pols = self.dest_neutron.list_qos_policies()['policies'] except n_exc.NotFound: # QoS disabled on dest LOG.info("QoS is disabled on destination: ignoring QoS policies") self.dest_qos_support = False return self.dest_qos_support = True try: source_qos_pols = self.source_neutron.list_qos_policies()[ 'policies'] except n_exc.NotFound: # QoS disabled on source return for pol in source_qos_pols: dest_pol = self.have_id(pol['id'], dest_qos_pols) # If the policy already exists on the dest_neutron if dest_pol: # make sure all the QoS policy rules are there and # create them if not for qos_rule in pol['rules']: self.migrate_qos_rule(dest_pol, qos_rule) # dest server doesn't have the group so we create it here. 
else: qos_rules = pol.pop('rules') try: body = self.prepare_qos_policy(pol) new_pol = self.dest_neutron.create_qos_policy( body={'policy': body}) except Exception as e: LOG.error("Failed to create QoS policy %(pol)s: %(e)s", {'pol': pol['id'], 'e': e}) continue else: LOG.info("Created QoS policy %s", new_pol) for qos_rule in qos_rules: self.migrate_qos_rule(new_pol['policy'], qos_rule) def migrate_security_groups(self): """Migrates security groups from source to dest neutron.""" # first fetch the security groups from both the # source and dest neutron server source_sec_groups = self.source_neutron.list_security_groups() dest_sec_groups = self.dest_neutron.list_security_groups() source_sec_groups = source_sec_groups['security_groups'] dest_sec_groups = dest_sec_groups['security_groups'] total_num = len(source_sec_groups) LOG.info("Migrating %s security groups", total_num) for count, sg in enumerate(source_sec_groups, 1): dest_sec_group = self.have_id(sg['id'], dest_sec_groups) # If the security group already exists on the dest_neutron if dest_sec_group: # make sure all the security group rules are there and # create them if not for sg_rule in sg['security_group_rules']: if(self.have_id(sg_rule['id'], dest_sec_group['security_group_rules']) is False): try: body = self.prepare_security_group_rule(sg_rule) self.dest_neutron.create_security_group_rule( {'security_group_rule': body}) except n_exc.Conflict: # NOTE(arosen): when you create a default # security group it is automatically populated # with some rules. When we go to create the rules # that already exist because of a match an error # is raised here but that's okay. pass # dest server doesn't have the group so we create it here. 
else: sg_rules = sg.pop('security_group_rules') try: body = self.prepare_security_group(sg) new_sg = self.dest_neutron.create_security_group( {'security_group': body}) LOG.info("Created security-group %(count)s/%(total)s: " "%(sg)s", {'count': count, 'total': total_num, 'sg': new_sg}) except Exception as e: LOG.error("Failed to create security group (%(sg)s): " "%(e)s", {'sg': sg, 'e': e}) # Note - policy security groups will have no rules, and will # be created on the destination with the default rules only for sg_rule in sg_rules: try: body = self.prepare_security_group_rule(sg_rule) rule = self.dest_neutron.create_security_group_rule( {'security_group_rule': body}) LOG.debug("created security group rule %s", rule['id']) except Exception: # NOTE(arosen): when you create a default # security group it is automatically populated # with some rules. When we go to create the rules # that already exist because of a match an error # is raised here but that's okay. pass def get_dest_availablity_zones(self, resource): azs = self.dest_neutron.list_availability_zones()['availability_zones'] az_names = [az['name'] for az in azs if az['resource'] == resource] return az_names def migrate_routers(self): """Migrates routers from source to dest neutron. Also return a dictionary of the routes that should be added to each router. Static routes must be added later, after the router ports are set. 
And return a dictionary of external gateway info per router """ try: source_routers = self.source_neutron.list_routers()['routers'] except Exception: # L3 might be disabled in the source source_routers = [] dest_routers = self.dest_neutron.list_routers()['routers'] dest_azs = self.get_dest_availablity_zones('router') update_routes = {} gw_info = {} total_num = len(source_routers) LOG.info("Migrating %s routers", total_num) for count, router in enumerate(source_routers, 1): if router.get('routes'): update_routes[router['id']] = router['routes'] if router.get('external_gateway_info'): gw_info[router['id']] = router['external_gateway_info'] # Ignore internal NSXV objects if router['project_id'] == nsxv_constants.INTERNAL_TENANT_ID: LOG.info("Skip router %s: Internal NSX-V router", router['id']) continue dest_router = self.have_id(router['id'], dest_routers) if dest_router is False: body = self.prepare_router(router, dest_azs=dest_azs) try: new_router = (self.dest_neutron.create_router( {'router': body})) LOG.info("created router %(count)s/%(total)s: %(rtr)s", {'count': count, 'total': total_num, 'rtr': new_router}) except Exception as e: LOG.error("Failed to create router %(rtr)s: %(e)s", {'rtr': router, 'e': e}) return update_routes, gw_info def migrate_routers_routes(self, routers_routes): """Add static routes to the created routers.""" total_num = len(routers_routes) LOG.info("Migrating %s routers routes", total_num) for count, (router_id, routes) in enumerate( six.iteritems(routers_routes), 1): try: self.dest_neutron.update_router(router_id, {'router': {'routes': routes}}) LOG.info("Added routes to router %(rtr)s %(count)s/%(total)s:", {'count': count, 'total': total_num, 'rtr': router_id}) except Exception as e: LOG.error("Failed to add routes %(routes)s to router " "%(rtr)s: %(e)s", {'routes': routes, 'rtr': router_id, 'e': e}) def migrate_subnetpools(self): subnetpools_map = {} try: source_subnetpools = self.source_neutron.list_subnetpools()[ 'subnetpools'] 
except Exception: # pools not supported on source return subnetpools_map dest_subnetpools = self.dest_neutron.list_subnetpools()[ 'subnetpools'] for pool in source_subnetpools: # a default subnetpool (per ip-version) should be unique. # so do not create one if already exists if pool['is_default']: for dpool in dest_subnetpools: if (dpool['is_default'] and dpool['ip_version'] == pool['ip_version']): subnetpools_map[pool['id']] = dpool['id'] break else: old_id = pool['id'] body = self.prepare_subnetpool(pool) if 'default_quota' in body and body['default_quota'] is None: del body['default_quota'] try: new_id = self.dest_neutron.create_subnetpool( {'subnetpool': body})['subnetpool']['id'] subnetpools_map[old_id] = new_id # refresh the list of existing subnetpools dest_subnetpools = self.dest_neutron.list_subnetpools()[ 'subnetpools'] except Exception as e: LOG.error("Failed to create subnetpool %(pool)s: %(e)s", {'pool': pool, 'e': e}) return subnetpools_map def migrate_networks_subnets_ports(self, routers_gw_info): """Migrates networks/ports/router-uplinks from src to dest neutron.""" source_ports = self.source_neutron.list_ports()['ports'] source_subnets = self.source_neutron.list_subnets()['subnets'] source_networks = self.source_neutron.list_networks()['networks'] dest_networks = self.dest_neutron.list_networks()['networks'] dest_ports = self.dest_neutron.list_ports()['ports'] dest_subnets = self.dest_neutron.list_subnets()['subnets'] remove_qos = False if not self.dest_qos_support: remove_qos = True # Find out if the destination already has a default public network dest_default_public_net = False for dest_net in dest_networks: if dest_net.get('is_default') and dest_net.get('router:external'): dest_default_public_net = True subnetpools_map = self.migrate_subnetpools() dest_azs = self.get_dest_availablity_zones('network') total_num = len(source_networks) LOG.info("Migrating %(nets)s networks, %(subnets)s subnets and " "%(ports)s ports", {'nets': total_num, 
'subnets': len(source_subnets), 'ports': len(source_ports)}) for count, network in enumerate(source_networks, 1): external_net = network.get('router:external') body = self.prepare_network( network, remove_qos=remove_qos, dest_default_public_net=dest_default_public_net, dest_azs=dest_azs, ext_net_map=self.ext_net_map) # only create network if the dest server doesn't have it if self.have_id(network['id'], dest_networks): LOG.info("Skip network %s: Already exists on the destination", network['id']) continue # Ignore internal NSXV objects if network['project_id'] == nsxv_constants.INTERNAL_TENANT_ID: LOG.info("Skip network %s: Internal NSX-V network", network['id']) continue try: created_net = self.dest_neutron.create_network( {'network': body})['network'] LOG.info("Created network %(count)s/%(total)s: %(net)s", {'count': count, 'total': total_num, 'net': created_net}) except Exception as e: # Print the network and exception to help debugging with excutils.save_and_reraise_exception(): LOG.error("Failed to create network %s", body) LOG.error("Source network: %s", network) raise e subnets_map = {} dhcp_subnets = [] count_dhcp_subnet = 0 for subnet_id in network['subnets']: # only create subnet if the dest server doesn't have it if self.have_id(subnet_id, dest_subnets): LOG.info("Skip network %s: Already exists on the " "destination", network['id']) continue subnet = self.find_subnet_by_id(subnet_id, source_subnets) body = self.prepare_subnet(subnet) # specify the network_id that we just created above body['network_id'] = network['id'] # translate the old subnetpool id to the new one if body.get('subnetpool_id'): body['subnetpool_id'] = subnetpools_map.get( body['subnetpool_id']) # Handle DHCP enabled subnets enable_dhcp = False if body['enable_dhcp']: count_dhcp_subnet = count_dhcp_subnet + 1 # disable dhcp on subnet: we will enable it after creating # all the ports to avoid ip collisions body['enable_dhcp'] = False if count_dhcp_subnet > 1: # Do not allow dhcp on the 
subnet if there is already # another subnet with DHCP as the v3 plugins supports # only one LOG.warning("Disabling DHCP for subnet on net %s: " "The plugin doesn't support multiple " "subnets with DHCP", network['id']) enable_dhcp = False elif external_net: # Do not allow dhcp on the external subnet LOG.warning("Disabling DHCP for subnet on net %s: " "The plugin doesn't support dhcp on " "external networks", network['id']) enable_dhcp = False else: enable_dhcp = True try: created_subnet = self.dest_neutron.create_subnet( {'subnet': body})['subnet'] LOG.info("Created subnet: %s", created_subnet['id']) subnets_map[subnet_id] = created_subnet['id'] if enable_dhcp: dhcp_subnets.append(created_subnet) except n_exc.BadRequest as e: LOG.error("Failed to create subnet: %(subnet)s: %(e)s", {'subnet': subnet, 'e': e}) # create the ports on the network ports = self.get_ports_on_network(network['id'], source_ports) for port in ports: # Ignore internal NSXV objects if port['project_id'] == nsxv_constants.INTERNAL_TENANT_ID: LOG.info("Skip router %s: Internal NSX-V port", port['id']) continue body = self.prepare_port(port, remove_qos=remove_qos) # specify the network_id that we just created above port['network_id'] = network['id'] subnet_id = None if port.get('fixed_ips'): old_subnet_id = port['fixed_ips'][0]['subnet_id'] subnet_id = subnets_map.get(old_subnet_id) # remove the old subnet id field from fixed_ips dict for fixed_ips in body['fixed_ips']: del fixed_ips['subnet_id'] # only create port if the dest server doesn't have it if self.have_id(port['id'], dest_ports) is False: if port['device_owner'] == 'network:router_gateway': router_id = port['device_id'] enable_snat = True if router_id in routers_gw_info: # keep the original snat status of the router enable_snat = routers_gw_info[router_id].get( 'enable_snat', True) rtr_body = { "external_gateway_info": {"network_id": port['network_id'], "enable_snat": enable_snat, # keep the original GW IP "external_fixed_ips": 
port.get('fixed_ips')}} try: self.dest_neutron.update_router( router_id, {'router': rtr_body}) LOG.info("Uplinked router %(rtr)s to external " "network %(net)s", {'rtr': router_id, 'net': port['network_id']}) except Exception as e: LOG.error("Failed to add router gateway with port " "(%(port)s): %(e)s", {'port': port, 'e': e}) continue # Let the neutron dhcp-agent recreate this on its own if port['device_owner'] == 'network:dhcp': continue # ignore these as we create them ourselves later if port['device_owner'] == 'network:floatingip': continue if (port['device_owner'] == 'network:router_interface' and subnet_id): try: # uplink router_interface ports by creating the # port, and attaching it to the router router_id = port['device_id'] del body['device_owner'] del body['device_id'] created_port = self.dest_neutron.create_port( {'port': body})['port'] LOG.info("Created interface port %(port)s (subnet " "%(subnet)s, ip %(ip)s, mac %(mac)s)", {'port': created_port['id'], 'subnet': subnet_id, 'ip': created_port['fixed_ips'][0][ 'ip_address'], 'mac': created_port['mac_address']}) self.dest_neutron.add_interface_router( router_id, {'port_id': created_port['id']}) LOG.info("Uplinked router %(rtr)s to network " "%(net)s", {'rtr': router_id, 'net': network['id']}) except Exception as e: # NOTE(arosen): this occurs here if you run the # script multiple times as we don't track this. # Note(asarfaty): also if the same network in # source is attached to 2 routers, which the v3 # plugins does not support. LOG.error("Failed to add router interface port" "(%(port)s): %(e)s", {'port': port, 'e': e}) continue try: created_port = self.dest_neutron.create_port( {'port': body})['port'] except Exception as e: # NOTE(arosen): this occurs here if you run the # script multiple times as we don't track this. 
LOG.error("Failed to create port (%(port)s) : %(e)s", {'port': port, 'e': e}) else: ip_addr = None if created_port.get('fixed_ips'): ip_addr = created_port['fixed_ips'][0].get( 'ip_address') LOG.info("Created port %(port)s (subnet " "%(subnet)s, ip %(ip)s, mac %(mac)s)", {'port': created_port['id'], 'subnet': subnet_id, 'ip': ip_addr, 'mac': created_port['mac_address']}) # Enable dhcp on the relevant subnets: for subnet in dhcp_subnets: try: self.dest_neutron.update_subnet(subnet['id'], {'subnet': {'enable_dhcp': True}}) except Exception as e: LOG.error("Failed to enable DHCP on subnet %(subnet)s: " "%(e)s", {'subnet': subnet['id'], 'e': e}) def migrate_floatingips(self): """Migrates floatingips from source to dest neutron.""" try: source_fips = self.source_neutron.list_floatingips()['floatingips'] except Exception: # L3 might be disabled in the source source_fips = [] total_num = len(source_fips) for count, source_fip in enumerate(source_fips, 1): body = self.prepare_floatingip(source_fip) try: fip = self.dest_neutron.create_floatingip({'floatingip': body}) LOG.info("Created floatingip %(count)s/%(total)s : %(fip)s", {'count': count, 'total': total_num, 'fip': fip}) except Exception as e: LOG.error("Failed to create floating ip (%(fip)s) : %(e)s", {'fip': source_fip, 'e': e}) def _migrate_fwaas_resource(self, resource_type, source_objects, dest_objects, prepare_method, create_method): total_num = len(source_objects) for count, source_obj in enumerate(source_objects, 1): # Check if the object already exists if self.have_id(source_obj['id'], dest_objects): LOG.info("Skipping %s %s as it already exists on the " "destination server", resource_type, source_obj['id']) continue body = prepare_method(source_obj) try: new_obj = create_method({resource_type: body}) LOG.info("Created %(resource)s %(count)s/%(total)s : %(obj)s", {'resource': resource_type, 'count': count, 'total': total_num, 'obj': new_obj}) except Exception as e: LOG.error("Failed to create %(resource)s 
(%(obj)s) : %(e)s", {'resource': resource_type, 'obj': source_obj, 'e': e}) def migrate_fwaas(self): """Migrates FWaaS V2 objects from source to dest neutron.""" try: source_rules = self.source_neutron.\ list_fwaas_firewall_rules()['firewall_rules'] source_polices = self.source_neutron.\ list_fwaas_firewall_policies()['firewall_policies'] source_groups = self.source_neutron.\ list_fwaas_firewall_groups()['firewall_groups'] except Exception as e: # FWaaS might be disabled in the source LOG.info("FWaaS V2 was not found on the source server: %s", e) return try: dest_rules = self.dest_neutron.\ list_fwaas_firewall_rules()['firewall_rules'] dest_polices = self.dest_neutron.\ list_fwaas_firewall_policies()['firewall_policies'] dest_groups = self.dest_neutron.\ list_fwaas_firewall_groups()['firewall_groups'] except Exception as e: # FWaaS might be disabled in the destination LOG.warning("Skipping FWaaS V2 migration. FWaaS V2 was not found " "on the destination server: %s", e) return # Migrate all FWaaS objects: self._migrate_fwaas_resource( 'firewall_rule', source_rules, dest_rules, self.prepare_fwaas_rule, self.dest_neutron.create_fwaas_firewall_rule) self._migrate_fwaas_resource( 'firewall_policy', source_polices, dest_polices, self.prepare_fwaas_policy, self.dest_neutron.create_fwaas_firewall_policy) self._migrate_fwaas_resource( 'firewall_group', source_groups, dest_groups, self.prepare_fwaas_group, self.dest_neutron.create_fwaas_firewall_group) LOG.info("FWaaS V2 migration done") def _delete_octavia_lb(self, body): kw = {'loadbalancer': body} self.octavia_rpc_client.call({}, 'loadbalancer_delete_cascade', **kw) def _migrate_octavia_lb(self, lb, orig_map): # Creating all loadbalancers resources on the new nsx driver # using RPC calls to the plugin listener. 
# Create the loadbalancer: lb_body = self.prepare_lb_loadbalancer(lb) kw = {'loadbalancer': lb_body} if not self.octavia_rpc_client.call({}, 'loadbalancer_create', **kw): LOG.error("Failed to create loadbalancer (%s)", lb_body) self._delete_octavia_lb(lb_body) return lb_id = lb['id'] lb_body_for_deletion = copy.deepcopy(lb_body) lb_body_for_deletion['listeners'] = [] lb_body_for_deletion['pools'] = [] listeners_map = {} for listener_dict in lb.get('listeners', []): listener_id = listener_dict['id'] listener = orig_map['listeners'][listener_id] body = self.prepare_lb_listener(listener, lb_body) body['loadbalancer'] = lb_body body['loadbalancer_id'] = lb_id kw = {'listener': body, 'cert': None} if not self.octavia_rpc_client.call({}, 'listener_create', **kw): LOG.error("Failed to create loadbalancer %(lb)s listener " "(%(list)s)", {'list': listener, 'lb': lb_id}) self._delete_octavia_lb(lb_body_for_deletion) return listeners_map[listener_id] = body lb_body_for_deletion['listeners'].append(body) for pool_dict in lb.get('pools', []): pool_id = pool_dict['id'] pool = orig_map['pools'][pool_id] pool_body = self.prepare_lb_pool(pool, lb_body) # Update listeners in pool if pool.get('listeners'): listener_id = pool['listeners'][0]['id'] pool_body['listener_id'] = listener_id pool_body['listener'] = listeners_map.get(listener_id) kw = {'pool': pool_body} if not self.octavia_rpc_client.call({}, 'pool_create', **kw): LOG.error("Failed to create loadbalancer %(lb)s pool " "(%(pool)s)", {'pool': pool, 'lb': lb_id}) self._delete_octavia_lb(lb_body_for_deletion) return lb_body_for_deletion['pools'].append(pool) # Add members to this pool pool_members = self.octavia.member_list(pool_id)['members'] for member in pool_members: body = self.prepare_lb_member(member, lb_body) if not member['subnet_id']: # Add the loadbalancer subnet body['subnet_id'] = lb_body['vip_subnet_id'] body['pool'] = pool_body kw = {'member': body} if not self.octavia_rpc_client.call({}, 'member_create', **kw): 
LOG.error("Failed to create pool %(pool)s member " "(%(member)s)", {'member': member, 'pool': pool_id}) self._delete_octavia_lb(lb_body_for_deletion) return # Add pool health monitor if pool.get('healthmonitor_id'): hm_id = pool['healthmonitor_id'] hm = orig_map['hms'][hm_id] body = self.prepare_lb_hm(hm) body['pool'] = pool_body # Update pool id in hm kw = {'healthmonitor': body} if not self.octavia_rpc_client.call( {}, 'healthmonitor_create', **kw): LOG.error("Failed to create pool %(pool)s healthmonitor " "(%(hm)s)", {'hm': hm, 'pool': pool_id}) self._delete_octavia_lb(lb_body_for_deletion) return lb_body_for_deletion['pools'][-1]['healthmonitor'] = body # Add listeners L7 policies for listener_id in listeners_map.keys(): listener = orig_map['listeners'][listener_id] for l7pol_dict in listener.get('l7policies', []): l7_pol_id = l7pol_dict['id'] l7pol = orig_map['l7pols'][l7_pol_id] pol_body = self.prepare_lb_l7policy(l7pol) # Add the rules of this policy source_l7rules = self.octavia.l7rule_list( l7_pol_id)['rules'] for rule in source_l7rules: rule_body = self.prepare_lb_l7rule(rule) pol_body['rules'].append(rule_body) kw = {'l7policy': pol_body} if not self.octavia_rpc_client.call( {}, 'l7policy_create', **kw): LOG.error("Failed to create l7policy (%(l7pol)s)", {'l7pol': l7pol}) self._delete_octavia_lb(lb_body_for_deletion) return LOG.info("Created loadbalancer %s", lb_id) def _map_orig_objects_of_type(self, source_objects): result = {} for obj in source_objects: result[obj['id']] = obj return result def _map_orig_lb_objects(self, source_listeners, source_pools, source_hms, source_l7pols): result = { 'listeners': self._map_orig_objects_of_type(source_listeners), 'pools': self._map_orig_objects_of_type(source_pools), 'hms': self._map_orig_objects_of_type(source_hms), 'l7pols': self._map_orig_objects_of_type(source_l7pols), } return result def migrate_octavia(self): """Migrates Octavia NSX objects to the new neutron driver. 
The Octavia proccess & DB will remain unchanged. Using RPC connection to connect directly with the new plugin driver. """ # Read all existing octavia resources try: loadbalancers = self.octavia.load_balancer_list()['loadbalancers'] listeners = self.octavia.listener_list()['listeners'] pools = self.octavia.pool_list()['pools'] hms = self.octavia.health_monitor_list()['healthmonitors'] l7pols = self.octavia.l7policy_list()['l7policies'] except Exception as e: # Octavia might be disabled in the source LOG.info("Octavia was not found on the server: %s", e) return # Init the RPC connection for sending messages to the octavia driver topic = d_const.OCTAVIA_TO_DRIVER_MIGRATION_TOPIC transport = messaging.get_rpc_transport(cfg.CONF) target = messaging.Target(topic=topic, exchange="common", namespace='control', fanout=False, version='1.0') self.octavia_rpc_client = messaging.RPCClient(transport, target) # Initialize RPC listener for getting status updates from the driver # so that the rsource status will not change in the octavia DB topic = d_const.DRIVER_TO_OCTAVIA_MIGRATION_TOPIC server = socket.gethostname() target = messaging.Target(topic=topic, server=server, exchange="common", fanout=False) class MigrationOctaviaDriverEndpoint(driver_lib.DriverLibrary): target = messaging.Target(namespace="control", version='1.0') def update_loadbalancer_status(self, **kw): # Do nothing pass endpoints = [MigrationOctaviaDriverEndpoint] access_policy = dispatcher.DefaultRPCAccessPolicy self.octavia_rpc_server = messaging.get_rpc_server( transport, target, endpoints, executor='threading', access_policy=access_policy) self.octavia_rpc_server.start() orig_map = self._map_orig_lb_objects(listeners, pools, hms, l7pols) total_num = len(loadbalancers) LOG.info("Migrating %d loadbalancer(s)", total_num) for lb in loadbalancers: if lb['provisioning_status'] == 'ACTIVE': self._migrate_octavia_lb(lb, orig_map) else: LOG.info("Skipping %s loadbalancer %s", lb['provisioning_status'], lb['id']) 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/api_replay/utils.py0000644000175000017500000003225400000000000023235 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging from neutron_lib.api import attributes as lib_attrs from oslo_config import cfg from oslo_utils import uuidutils import webob.exc logging.basicConfig(level=logging.INFO) LOG = logging.getLogger(__name__) def _fixup_res_dict(context, attr_name, res_dict, check_allow_post=True): # This method is a replacement of _fixup_res_dict which is used in # neutron_lib.plugin.utils. All this mock does is insert a uuid # for the id field if one is not found ONLY if running in api_replay_mode. if cfg.CONF.api_replay_mode and 'id' not in res_dict: res_dict['id'] = uuidutils.generate_uuid() attr_info = lib_attrs.RESOURCES[attr_name] attr_ops = lib_attrs.AttributeInfo(attr_info) try: attr_ops.populate_project_id(context, res_dict, True) lib_attrs.populate_project_info(attr_info) attr_ops.verify_attributes(res_dict) except webob.exc.HTTPBadRequest as e: # convert webob exception into ValueError as these functions are # for internal use. webob exception doesn't make sense. 
raise ValueError(e.detail) attr_ops.fill_post_defaults(res_dict, check_allow_post=check_allow_post) attr_ops.convert_values(res_dict) return res_dict class PrepareObjectForMigration(object): """Helper class to modify source objects before creating them in dest""" # Remove some fields before creating the new object. # Some fields are not supported for a new object, and some are not # supported by the destination plugin basic_ignore_fields = ['updated_at', 'created_at', 'tags', 'revision', 'revision_number'] drop_sg_rule_fields = basic_ignore_fields drop_sg_fields = basic_ignore_fields + ['policy'] drop_router_fields = basic_ignore_fields + [ 'status', 'routes', 'ha', 'external_gateway_info', 'router_type', 'availability_zones', 'distributed', 'flavor_id'] drop_subnetpool_fields = basic_ignore_fields + [ 'id', 'ip_version'] drop_subnet_fields = basic_ignore_fields + [ 'advanced_service_providers', 'service_types'] drop_port_fields = basic_ignore_fields + [ 'status', 'binding:vif_details', 'binding:vif_type', 'binding:host_id', 'vnic_index', 'dns_assignment'] drop_network_fields = basic_ignore_fields + [ 'status', 'subnets', 'availability_zones', 'ipv4_address_scope', 'ipv6_address_scope', 'mtu'] drop_fip_fields = basic_ignore_fields + [ 'status', 'router_id', 'id', 'revision'] drop_qos_rule_fields = ['revision', 'type', 'qos_policy_id', 'id'] drop_qos_policy_fields = ['revision'] drop_fwaas_rule_fields = ['firewall_policy_id'] drop_fwaas_policy_fields = [] drop_fwaas_group_fields = ['status'] lb_ignore_fields = ['created_at', 'updated_at', 'operating_status', 'provisioning_status'] drop_lb_loadbalancer_fields = lb_ignore_fields + [ 'listeners', 'pools', # Those objects will be created later 'flavor_id', # not supported by the driver 'vip_qos_policy_id', # not supported by the driver ] drop_lb_listener_fields = lb_ignore_fields + [ 'l7policies', 'default_pool_id'] drop_lb_pool_fields = lb_ignore_fields + [ 'loadbalancers', 'healthmonitor_id', 'listeners', 'members'] 
drop_lb_member_fields = lb_ignore_fields drop_lb_hm_fields = lb_ignore_fields + ['pools'] drop_lb_l7policy_fields = lb_ignore_fields + ['rules'] drop_lb_l7rule_fields = lb_ignore_fields def drop_fields(self, item, drop_fields): body = {} for k, v in item.items(): if k in drop_fields: continue body[k] = v return body def fix_description(self, body): # neutron doesn't like description being None even though its # what it returns to us. if 'description' in body and body['description'] is None: body['description'] = '' # direct_call arg means that the object is prepared for calling the plugin # create method directly def prepare_security_group_rule(self, sg_rule, direct_call=False): self.fix_description(sg_rule) return self.drop_fields(sg_rule, self.drop_sg_rule_fields) def prepare_security_group(self, sg, direct_call=False): self.fix_description(sg) return self.drop_fields(sg, self.drop_sg_fields) def prepare_router(self, rtr, dest_azs=None, direct_call=False): self.fix_description(rtr) body = self.drop_fields(rtr, self.drop_router_fields) if dest_azs: if body.get('availability_zone_hints'): az = body['availability_zone_hints'][0] if az not in dest_azs: if az != 'default': LOG.warning("Ignoring AZ %s in router %s as it is not " "defined in destination", az, rtr['id']) body['availability_zone_hints'] = [] elif direct_call: body['availability_zone_hints'] = [] return body def prepare_subnetpool(self, pool, direct_call=False): self.fix_description(pool) return self.drop_fields(pool, self.drop_subnetpool_fields) def prepare_network(self, net, dest_default_public_net=True, remove_qos=False, dest_azs=None, direct_call=False, ext_net_map=None): self.fix_description(net) body = self.drop_fields(net, self.drop_network_fields) if remove_qos: body = self.drop_fields(body, ['qos_policy_id']) # neutron doesn't like some fields being None even though its # what it returns to us. 
for field in ['provider:physical_network', 'provider:segmentation_id']: if field in body and body[field] is None: del body[field] # vxlan network with segmentation id should be translated to a regular # network in nsx-v3/P. if (body.get('provider:network_type') == 'vxlan' and body.get('provider:segmentation_id') is not None): del body['provider:network_type'] del body['provider:segmentation_id'] # flat network should be translated to a regular network in nsx-v3/P. if (body.get('provider:network_type') == 'flat'): del body['provider:network_type'] if 'provider:physical_network' in body: del body['provider:physical_network'] # external networks needs some special care if body.get('router:external'): fields_reset = False # TODO(asarfaty): map external network neutron ids to Policy tier0 for field in ['provider:network_type', 'provider:segmentation_id', 'provider:physical_network']: if field in body: if body[field] is not None: fields_reset = True del body[field] if fields_reset: LOG.warning("Ignoring provider network fields while migrating " "external network %s", body['id']) # Get the tier0 into the physical_network if ext_net_map and body['id'] in ext_net_map: body['provider:physical_network'] = ext_net_map[body['id']] else: LOG.warning("Using default Tier0 as provider:physical_network " "while migrating external network %s", body['id']) if 'provider:physical_network' in body: del body['provider:physical_network'] if body.get('is_default') and dest_default_public_net: body['is_default'] = False LOG.warning("Public network %s was set to non default network", body['id']) if dest_azs: if body.get('availability_zone_hints'): az = body['availability_zone_hints'][0] if az not in dest_azs: if az != 'default': LOG.warning("Ignoring AZ %s in net %s as it is not " "defined in destination", az, body['id']) body['availability_zone_hints'] = [] elif direct_call: body['availability_zone_hints'] = [] return body def prepare_subnet(self, subnet, direct_call=False): 
self.fix_description(subnet) body = self.drop_fields(subnet, self.drop_subnet_fields) # Drops v6 fields on subnets that are v4 as server doesn't allow them. v6_fields_to_remove = ['ipv6_address_mode', 'ipv6_ra_mode'] if body['ip_version'] == 4: for field in v6_fields_to_remove: if field in body: body.pop(field) return body def prepare_port(self, port, remove_qos=False, direct_call=False): self.fix_description(port) body = self.drop_fields(port, self.drop_port_fields) if remove_qos: body = self.drop_fields(body, ['qos_policy_id']) if 'allowed_address_pairs' in body: if not body['allowed_address_pairs']: # remove allowed_address_pairs if empty: del body['allowed_address_pairs'] else: # remove unsupported allowed_address_pairs for pair in body['allowed_address_pairs']: ip = pair.get('ip_address') if len(ip.split('/')) > 1: LOG.warning("ignoring allowed_address_pair %s for " "port %s as cidr is not supported", pair, port['id']) body['allowed_address_pairs'].remove(pair) # remove port security if mac learning is enabled if (body.get('mac_learning_enabled') and body.get('port_security_enabled')): LOG.warning("Disabling port security of port %s: The plugin " "doesn't support mac learning with port security", body['id']) body['port_security_enabled'] = False body['security_groups'] = [] if direct_call: if 'device_id' not in body: body['device_id'] = "" if 'device_owner' not in body: body['device_owner'] = "" return body def prepare_floatingip(self, fip, direct_call=False): self.fix_description(fip) return self.drop_fields(fip, self.drop_fip_fields) def prepare_qos_rule(self, rule, direct_call=False): self.fix_description(rule) return self.drop_fields(rule, self.drop_qos_rule_fields) def prepare_qos_policy(self, policy, direct_call=False): self.fix_description(policy) return self.drop_fields(policy, self.drop_qos_policy_fields) def prepare_fwaas_rule(self, rule): self.fix_description(rule) return self.drop_fields(rule, self.drop_fwaas_rule_fields) def 
prepare_fwaas_policy(self, policy): self.fix_description(policy) return self.drop_fields(policy, self.drop_fwaas_policy_fields) def prepare_fwaas_group(self, group): self.fix_description(group) return self.drop_fields(group, self.drop_fwaas_group_fields) def prepare_lb_loadbalancer(self, lb_obj): return self.drop_fields(lb_obj, self.drop_lb_loadbalancer_fields) def prepare_lb_listener(self, listener_obj, lb_body): body = self.drop_fields(listener_obj, self.drop_lb_listener_fields) body['loadbalancer'] = lb_body body['loadbalancer_id'] = lb_body['id'] return body def prepare_lb_pool(self, pool_obj, lb_body): body = self.drop_fields(pool_obj, self.drop_lb_pool_fields) body['loadbalancer'] = lb_body body['loadbalancer_id'] = lb_body['id'] return body def prepare_lb_member(self, mem_obj, lb_body): body = self.drop_fields(mem_obj, self.drop_lb_member_fields) body['loadbalancer'] = lb_body body['loadbalancer_id'] = lb_body['id'] return body def prepare_lb_hm(self, lb_obj): return self.drop_fields(lb_obj, self.drop_lb_hm_fields) def prepare_lb_l7policy(self, lb_obj): body = self.drop_fields(lb_obj, self.drop_lb_l7policy_fields) body['rules'] = [] return body def prepare_lb_l7rule(self, lb_obj): return self.drop_fields(lb_obj, self.drop_lb_l7rule_fields) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1862533 vmware-nsx-15.0.1.dev143/vmware_nsx/common/0000755000175000017500000000000000000000000020660 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/common/__init__.py0000644000175000017500000000000000000000000022757 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/common/availability_zones.py0000644000175000017500000001475300000000000025134 
0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from neutron_lib.api.definitions import availability_zone as az_def from neutron_lib import exceptions as n_exc from neutron_lib.exceptions import availability_zone as az_exc from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsx_exc DEFAULT_NAME = 'default' class ConfiguredAvailabilityZone(object): def __init__(self, config_line, default_name=DEFAULT_NAME): self.name = "" self.init_defaults() self._is_default = False if config_line and ':' in config_line: # Older configuration - each line contains all the relevant # values for one availability zones, separated by ':' values = config_line.split(':') self.name = values[0] self._validate_zone_name(self.name) self.init_from_config_line(config_line) elif config_line: # Newer configuration - the name of the availability zone can be # used to get the rest of the configuration for this AZ self.name = config_line self._validate_zone_name(config_line) self.init_from_config_section(self.name) else: # Default zone configuration self.name = default_name self._is_default = True def is_default(self): return self._is_default def _validate_zone_name(self, config_line): if len(self.name) > 36: raise nsx_exc.NsxInvalidConfiguration( opt_name="availability_zones", opt_value=config_line, reason=_("Maximum name length is 36")) @abc.abstractmethod def init_from_config_line(self, 
config_values): pass @abc.abstractmethod def init_from_config_section(self, az_name): pass @abc.abstractmethod def init_defaults(self): pass class ConfiguredAvailabilityZones(object): default_name = DEFAULT_NAME def __init__(self, az_conf, az_class, default_availability_zones=None): self.availability_zones = {} # Add the configured availability zones for az in az_conf: obj = az_class(az) self.availability_zones[obj.name] = obj # add a default entry obj = az_class(None, default_name=self.default_name) self.availability_zones[obj.name] = obj # validate the default az: if default_availability_zones: # we support only 1 default az if len(default_availability_zones) > 1: raise nsx_exc.NsxInvalidConfiguration( opt_name="default_availability_zones", opt_value=default_availability_zones, reason=_("The NSX plugin supports only 1 default AZ")) default_az_name = default_availability_zones[0] if (default_az_name not in self.availability_zones): raise nsx_exc.NsxInvalidConfiguration( opt_name="default_availability_zones", opt_value=default_availability_zones, reason=_("The default AZ is not defined in the NSX " "plugin")) else: self._default_az = self.availability_zones[default_az_name] else: self._default_az = self.availability_zones[self.default_name] def get_availability_zone(self, name): """Return an availability zone object by its name """ if name in self.availability_zones.keys(): return self.availability_zones[name] return self.get_default_availability_zone() def get_default_availability_zone(self): """Return the default availability zone object """ return self._default_az def list_availability_zones(self): """Return a list of availability zones names """ return self.availability_zones.keys() def list_availability_zones_objects(self): """Return a list of availability zones objects """ return self.availability_zones.values() class NSXAvailabilityZonesPluginCommon(object): @abc.abstractmethod def init_availability_zones(self): # To be initialized by the real plugin 
self._availability_zones_data = None def get_azs_list(self): return self._availability_zones_data.list_availability_zones_objects() def get_azs_names(self): return self._availability_zones_data.list_availability_zones() def validate_obj_azs(self, availability_zones): """Verify that the availability zones exist, and only 1 hint was set. """ # For now we support only 1 hint per network/router # TODO(asarfaty): support multiple hints if len(availability_zones) > 1: err_msg = _("Can't use multiple availability zone hints") raise n_exc.InvalidInput(error_message=err_msg) # check that all hints appear in the predefined list of availability # zones diff = (set(availability_zones) - set(self.get_azs_names())) if diff: raise az_exc.AvailabilityZoneNotFound( availability_zone=diff.pop()) def get_az_by_hint(self, hint): az = self._availability_zones_data.get_availability_zone(hint) if not az: raise az_def.AvailabilityZoneNotFound(availability_zone=hint) return az def get_default_az(self): return self._availability_zones_data.get_default_availability_zone() def get_obj_az_by_hints(self, obj): if az_def.AZ_HINTS in obj: for hint in obj[az_def.AZ_HINTS]: # For now we use only the first hint return self.get_az_by_hint(hint) # return the default return self.get_default_az() def get_network_az(self, network): return self.get_obj_az_by_hints(network) def get_router_az(self, router): return self.get_obj_az_by_hints(router) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/common/config.py0000644000175000017500000016234300000000000022510 0ustar00coreycorey00000000000000# Copyright 2012 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_config import types from oslo_log import log as logging from neutron.conf.db import l3_hamode_db from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.dvs import dvs_utils from vmware_nsx.extensions import projectpluginmap from vmware_nsx.extensions import routersize LOG = logging.getLogger(__name__) DEFAULT_VDR_TRANSIT_NETWORK = "169.254.2.0/28" DEFAULT_PLR_ADDRESS = "169.254.2.3" class AgentModes(object): AGENT = 'agent' AGENTLESS = 'agentless' COMBINED = 'combined' class MetadataModes(object): DIRECT = 'access_network' INDIRECT = 'dhcp_host_route' class ReplicationModes(object): SERVICE = 'service' SOURCE = 'source' base_opts = [ cfg.IntOpt('max_lp_per_bridged_ls', default=5000, deprecated_group='NVP', help=_("Maximum number of ports of a logical switch on a " "bridged transport zone. 
The recommended value for " "this parameter varies with NSX version.\nPlease use:\n" "NSX 2.x -> 64\nNSX 3.0, 3.1 -> 5000\n" "NSX 3.2 -> 10000")), cfg.IntOpt('max_lp_per_overlay_ls', default=256, deprecated_group='NVP', help=_("Maximum number of ports of a logical switch on an " "overlay transport zone")), cfg.IntOpt('concurrent_connections', default=10, deprecated_group='NVP', help=_("Maximum concurrent connections to each NSX " "controller.")), cfg.IntOpt('nsx_gen_timeout', default=-1, deprecated_name='nvp_gen_timeout', deprecated_group='NVP', help=_("Number of seconds a generation id should be valid for " "(default -1 meaning do not time out)")), cfg.StrOpt('metadata_mode', default=MetadataModes.DIRECT, deprecated_group='NVP', help=_("If set to access_network this enables a dedicated " "connection to the metadata proxy for metadata server " "access via Neutron router. If set to dhcp_host_route " "this enables host route injection via the dhcp agent. " "This option is only useful if running on a host that " "does not support namespaces otherwise access_network " "should be used.")), cfg.StrOpt('default_transport_type', default='stt', deprecated_group='NVP', help=_("The default network tranport type to use (stt, gre, " "bridge, ipsec_gre, or ipsec_stt)")), cfg.StrOpt('agent_mode', default=AgentModes.AGENT, deprecated_group='NVP', help=_("Specifies in which mode the plugin needs to operate " "in order to provide DHCP and metadata proxy services " "to tenant instances. If 'agent' is chosen (default) " "the NSX plugin relies on external RPC agents (i.e. " "dhcp and metadata agents) to provide such services. " "In this mode, the plugin supports API extensions " "'agent' and 'dhcp_agent_scheduler'. If 'agentless' " "is chosen (experimental in Icehouse), the plugin will " "use NSX logical services for DHCP and metadata proxy. " "This simplifies the deployment model for Neutron, in " "that the plugin no longer requires the RPC agents to " "operate. 
When 'agentless' is chosen, the config option " "metadata_mode becomes ineffective. The 'agentless' " "mode works only on NSX 4.1. Furthermore, a 'combined' " "mode is also provided and is used to support existing " "deployments that want to adopt the agentless mode. " "With this mode, existing networks keep being served by " "the existing infrastructure (thus preserving backward " "compatibility, whereas new networks will be served by " "the new infrastructure. Migration tools are provided " "to 'move' one network from one model to another; with " "agent_mode set to 'combined', option " "'network_auto_schedule' in neutron.conf is ignored, as " "new networks will no longer be scheduled to existing " "dhcp agents.")), cfg.StrOpt('replication_mode', default=ReplicationModes.SERVICE, choices=(ReplicationModes.SERVICE, ReplicationModes.SOURCE), help=_("Specifies which mode packet replication should be done " "in. If set to service a service node is required in " "order to perform packet replication. This can also be " "set to source if one wants replication to be performed " "locally (NOTE: usually only useful for testing if one " "does not want to deploy a service node). In order to " "leverage distributed routers, replication_mode should " "be set to 'service'.")), cfg.FloatOpt('qos_peak_bw_multiplier', default=2.0, min=1.0, help=_("The QoS rules peak bandwidth value will be the " "configured maximum bandwidth of the QoS rule, " "multiplied by this value. Value must be bigger than" " 1")), ] sync_opts = [ cfg.IntOpt('state_sync_interval', default=10, deprecated_group='NVP_SYNC', help=_("Interval in seconds between runs of the status " "synchronization task. The plugin will aim at " "resynchronizing operational status for all resources " "in this interval, and it should be therefore large " "enough to ensure the task is feasible. 
Otherwise the " "plugin will be constantly synchronizing resource " "status, ie: a new task is started as soon as the " "previous is completed. If this value is set to 0, the " "state synchronization thread for this Neutron instance " "will be disabled.")), cfg.IntOpt('max_random_sync_delay', default=0, deprecated_group='NVP_SYNC', help=_("Random additional delay between two runs of the state " "synchronization task. An additional wait time between " "0 and max_random_sync_delay seconds will be added on " "top of state_sync_interval.")), cfg.IntOpt('min_sync_req_delay', default=1, deprecated_group='NVP_SYNC', help=_("Minimum delay, in seconds, between two status " "synchronization requests for NSX. Depending on chunk " "size, controller load, and other factors, state " "synchronization requests might be pretty heavy. This " "means the controller might take time to respond, and " "its load might be quite increased by them. This " "parameter allows to specify a minimum interval between " "two subsequent requests. The value for this parameter " "must never exceed state_sync_interval. If this does, " "an error will be raised at startup.")), cfg.IntOpt('min_chunk_size', default=500, deprecated_group='NVP_SYNC', help=_("Minimum number of resources to be retrieved from NSX " "in a single status synchronization request. The actual " "size of the chunk will increase if the number of " "resources is such that using the minimum chunk size " "will cause the interval between two requests to be " "less than min_sync_req_delay")), cfg.BoolOpt('always_read_status', default=False, deprecated_group='NVP_SYNC', help=_("Enable this option to allow punctual state " "synchronization on show operations. 
In this way, show " "operations will always fetch the operational status " "of the resource from the NSX backend, and this might " "have a considerable impact on overall performance.")) ] connection_opts = [ cfg.StrOpt('nsx_user', default='admin', deprecated_name='nvp_user', help=_('User name for NSX controllers in this cluster')), cfg.StrOpt('nsx_password', default='admin', deprecated_name='nvp_password', secret=True, help=_('Password for NSX controllers in this cluster')), cfg.IntOpt('http_timeout', default=75, help=_('Time before aborting a request on an ' 'unresponsive controller (Seconds)')), cfg.IntOpt('retries', default=2, help=_('Maximum number of times a particular request ' 'should be retried')), cfg.IntOpt('redirects', default=2, help=_('Maximum number of times a redirect response ' 'should be followed')), cfg.ListOpt('nsx_controllers', default=[], deprecated_name='nvp_controllers', help=_('Comma-separated list of NSX controller ' 'endpoints (:). When port is omitted, ' '443 is assumed. This option MUST be specified. ' 'e.g.: aa.bb.cc.dd, ee.ff.gg.hh.ee:80')), cfg.IntOpt('conn_idle_timeout', default=900, help=_('Reconnect connection to nsx if not used within this ' 'amount of time.')), ] cluster_opts = [ cfg.StrOpt('default_tz_uuid', help=_("This is uuid of the default NSX Transport zone that " "will be used for creating tunneled isolated " "\"Neutron\" networks. 
It needs to be created in NSX " "before starting Neutron with the nsx plugin.")), cfg.StrOpt('default_l3_gw_service_uuid', help=_("(Optional) UUID of the NSX L3 Gateway " "service which will be used for implementing routers " "and floating IPs")), cfg.StrOpt('default_l2_gw_service_uuid', help=_("(Optional) UUID of the NSX L2 Gateway service " "which will be used by default for network gateways")), cfg.StrOpt('default_service_cluster_uuid', help=_("(Optional) UUID of the Service Cluster which will " "be used by logical services like dhcp and metadata")), cfg.StrOpt('nsx_default_interface_name', default='breth0', deprecated_name='default_interface_name', help=_("Name of the interface on a L2 Gateway transport node " "which should be used by default when setting up a " "network connection")), ] nsx_common_opts = [ cfg.StrOpt('nsx_l2gw_driver', help=_("Specify the class path for the Layer 2 gateway " "backend driver (i.e. NSX-T/NSX-V). This field will be " "used when a L2 Gateway service plugin is configured.")), cfg.StrOpt('locking_coordinator_url', help=_("(Optional) URL for distributed locking coordination " "resource for lock manager. This value is passed as a " "parameter to tooz coordinator. By default, value is " "None and oslo_concurrency is used for single-node " "lock management.")), cfg.BoolOpt('api_replay_mode', default=False, help=_("If true, the server then allows the caller to " "specify the id of resources. 
This should only " "be enabled in order to allow one to migrate an " "existing install of neutron to a new VMWare plugin.")), cfg.ListOpt('nsx_extension_drivers', default=[], help=_("An ordered list of extension driver " "entrypoints to be loaded from the " "vmware_nsx.extension_drivers namespace.")), cfg.StrOpt('smtp_gateway', help=_("(Optional) IP address of SMTP gateway to use for" "admin warnings.")), cfg.StrOpt('smtp_from_addr', help=_("(Optional) email address to use for outgoing admin" "notifications.")), cfg.ListOpt('snmp_to_list', default=[], help=_("(Optional) List of email addresses for " "notifications.")), cfg.IntOpt('octavia_stats_interval', default=10, help=_("Interval in seconds for Octavia statistics reporting. " "0 means no reporting")), ] nsx_v3_and_p = [ cfg.ListOpt('nsx_api_user', default=['admin'], help=_('User names for the NSX managers')), cfg.ListOpt('nsx_api_password', default=['default'], secret=True, help=_('Passwords for the NSX managers')), cfg.ListOpt('nsx_api_managers', default=[], help=_("IP address of one or more NSX managers separated " "by commas. The IP address should be of the form:\n" "[://][:]\nIf scheme is not " "provided https is used. 
If port is not provided port " "80 is used for http and port 443 for https.")), cfg.BoolOpt('nsx_use_client_auth', default=False, help=_("Use client certificate in NSX manager " "authentication")), cfg.StrOpt('nsx_client_cert_file', default='', help=_("File to contain client certificate and private key")), cfg.StrOpt('nsx_client_cert_pk_password', default="", secret=True, help=_("password for private key encryption")), cfg.StrOpt('nsx_client_cert_storage', default='nsx-db', choices=['nsx-db', 'none'], help=_("Storage type for client certificate sensitive data")), cfg.IntOpt('retries', default=10, help=_('Maximum number of times to retry API requests upon ' 'stale revision errors.')), cfg.ListOpt('ca_file', help=_('Specify a CA bundle files to use in verifying the NSX ' 'Managers server certificate. This option is ignored ' 'if "insecure" is set to True. If "insecure" is set to ' 'False and ca_file is unset, the system root CAs will ' 'be used to verify the server certificate.')), cfg.BoolOpt('insecure', default=True, help=_('If true, the NSX Manager server certificate is not ' 'verified. 
If false the CA bundle specified via ' '"ca_file" will be used or if unsest the default ' 'system root CAs will be used.')), cfg.IntOpt('http_timeout', default=10, help=_('The time in seconds before aborting a HTTP connection ' 'to a NSX manager.')), cfg.IntOpt('http_read_timeout', default=180, help=_('The time in seconds before aborting a HTTP read ' 'response from a NSX manager.')), cfg.IntOpt('http_retries', default=3, help=_('Maximum number of times to retry a HTTP connection.')), cfg.IntOpt('concurrent_connections', default=10, help=_("Maximum concurrent connections to each NSX " "manager.")), cfg.IntOpt('conn_idle_timeout', default=10, help=_("The amount of time in seconds to wait before ensuring " "connectivity to the NSX manager if no manager " "connection has been used.")), cfg.IntOpt('redirects', default=2, help=_('Number of times a HTTP redirect should be followed.')), cfg.BoolOpt('log_security_groups_blocked_traffic', default=False, help=_("(Optional) Indicates whether distributed-firewall " "rule for security-groups blocked traffic is logged.")), cfg.BoolOpt('log_security_groups_allowed_traffic', default=False, help=_("(Optional) Indicates whether distributed-firewall " "security-groups rules are logged.")), cfg.ListOpt('network_vlan_ranges', default=[], help=_("List of :: " "specifying Transport Zone UUID usable for VLAN " "provider networks, as well as ranges of VLAN " "tags on each available for allocation to networks.")), cfg.ListOpt('availability_zones', default=[], help=_('Optional parameter defining the networks availability ' 'zones names for the native dhcp configuration. The ' 'configuration of each zone will be under a group ' 'names [az:]')), cfg.StrOpt('metadata_proxy', help=_("This is the name or UUID of the NSX Metadata Proxy " "that will be used to enable native metadata service. 
" "It needs to be created in NSX before starting Neutron " "with the NSX plugin.")), cfg.StrOpt('native_metadata_route', default="169.254.169.254/31", help=_("The metadata route used for native metadata proxy " "service.")), cfg.StrOpt('dns_domain', default='openstacklocal', help=_("Domain to use for building the hostnames.")), cfg.ListOpt('nameservers', default=[], help=_("List of nameservers to configure for the DHCP " "binding entries. These will be used if there are no " "nameservers defined on the subnet.")), cfg.StrOpt('edge_cluster', help=_("(Optional) Specifying an edge cluster for Tier1 " "routers to connect other that the one connected to" " the Tier0 router")), cfg.ListOpt('transit_networks', default=['100.64.0.0/16', 'fc3d:e3c3:7b93::/48'], help=_("List of transit networks used by NSX tier0 routers. " "Neutron subnets will not be allowed to use those " "cidrs")), cfg.BoolOpt('init_objects_by_tags', default=False, help=_("When True, the configured transport zones, router and " "profiles will be found by tags on the NSX. The scope " "of the tag will be the value of search_objects_" "scope. The value of the search tag will be the name " "configured in each respective configuration.")), cfg.StrOpt('search_objects_scope', help=_("This is the scope of the tag that will be used for " "finding the objects uuids on the NSX during plugin " "init.")), cfg.IntOpt('dhcp_lease_time', default=86400, help=_("DHCP default lease time.")), cfg.BoolOpt('support_nsx_port_tagging', default=False, help=_("If true, adding neutron tags to ports will also add " "tags on the NSX logical ports. This feature requires " "oslo_messaging_notifications driver to be " "configured.")), ] nsx_v3_opts = nsx_v3_and_p + [ cfg.StrOpt('dhcp_profile', help=_("This is the name or UUID of the NSX DHCP Profile " "that will be used to enable native DHCP service. 
It " "needs to be created in NSX before starting Neutron " "with the NSX plugin")), cfg.StrOpt('default_overlay_tz', help=_("This is the name or UUID of the default NSX overlay " "transport zone that will be used for creating " "tunneled isolated Neutron networks. It needs to be " "created in NSX before starting Neutron with the NSX " "plugin.")), cfg.StrOpt('default_vlan_tz', help=_("(Optional) Only required when creating VLAN or flat " "provider networks. Name or UUID of default NSX VLAN " "transport zone that will be used for bridging between " "Neutron networks, if no physical network has been " "specified")), cfg.StrOpt('default_bridge_cluster', deprecated_for_removal=True, help=_("(Optional) Name or UUID of the default NSX bridge " "cluster that will be used to perform L2 gateway " "bridging between VXLAN and VLAN networks. If default " "bridge cluster UUID is not specified, admin will have " "to manually create a L2 gateway corresponding to a " "NSX Bridge Cluster using L2 gateway APIs. This field " "must be specified on one of the active neutron " "servers only.")), cfg.StrOpt('default_bridge_endpoint_profile', help=_("(Optional) Name or UUID of the default NSX bridge " "endpoint profile that will be used to perform L2 " "bridging between networks in the NSX fabric and " "VLANs external to NSX. 
If not specified, operators " "will need to explicitly create a layer-2 gateway in " "Neutron using the L2 gateway APIs.")), cfg.StrOpt('default_tier0_router', help=_("Name or UUID of the default tier0 router that will be " "used for connecting to tier1 logical routers and " "configuring external networks")), cfg.IntOpt('number_of_nested_groups', default=8, help=_("(Optional) The number of nested groups which are used " "by the plugin, each Neutron security-groups is added " "to one nested group, and each nested group can contain " "as maximum as 500 security-groups, therefore, the " "maximum number of security groups that can be created " "is 500 * number_of_nested_groups. The default is 8 " "nested groups, which allows a maximum of 4k " "security-groups, to allow creation of more " "security-groups, modify this figure.")), cfg.StrOpt('metadata_mode', default=MetadataModes.DIRECT, help=_("If set to access_network this enables a dedicated " "connection to the metadata proxy for metadata server " "access via Neutron router. If set to dhcp_host_route " "this enables host route injection via the dhcp agent. 
" "This option is only useful if running on a host that " "does not support namespaces otherwise access_network " "should be used.")), cfg.BoolOpt('metadata_on_demand', default=False, help=_("If true, an internal metadata network will be created " "for a router only when the router is attached to a " "DHCP-disabled subnet.")), cfg.BoolOpt('native_dhcp_metadata', default=True, help=_("If true, DHCP and metadata proxy services will be " "provided by NSX backend.")), cfg.ListOpt('switching_profiles', default=[], help=_("Optional parameter defining a list switching profiles " "uuids that will be attached to all neutron created " "nsx ports.")), cfg.BoolOpt('ens_support', default=False, help=_("(Optional) Indicates whether ENS transport zones can " "be used")), cfg.BoolOpt('disable_port_security_for_ens', default=False, help=_("When True, port security will be set to False for " "newly created ENS networks and ports, overriding " "user settings")), cfg.StrOpt('dhcp_relay_service', help=_("(Optional) This is the name or UUID of the NSX dhcp " "relay service that will be used to enable DHCP relay " "on router ports.")), cfg.ListOpt('housekeeping_jobs', default=['orphaned_dhcp_server', 'orphaned_logical_switch', 'orphaned_logical_router', 'mismatch_logical_port', 'orphaned_firewall_section'], help=_("List of the enabled housekeeping jobs")), cfg.ListOpt('housekeeping_readonly_jobs', default=[], help=_("List of housekeeping jobs which are enabled in read " "only mode")), cfg.BoolOpt('housekeeping_readonly', default=True, help=_("Housekeeping will only warn about breakage.")), ] nsx_p_opts = nsx_v3_and_p + [ cfg.StrOpt('dhcp_profile', help=_("This is the name or UUID of the NSX DHCP Profile, " "or the name or ID of the Policy DHCP server config " "that will be used to enable native DHCP service. 
It " "needs to be created in NSX before starting Neutron " "with the NSX plugin")), cfg.StrOpt('default_tier0_router', help=_("Name or UUID of the default tier0 router that will be " "used for connecting to tier1 logical routers and " "configuring external networks. If only one tier0 " " router is present on backend, it will be assumed " "as default unless this value is provided")), cfg.StrOpt('default_overlay_tz', help=_("This is the name or UUID of the default NSX overlay " "transport zone that will be used for creating " "tunneled isolated Neutron networks. It needs to be " "created in NSX before starting Neutron with the NSX " "plugin. If only one overlay transport zone is present " "on backend, it will be assumed as default unless this " "value is provided")), cfg.StrOpt('default_vlan_tz', help=_("(Optional) Only required when creating VLAN or flat " "provider networks. Name or UUID of default NSX VLAN " "transport zone that will be used for bridging between " "Neutron networks, if no physical network has been " "specified. If only one VLAN transport zone is present " "on backend, it will be assumed as default unless this " "value is provided")), cfg.StrOpt('waf_profile', deprecated_for_removal=True, help=_("(Optional) Name or UUID of the default WAF profile to " "be attached to L7 loadbalancer listeners")), cfg.BoolOpt('allow_passthrough', default=True, help=_("If True, use nsx manager api for cases which are not " "supported by the policy manager api")), cfg.IntOpt('realization_max_attempts', default=50, help=_("(Optional) Maximum number of times to retry while " "waiting for a resource to be realized")), cfg.IntOpt('realization_wait_sec', default=1.0, help=_("(Optional) Number of seconds to wait between attempts " "for a resource to be realized")), cfg.BoolOpt('firewall_match_internal_addr', default=True, help=_("If True, edge firewall rules will match internal " "addresses. 
Else they will match the external " "addresses")), ] DEFAULT_STATUS_CHECK_INTERVAL = 2000 DEFAULT_MINIMUM_POOLED_EDGES = 1 DEFAULT_MAXIMUM_POOLED_EDGES = 3 DEFAULT_MAXIMUM_TUNNELS_PER_VNIC = 20 nsxv_opts = [ cfg.StrOpt('user', default='admin', help=_('User name for NSXv manager')), cfg.StrOpt('password', default='default', secret=True, help=_('Password for NSXv manager')), cfg.StrOpt('manager_uri', help=_('URL for NSXv manager')), cfg.StrOpt('ca_file', help=_('Specify a CA bundle file to use in verifying the NSXv ' 'server certificate.')), cfg.BoolOpt('insecure', default=True, help=_('If true, the NSXv server certificate is not verified. ' 'If false, then the default CA truststore is used for ' 'verification. This option is ignored if "ca_file" is ' 'set.')), cfg.ListOpt('cluster_moid', default=[], help=_('(Required) Parameter listing the IDs of the clusters ' 'which are used by OpenStack.')), cfg.StrOpt('datacenter_moid', help=_('Required parameter identifying the ID of datacenter ' 'to deploy NSX Edges')), cfg.StrOpt('deployment_container_id', help=_('Optional parameter identifying the ID of datastore to ' 'deploy NSX Edges')), cfg.StrOpt('resource_pool_id', help=_('Optional parameter identifying the ID of resource to ' 'deploy NSX Edges')), cfg.ListOpt('availability_zones', default=[], help=_('Optional parameter defining the availability zones ' 'names for deploying NSX Edges. 
The configuration of ' 'each zone will be under a group names [az:]')), cfg.StrOpt('datastore_id', help=_('Optional parameter identifying the ID of datastore to ' 'deploy NSX Edges')), cfg.StrOpt('ha_datastore_id', help=_('Optional parameter identifying the ID of datastore to ' 'deploy NSX Edges in addition to data_store_id in case' 'edge_ha is True')), cfg.BoolOpt('ha_placement_random', default=False, help=_('When True and in case edge_ha is True, half of the ' 'edges will be placed in the primary datastore as ' 'active and the other half will be placed in the ' 'ha_datastore')), cfg.ListOpt('edge_host_groups', default=[], help=_('(Optional) If edge HA is used then this will ensure ' 'that active/backup edges are placed in the listed ' 'host groups. At least 2 predefined host groups need ' 'to be configured.')), cfg.StrOpt('external_network', help=_('(Required) Network ID for physical network ' 'connectivity')), cfg.IntOpt('task_status_check_interval', default=DEFAULT_STATUS_CHECK_INTERVAL, help=_("(Optional) Asynchronous task status check interval. " "Default is 2000 (millisecond)")), cfg.StrOpt('vdn_scope_id', help=_('(Optional) Network scope ID for VXLAN virtual wires')), cfg.StrOpt('dvs_id', help=_('(Optional) DVS MoRef ID for DVS connected to ' 'Management / Edge cluster')), cfg.IntOpt('maximum_tunnels_per_vnic', default=DEFAULT_MAXIMUM_TUNNELS_PER_VNIC, min=1, max=110, help=_('(Optional) Maximum number of sub interfaces supported ' 'per vnic in edge.')), cfg.ListOpt('backup_edge_pool', default=['service:compact:4:10', 'vdr:compact:4:10'], help=_("Defines edge pool's management range with the format: " ":[edge_size]::." "edge_type: service,vdr. " "edge_size: compact, large, xlarge, quadlarge " "and default is compact. 
By default, edge pool manager " "would manage service edge with compact size " "and distributed edge with compact size as following: " "service:compact:4:10,vdr:compact:" "4:10")), cfg.IntOpt('retries', default=20, help=_('Maximum number of API retries on endpoint.')), cfg.StrOpt('mgt_net_moid', help=_('(Optional) Portgroup MoRef ID for metadata proxy ' 'management network')), cfg.ListOpt('mgt_net_proxy_ips', default=[], help=_('(Optional) Comma separated list of management network ' 'IP addresses for metadata proxy.')), cfg.StrOpt('mgt_net_proxy_netmask', help=_("(Optional) Management network netmask for metadata " "proxy.")), cfg.StrOpt('mgt_net_default_gateway', help=_("(Optional) Management network default gateway for " "metadata proxy.")), cfg.ListOpt('nova_metadata_ips', default=[], help=_("(Optional) IP addresses used by Nova metadata " "service.")), cfg.PortOpt('nova_metadata_port', default=8775, help=_("(Optional) TCP Port used by Nova metadata server.")), cfg.StrOpt('metadata_shared_secret', secret=True, help=_("(Optional) Shared secret to sign metadata requests.")), cfg.BoolOpt('metadata_insecure', default=True, help=_("(Optional) If True, the end to end connection for " "metadata service is not verified. If False, the " "default CA truststore is used for verification.")), cfg.StrOpt('metadata_nova_client_cert', help=_('(Optional) Client certificate to use when metadata ' 'connection is to be verified. 
If not provided, ' 'a self signed certificate will be used.')), cfg.StrOpt('metadata_nova_client_priv_key', help=_("(Optional) Private key of client certificate.")), cfg.BoolOpt('spoofguard_enabled', default=True, help=_("(Optional) If True then plugin will use NSXV " "spoofguard component for port-security feature.")), cfg.BoolOpt('use_exclude_list', default=True, help=_("(Optional) If True then plugin will use NSXV exclude " "list component when port security is disabled and " "spoofguard is enabled.")), cfg.ListOpt('tenant_router_types', default=['shared', 'distributed', 'exclusive'], help=_("Ordered list of router_types to allocate as tenant " "routers. It limits the router types that the Nsxv " "can support for tenants:\ndistributed: router is " "supported by distributed edge at the backend.\n" "shared: multiple routers share the same service " "edge at the backend.\nexclusive: router exclusively " "occupies one service edge at the backend.\nNsxv would " "select the first available router type from " "tenant_router_types list if router-type is not " "specified. If the tenant defines the router type with " "'--distributed','--router_type exclusive' or " "'--router_type shared', Nsxv would verify that the " "router type is in tenant_router_types. 
Admin supports " "all these three router types.")), cfg.StrOpt('edge_appliance_user', secret=True, help=_("(Optional) Username to configure for Edge appliance " "login.")), cfg.StrOpt('edge_appliance_password', secret=True, help=_("(Optional) Password to configure for Edge appliance " "login.")), cfg.IntOpt('dhcp_lease_time', default=86400, help=_("(Optional) DHCP default lease time.")), cfg.BoolOpt('metadata_initializer', default=True, help=_("If True, the server instance will attempt to " "initialize the metadata infrastructure")), cfg.ListOpt('metadata_service_allowed_ports', item_type=types.Port(), default=[], help=_('List of tcp ports, to be allowed access to the ' 'metadata proxy, in addition to the default ' '80,443,8775 tcp ports')), cfg.BoolOpt('edge_ha', default=False, help=_("(Optional) Enable HA for NSX Edges.")), cfg.StrOpt('exclusive_router_appliance_size', default="compact", choices=routersize.VALID_EDGE_SIZES, help=_("(Optional) Edge appliance size to be used for creating " "exclusive router. Valid values: " "['compact', 'large', 'xlarge', 'quadlarge']. This " "exclusive_router_appliance_size will be picked up if " "--router-size parameter is not specified while doing " "neutron router-create")), cfg.StrOpt('shared_router_appliance_size', default="compact", choices=routersize.VALID_EDGE_SIZES, help=_("(Optional) Edge appliance size to be used for creating " "shared router edge. Valid values: " "['compact', 'large', 'xlarge', 'quadlarge'].")), cfg.StrOpt('dns_search_domain', help=_("(Optional) Use this search domain if there is no " "search domain configured on the subnet.")), cfg.ListOpt('nameservers', default=[], help=_('List of nameservers to configure for the DHCP binding ' 'entries. These will be used if there are no ' 'nameservers defined on the subnet.')), cfg.BoolOpt('use_dvs_features', default=False, help=_('If True, dvs features will be supported which ' 'involves configuring the dvs backing nsx_v directly. 
' 'If False, only features exposed via nsx_v will be ' 'supported')), cfg.BoolOpt('log_security_groups_blocked_traffic', default=False, help=_("(Optional) Indicates whether distributed-firewall " "rule for security-groups blocked traffic is logged.")), cfg.BoolOpt('log_security_groups_allowed_traffic', default=False, help=_("(Optional) Indicates whether distributed-firewall " "security-groups allowed traffic is logged.")), cfg.StrOpt('service_insertion_profile_id', help=_("(Optional) The profile id of the redirect firewall " "rules that will be used for the Service Insertion " "feature.")), cfg.BoolOpt('service_insertion_redirect_all', default=False, help=_("(Optional) If set to True, the plugin will create " "a redirect rule to send all the traffic to the " "security partner")), cfg.BoolOpt('use_nsx_policies', default=False, help=_("If set to True, the plugin will use NSX policies " "in the neutron security groups.")), cfg.StrOpt('default_policy_id', help=_("(Optional) If use_nsx_policies is True, this policy " "will be used as the default policy for new tenants.")), cfg.BoolOpt('allow_tenant_rules_with_policy', default=False, help=_("(Optional) If use_nsx_policies is True, this value " "will determine if a tenants can add rules to their " "security groups.")), cfg.StrOpt('vdr_transit_network', default=DEFAULT_VDR_TRANSIT_NETWORK, help=_("(Optional) Sets the network address for distributed " "router TLR-PLR connectivity, with " "/ syntax")), cfg.BoolOpt('bind_floatingip_to_all_interfaces', default=False, help=_("If set to False, router will associate floating ip " "with external interface of only, thus denying " "connectivity between hosts on same network via " "their floating ips. 
If True, floating ip will " "be associated with all router interfaces.")), cfg.BoolOpt('exclusive_dhcp_edge', default=False, help=_("(Optional) Have exclusive DHCP edge per network.")), cfg.IntOpt('bgp_neighbour_hold_down_timer', default=4, help=_("(Optional) Set the interval (Seconds) for BGP " "neighbour hold down time.")), cfg.IntOpt('bgp_neighbour_keep_alive_timer', default=1, help=_("(Optional) Set the interval (Seconds) for BGP " "neighbour keep alive time.")), cfg.IntOpt('ecmp_wait_time', default=2, help=_("(Optional) Set the wait time (Seconds) between " "enablement of ECMP.")), cfg.ListOpt('network_vlan_ranges', default=[], help=_("List of :: " "specifying DVS MoRef ID usable for VLAN provider " "networks, as well as ranges of VLAN tags on each " "available for allocation to networks.")), cfg.IntOpt('nsx_transaction_timeout', default=240, help=_("Timeout interval for NSX backend transactions.")), cfg.BoolOpt('share_edges_between_tenants', default=True, help=_("If False, different tenants will not use the same " "DHCP edge or router edge.")), cfg.ListOpt('housekeeping_jobs', default=['error_dhcp_edge', 'error_backup_edge'], help=_("List of the enabled housekeeping jobs")), cfg.ListOpt('housekeeping_readonly_jobs', default=[], help=_("List of housekeeping jobs which are enabled in read " "only mode")), cfg.BoolOpt('housekeeping_readonly', default=True, help=_("Housekeeping will only warn about breakage.")), cfg.BoolOpt('use_default_block_all', default=False, help=_("Use default block all rule when no security groups " "are set on a port and port security is enabled")), cfg.BoolOpt('use_routers_as_lbaas_platform', default=False, help=_("Use subnet's exclusive router as a platform for " "LBaaS")), cfg.BoolOpt('allow_multiple_ip_addresses', default=False, help=_("Allow associating multiple IPs to VMs " "without spoofguard limitations")), cfg.StrOpt('nsx_sg_name_format', default='%(name)s (%(id)s)', help=_("(Optional) Format for the NSX name of an openstack " 
"security group")), ] # define the configuration of each NSX-V availability zone. # the list of expected zones is under nsxv group: availability_zones # Note: if any of the optional arguments is missing - the global one will be # used instead. nsxv_az_opts = [ cfg.StrOpt('resource_pool_id', help=_('Identifying the ID of resource to deploy NSX Edges')), cfg.StrOpt('datastore_id', help=_('Identifying the ID of datastore to deploy NSX Edges')), cfg.BoolOpt('edge_ha', default=False, help=_("(Optional) Enable HA for NSX Edges.")), cfg.StrOpt('ha_datastore_id', help=_('Optional parameter identifying the ID of datastore to ' 'deploy NSX Edges in addition to data_store_id in case' 'edge_ha is True')), cfg.BoolOpt('ha_placement_random', help=_('When True and in case edge_ha is True, half of the ' 'edges will be placed in the primary datastore as ' 'active and the other half will be placed in the ' 'ha_datastore. If this value is not set, the global ' 'one will be used')), cfg.ListOpt('edge_host_groups', default=[], help=_('(Optional) If edge HA is used then this will ensure ' 'that active/backup edges are placed in the listed ' 'host groups. At least 2 predefined host groups need ' 'to be configured.')), cfg.StrOpt('datacenter_moid', help=_('(Optional) Identifying the ID of datacenter to deploy ' 'NSX Edges')), cfg.ListOpt('backup_edge_pool', help=_("(Optional) Defines edge pool's management range for " "the availability zone. 
If not defined, the global one " "will be used")), cfg.StrOpt('mgt_net_moid', help=_('(Optional) Portgroup MoRef ID for metadata proxy ' 'management network')), cfg.ListOpt('mgt_net_proxy_ips', default=[], help=_('(Optional) Comma separated list of management network ' 'IP addresses for metadata proxy.')), cfg.StrOpt('mgt_net_proxy_netmask', help=_("(Optional) Management network netmask for metadata " "proxy.")), cfg.StrOpt('mgt_net_default_gateway', help=_("(Optional) Management network default gateway for " "metadata proxy.")), cfg.StrOpt('external_network', help=_('(Optional) Network ID for physical network ' 'connectivity')), cfg.StrOpt('vdn_scope_id', help=_('(Optional) Network scope ID for VXLAN virtual wires')), cfg.StrOpt('dvs_id', help=_('(Optional) DVS MoRef ID for DVS connected to ' 'Management / Edge cluster')), cfg.BoolOpt('exclusive_dhcp_edge', default=False, help=_("(Optional) Have exclusive DHCP edge per network.")), cfg.BoolOpt('bind_floatingip_to_all_interfaces', default=False, help=_("If set to False, router will associate floating ip " "with external interface of only, thus denying " "connectivity between hosts on same network via " "their floating ips. If True, floating ip will " "be associated with all router interfaces.")), ] # define the configuration of each NSX-V3 availability zone. # the list of expected zones is under nsx_v3 group: availability_zones # Note: if any of the optional arguments is missing - the global one will be # used instead. nsx_v3_and_p_az_opts = [ cfg.StrOpt('metadata_proxy', help=_("The name or UUID of the NSX Metadata Proxy " "that will be used to enable native metadata service. " "It needs to be created in NSX before starting Neutron " "with the NSX plugin.")), cfg.StrOpt('dhcp_profile', help=_("The name or UUID of the NSX DHCP Profile " "that will be used to enable native DHCP service. 
It " "needs to be created in NSX before starting Neutron " "with the NSX plugin")), cfg.StrOpt('native_metadata_route', help=_("(Optional) The metadata route used for native metadata " "proxy service.")), cfg.StrOpt('dns_domain', help=_("(Optional) Domain to use for building the hostnames.")), cfg.ListOpt('nameservers', help=_("(Optional) List of nameservers to configure for the " "DHCP binding entries. These will be used if there are " "no nameservers defined on the subnet.")), cfg.StrOpt('default_overlay_tz', help=_("(Optional) This is the name or UUID of the default NSX " "overlay transport zone that will be used for creating " "tunneled isolated Neutron networks. It needs to be " "created in NSX before starting Neutron with the NSX " "plugin.")), cfg.StrOpt('default_vlan_tz', help=_("(Optional) Only required when creating VLAN or flat " "provider networks. Name or UUID of default NSX VLAN " "transport zone that will be used for bridging between " "Neutron networks, if no physical network has been " "specified")), cfg.StrOpt('default_tier0_router', help=_("Name or UUID of the default tier0 router that will be " "used for connecting to tier1 logical routers and " "configuring external networks")), cfg.StrOpt('edge_cluster', help=_("(Optional) Specifying an edge cluster for Tier1 " "routers to connect other that the one connected to" " the Tier0 router")), ] nsxv3_az_opts = nsx_v3_and_p_az_opts + [ cfg.ListOpt('switching_profiles', help=_("(Optional) list switching profiles uuids that will be " "attached to all neutron created nsx ports.")), cfg.StrOpt('dhcp_relay_service', help=_("(Optional) This is the name or UUID of the NSX dhcp " "relay service that will be used to enable DHCP relay " "on router ports.")), ] nsxp_az_opts = nsx_v3_and_p_az_opts nsx_tvd_opts = [ cfg.ListOpt('nsx_v_extension_drivers', default=[], help=_("An ordered list of NSX-V extension driver " "entrypoints to be loaded from the " "vmware_nsx.extension_drivers namespace.")), 
cfg.ListOpt('nsx_v3_extension_drivers', default=[], help=_("An ordered list of NSX-T extension driver " "entrypoints to be loaded from the " "vmware_nsx.extension_drivers namespace.")), cfg.ListOpt('dvs_extension_drivers', default=[], help=_("An ordered list of DVS extension driver " "entrypoints to be loaded from the " "vmware_nsx.extension_drivers namespace.")), cfg.StrOpt('default_plugin', default=projectpluginmap.NsxPlugins.NSX_T, choices=projectpluginmap.VALID_TYPES, help=_("The default plugin that will be used for new projects " "that were not added to the projects plugin mapping.")), cfg.ListOpt('enabled_plugins', default=[projectpluginmap.NsxPlugins.NSX_T, projectpluginmap.NsxPlugins.NSX_V, projectpluginmap.NsxPlugins.DVS], help=_("The list of plugins that the TVD core plugin will " "load")), cfg.ListOpt('nsx_v_default_availability_zones', default=[], help=_("The default availability zones that will be used for " "NSX-V networks and routers creation under the TVD " "plugin.")), cfg.ListOpt('nsx_v3_default_availability_zones', default=[], help=_("The default availability zones that will be used for " "NSX-V3 networks and routers creation under the TVD " "plugin.")), cfg.IntOpt('init_retries', default=3, help=_('Maximum number of times a particular plugin ' 'initialization should be retried')), ] # Register the configuration options cfg.CONF.register_opts(connection_opts) cfg.CONF.register_opts(cluster_opts) cfg.CONF.register_opts(nsx_common_opts) cfg.CONF.register_opts(nsx_p_opts, group="nsx_p") cfg.CONF.register_opts(nsx_v3_opts, group="nsx_v3") cfg.CONF.register_opts(nsxv_opts, group="nsxv") cfg.CONF.register_opts(nsx_tvd_opts, group="nsx_tvd") cfg.CONF.register_opts(base_opts, group="NSX") cfg.CONF.register_opts(sync_opts, group="NSX_SYNC") # register l3_ha config opts. 
This is due to commit # a7c633dc8e8a67e65e558ecbdf9ea8efc5468251 cfg.CONF.register_opts(l3_hamode_db.L3_HA_OPTS) def _register_nsx_azs(conf, availability_zones, az_opts): # first verify that the availability zones are in the format of a # list of names. The old format was a list of values for each az, # separated with ':' if not availability_zones or len(availability_zones[0].split(':')) > 1: return for az in availability_zones: az_group = 'az:%s' % az conf.register_group(cfg.OptGroup( name=az_group, title="Configuration for availability zone %s" % az)) conf.register_opts(az_opts, group=az_group) # register a group for each nsxv/v3 availability zones def register_nsxv_azs(conf, availability_zones): _register_nsx_azs(conf, availability_zones, nsxv_az_opts) def register_nsxv3_azs(conf, availability_zones): _register_nsx_azs(conf, availability_zones, nsxv3_az_opts) def register_nsxp_azs(conf, availability_zones): _register_nsx_azs(conf, availability_zones, nsxp_az_opts) register_nsxv_azs(cfg.CONF, cfg.CONF.nsxv.availability_zones) register_nsxv3_azs(cfg.CONF, cfg.CONF.nsx_v3.availability_zones) register_nsxp_azs(cfg.CONF, cfg.CONF.nsx_p.availability_zones) def _get_nsx_az_opts(az, opts): az_info = dict() group = 'az:%s' % az if group not in cfg.CONF: raise nsx_exc.NsxInvalidConfiguration( opt_name=group, opt_value='None', reason=(_("Configuration group \'%s\' must be defined") % group)) for opt in opts: az_info[opt.name] = cfg.CONF[group][opt.name] return az_info def get_nsxv_az_opts(az): return _get_nsx_az_opts(az, nsxv_az_opts) def get_nsxv3_az_opts(az): return _get_nsx_az_opts(az, nsxv3_az_opts) def get_nsxp_az_opts(az): return _get_nsx_az_opts(az, nsxp_az_opts) def validate_nsxv_config_options(): if (cfg.CONF.nsxv.manager_uri is None or cfg.CONF.nsxv.user is None or cfg.CONF.nsxv.password is None): error = _("manager_uri, user, and password must be configured!") raise nsx_exc.NsxPluginException(err_msg=error) if cfg.CONF.nsxv.dvs_id is None: LOG.warning("dvs_id 
must be configured to support VLANs!") if cfg.CONF.nsxv.vdn_scope_id is None: LOG.warning("vdn_scope_id must be configured to support VXLANs!") if cfg.CONF.nsxv.use_dvs_features and not dvs_utils.dvs_is_enabled( dvs_id=cfg.CONF.nsxv.dvs_id): error = _("dvs host/vcenter credentials must be defined to use " "dvs features") raise nsx_exc.NsxPluginException(err_msg=error) def validate_nsx_config_options(): if cfg.CONF.nsx_extension_drivers: error = _("nsx_extension_drivers should not be configured!") raise nsx_exc.NsxPluginException(err_msg=error) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/common/driver_api.py0000644000175000017500000001506200000000000023362 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import six @six.add_metaclass(abc.ABCMeta) class ExtensionDriver(object): """Define stable abstract interface for extension drivers. An extension driver extends the core resources implemented by the plugin with additional attributes. Methods that process create and update operations for these resources validate and persist values for extended attributes supplied through the API. Other methods extend the resource dictionaries returned from the API operations with the values of the extended attributes. """ @abc.abstractmethod def initialize(self): """Perform driver initialization. 
Called after all drivers have been loaded and the database has been initialized. No abstract methods defined below will be called prior to this method being called. """ pass @property def extension_alias(self): """Supported extension alias. Return the alias identifying the core API extension supported by this driver. Do not declare if API extension handling will be left to a service plugin, and we just need to provide core resource extension and updates. """ pass def process_create_network(self, plugin_context, data, result): """Process extended attributes for create network. :param plugin_context: plugin request context :param data: dictionary of incoming network data :param result: network dictionary to extend Called inside transaction context on plugin_context.session to validate and persist any extended network attributes defined by this driver. Extended attribute values must also be added to result. """ pass def process_create_subnet(self, plugin_context, data, result): """Process extended attributes for create subnet. :param plugin_context: plugin request context :param data: dictionary of incoming subnet data :param result: subnet dictionary to extend Called inside transaction context on plugin_context.session to validate and persist any extended subnet attributes defined by this driver. Extended attribute values must also be added to result. """ pass def process_create_port(self, plugin_context, data, result): """Process extended attributes for create port. :param plugin_context: plugin request context :param data: dictionary of incoming port data :param result: port dictionary to extend Called inside transaction context on plugin_context.session to validate and persist any extended port attributes defined by this driver. Extended attribute values must also be added to result. """ pass def process_update_network(self, plugin_context, data, result): """Process extended attributes for update network. 
:param plugin_context: plugin request context :param data: dictionary of incoming network data :param result: network dictionary to extend Called inside transaction context on plugin_context.session to validate and update any extended network attributes defined by this driver. Extended attribute values, whether updated or not, must also be added to result. """ pass def process_update_subnet(self, plugin_context, data, result): """Process extended attributes for update subnet. :param plugin_context: plugin request context :param data: dictionary of incoming subnet data :param result: subnet dictionary to extend Called inside transaction context on plugin_context.session to validate and update any extended subnet attributes defined by this driver. Extended attribute values, whether updated or not, must also be added to result. """ pass def process_update_port(self, plugin_context, data, result): """Process extended attributes for update port. :param plugin_context: plugin request context :param data: dictionary of incoming port data :param result: port dictionary to extend Called inside transaction context on plugin_context.session to validate and update any extended port attributes defined by this driver. Extended attribute values, whether updated or not, must also be added to result. """ pass def extend_network_dict(self, session, base_model, result): """Add extended attributes to network dictionary. :param session: database session :param base_model: network model data :param result: network dictionary to extend Called inside transaction context on session to add any extended attributes defined by this driver to a network dictionary to be used for driver calls and/or returned as the result of a network operation. """ pass def extend_subnet_dict(self, session, base_model, result): """Add extended attributes to subnet dictionary. 
:param session: database session :param base_model: subnet model data :param result: subnet dictionary to extend Called inside transaction context on session to add any extended attributes defined by this driver to a subnet dictionary to be used for driver calls and/or returned as the result of a subnet operation. """ pass def extend_port_dict(self, session, base_model, result): """Add extended attributes to port dictionary. :param session: database session :param base_model: port model data :param result: port dictionary to extend Called inside transaction context on session to add any extended attributes defined by this driver to a port dictionary to be used for driver calls and/or returned as the result of a port operation. """ pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/common/exceptions.py0000644000175000017500000001634700000000000023426 0ustar00coreycorey00000000000000# Copyright 2012 VMware, Inc # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import exceptions as n_exc from vmware_nsx._i18n import _ class NsxPluginException(n_exc.NeutronException): message = _("An unexpected error occurred in the NSX Plugin: %(err_msg)s") class NsxPluginTemporaryError(n_exc.ServiceUnavailable): message = _("Temporary error occurred in the NSX Plugin: %(err_msg)s." 
" Please try again later") class ClientCertificateException(NsxPluginException): message = _("Client certificate error: %(err_msg)s") class InvalidVersion(NsxPluginException): message = _("Unable to fulfill request with version %(version)s.") class InvalidConnection(NsxPluginException): message = _("Invalid NSX connection parameters: %(conn_params)s") class InvalidClusterConfiguration(NsxPluginException): message = _("Invalid cluster values: %(invalid_attrs)s. Please ensure " "that these values are specified in the [DEFAULT] " "section of the NSX plugin ini file.") class InvalidNovaZone(NsxPluginException): message = _("Unable to find cluster config entry " "for nova zone: %(nova_zone)s") class NoMorePortsException(NsxPluginException): message = _("Unable to create port on network %(network)s. " "Maximum number of ports reached") class NatRuleMismatch(NsxPluginException): message = _("While retrieving NAT rules, %(actual_rules)s were found " "whereas rules in the (%(min_rules)s,%(max_rules)s) interval " "were expected") class InvalidAttachmentType(NsxPluginException): message = _("Invalid NSX attachment type '%(attachment_type)s'") class MaintenanceInProgress(NsxPluginException): message = _("The networking backend is currently in maintenance mode and " "therefore unable to accept requests which modify its state. " "Please try later.") class L2GatewayAlreadyInUse(n_exc.Conflict): message = _("Gateway Service %(gateway)s is already in use") class BridgeEndpointAttachmentInUse(n_exc.Conflict): message = _("The NSX backend only allow a single L2 gateway connection " "for network %(network_id)s") class InvalidTransportType(NsxPluginException): message = _("The transport type %(transport_type)s is not recognized " "by the backend") class InvalidSecurityCertificate(NsxPluginException): message = _("An invalid security certificate was specified for the " "gateway device. 
Certificates must be enclosed between " "'-----BEGIN CERTIFICATE-----' and " "'-----END CERTIFICATE-----'") class ServiceOverQuota(n_exc.Conflict): message = _("Quota exceeded for NSX resource %(overs)s: %(err_msg)s") class PortConfigurationError(NsxPluginException): message = _("An error occurred while connecting LSN %(lsn_id)s " "and network %(net_id)s via port %(port_id)s") def __init__(self, **kwargs): super(PortConfigurationError, self).__init__(**kwargs) self.port_id = kwargs.get('port_id') class LogicalRouterNotFound(n_exc.NotFound): message = _('Unable to find logical router for %(entity_id)s') class LsnNotFound(n_exc.NotFound): message = _('Unable to find LSN for %(entity)s %(entity_id)s') class LsnPortNotFound(n_exc.NotFound): message = (_('Unable to find port for LSN %(lsn_id)s ' 'and %(entity)s %(entity_id)s')) class LsnMigrationConflict(n_exc.Conflict): message = _("Unable to migrate network '%(net_id)s' to LSN: %(reason)s") class LsnConfigurationConflict(NsxPluginException): message = _("Configuration conflict on Logical Service Node %(lsn_id)s") class DvsNotFound(n_exc.NotFound): message = _('Unable to find DVS %(dvs)s') class NoRouterAvailable(n_exc.ResourceExhausted): message = _("Unable to create the router. 
" "No tenant router is available for allocation.") class NsxL2GWDeviceNotFound(n_exc.NotFound): message = _('Unable to find logical L2 gateway device.') class NsxL2GWInUse(n_exc.InUse): message = _("L2 Gateway '%(gateway_id)s' has been used") class InvalidIPAddress(n_exc.InvalidInput): message = _("'%(ip_address)s' must be a /32 CIDR based IPv4 address") class QoSOnExternalNet(n_exc.InvalidInput): message = _("Cannot configure QOS on external networks") class SecurityGroupMaximumCapacityReached(NsxPluginException): pass class NsxResourceNotFound(n_exc.NotFound): message = _("%(res_name)s %(res_id)s not found on the backend.") class NsxAZResourceNotFound(NsxResourceNotFound): message = _("Availability zone %(res_name)s %(res_id)s not found on the " "backend.") class NsxQosPolicyMappingNotFound(n_exc.NotFound): message = _('Unable to find mapping for QoS policy: %(policy)s') class NumberOfNsgroupCriteriaTagsReached(NsxPluginException): message = _("Port can be associated with at most %(max_num)s " "security-groups.") class NsxTaaSDriverException(NsxPluginException): message = _("Tap-as-a-Service NSX driver exception: %(msg)s.") class NsxPortMirrorSessionMappingNotFound(n_exc.NotFound): message = _("Unable to find mapping for Tap Flow: %(tf)s") class NsxInvalidConfiguration(n_exc.InvalidConfigurationOption): message = _("An invalid value was provided for %(opt_name)s: " "%(opt_value)s: %(reason)s") class NsxBgpSpeakerUnableToAddGatewayNetwork(n_exc.BadRequest): message = _("Unable to add gateway network %(network_id)s to BGP speaker " "%(bgp_speaker_id)s, network must have association with an " "address-scope and can be associated with one BGP speaker at " "most.") class NsxBgpNetworkNotExternal(n_exc.BadRequest): message = _("Network %(net_id)s is not external, only external network " "can be associated with a BGP speaker.") class NsxBgpGatewayNetworkHasNoSubnets(n_exc.BadRequest): message = _("Can't associate external network %(net_id)s with BGP " "speaker, network 
class NsxRouterInterfaceDoesNotMatchAddressScope(n_exc.BadRequest):
    message = _("Unable to update no-NAT router %(router_id)s, "
                "only subnets allocated from address-scope "
                "%(address_scope_id)s can be connected.")


class NsxVpnValidationError(NsxPluginException):
    message = _("Invalid VPN configuration: %(details)s")


class NsxIPsecVpnMappingNotFound(n_exc.NotFound):
    message = _("Unable to find mapping for ipsec site connection: %(conn)s")


class NsxENSPortSecurity(n_exc.BadRequest):
    message = _("Port security is not supported on ENS Transport zones")


# === vmware_nsx/common/l3_rpc_agent_api.py ================================
# Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.


class L3NotifyAPI(object):
    """Dummy driver for L3 notifications - no need - no L3 agents."""

    # We need this driver as this code is invoked from the L3 mixin code.

    def agent_updated(self, context, admin_state_up, host):
        pass

    def router_deleted(self, context, router_id):
        pass

    def routers_updated(self, context, router_ids, operation=None, data=None,
                        shuffle_agents=False, schedule_routers=True):
        pass

    def add_arp_entry(self, context, router_id, arp_table, operation=None):
        pass

    def del_arp_entry(self, context, router_id, arp_table, operation=None):
        pass

    def delete_fipnamespace_for_ext_net(self, context, ext_net_id):
        pass

    def router_removed_from_agent(self, context, router_id, host):
        pass

    def router_added_to_agent(self, context, router_ids, host):
        pass

    def routers_updated_on_host(self, context, router_ids, host):
        pass


# === vmware_nsx/common/locking.py =========================================
# Copyright 2015 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import traceback

from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log
from tooz import coordination

LOG = log.getLogger(__name__)


class LockManager(object):
    """Hand out either local (file-based) or distributed (tooz) locks.

    When ``locking_coordinator_url`` is configured, locks are obtained
    from a per-process tooz coordinator; otherwise oslo.concurrency
    interprocess (external) locks are used.
    """

    _coordinator = None
    _coordinator_pid = None
    # NOTE(review): resolved lazily in _get_lock_distributed() instead of at
    # class-definition (import) time. The original read
    # cfg.CONF.locking_coordinator_url here, so a URL parsed after this
    # module was imported would leave a stale/None connect string while
    # get_lock() consulted the live config value.
    _connect_string = None

    def __init__(self):
        LOG.debug('LockManager initialized!')

    @staticmethod
    def get_lock(name, **kwargs):
        """Return a lock context manager for *name*.

        Distributed when a coordinator URL is configured, local otherwise.
        """
        if cfg.CONF.locking_coordinator_url:
            lck = LockManager._get_lock_distributed(name)
        else:
            # Ensure that external=True
            kwargs['external'] = True
            lck = LockManager._get_lock_local(name, **kwargs)
        LOG.debug('Lock %s taken with stack trace %s', name,
                  traceback.extract_stack())
        return lck

    @staticmethod
    def _get_lock_local(name, **kwargs):
        return lockutils.lock(name, **kwargs)

    @staticmethod
    def _get_lock_distributed(name):
        if LockManager._coordinator_pid != os.getpid():
            # We should use a per-process coordinator. If PID is different
            # start a new coordinator.
            # While the API workers are spawned, we have to re-initialize
            # a coordinator, so we validate that the PID is still the same.
            LockManager._coordinator_pid = os.getpid()
            # Refresh the connect string from the live configuration before
            # (re)creating the coordinator.
            LockManager._connect_string = cfg.CONF.locking_coordinator_url
            LOG.debug('Initialized coordinator with connect string %s',
                      LockManager._connect_string)
            LockManager._coordinator = coordination.get_coordinator(
                LockManager._connect_string, 'vmware-neutron-plugin')
            LockManager._coordinator.start()
        LOG.debug('Retrieved lock for %s', name)
        return LockManager._coordinator.get_lock(name)


# === vmware_nsx/common/managers.py ========================================
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from oslo_log import log
from oslo_utils import excutils
import stevedore

LOG = log.getLogger(__name__)


class ExtensionManager(stevedore.named.NamedExtensionManager):
    """Manage extension drivers using drivers."""

    def __init__(self, extension_drivers=None):
        """Load the configured extension drivers in a stable order.

        :param extension_drivers: explicit list of driver names; when None
            the ``nsx_extension_drivers`` config option is used.
        """
        # Ordered list of extension drivers, defining
        # the order in which the drivers are called.
        self.ordered_ext_drivers = []

        if extension_drivers is None:
            extension_drivers = cfg.CONF.nsx_extension_drivers
        LOG.info("Configured extension driver names: %s", extension_drivers)
        super(ExtensionManager, self).__init__(
            'vmware_nsx.extension_drivers', extension_drivers,
            invoke_on_load=True, name_order=True)
        LOG.info("Loaded extension driver names: %s", self.names())
        self._register_drivers()

    def _register_drivers(self):
        """Register all extension drivers.

        This method should only be called once in the ExtensionManager
        constructor.
        """
        for ext in self:
            self.ordered_ext_drivers.append(ext)
        LOG.info("Registered extension drivers: %s",
                 [drv.name for drv in self.ordered_ext_drivers])

    def initialize(self):
        # Initialize each driver in the list.
        for drv in self.ordered_ext_drivers:
            LOG.info("Initializing extension driver '%s'", drv.name)
            drv.obj.initialize()

    def extension_aliases(self):
        """Return the API aliases advertised by the loaded drivers."""
        aliases = []
        for drv in self.ordered_ext_drivers:
            alias = drv.obj.extension_alias
            if alias:
                aliases.append(alias)
                LOG.info("Got %(alias)s extension from driver '%(drv)s'",
                         {'alias': alias, 'drv': drv.name})
        return aliases

    def _call_on_ext_drivers(self, method_name, plugin_context, data, result):
        """Helper method for calling a method across all extension drivers."""
        for drv in self.ordered_ext_drivers:
            try:
                getattr(drv.obj, method_name)(plugin_context, data, result)
            except Exception:
                # Re-raise after logging so the plugin can abort the
                # operation.
                with excutils.save_and_reraise_exception():
                    LOG.info("Extension driver '%(name)s' failed in "
                             "%(method)s",
                             {'name': drv.name, 'method': method_name})

    def process_create_network(self, plugin_context, data, result):
        """Notify all extension drivers during network creation."""
        self._call_on_ext_drivers("process_create_network",
                                  plugin_context, data, result)

    def process_update_network(self, plugin_context, data, result):
        """Notify all extension drivers during network update."""
        self._call_on_ext_drivers("process_update_network",
                                  plugin_context, data, result)

    def process_create_subnet(self, plugin_context, data, result):
        """Notify all extension drivers during subnet creation."""
        self._call_on_ext_drivers("process_create_subnet",
                                  plugin_context, data, result)

    def process_update_subnet(self, plugin_context, data, result):
        """Notify all extension drivers during subnet update."""
        self._call_on_ext_drivers("process_update_subnet",
                                  plugin_context, data, result)

    def process_create_port(self, plugin_context, data, result):
        """Notify all extension drivers during port creation."""
        self._call_on_ext_drivers("process_create_port",
                                  plugin_context, data, result)

    def process_update_port(self, plugin_context, data, result):
        """Notify all extension drivers during port update."""
        self._call_on_ext_drivers("process_update_port",
                                  plugin_context, data, result)

    def _call_on_dict_driver(self, method_name, session, base_model, result):
        # Unlike _call_on_ext_drivers, failures here are logged at ERROR
        # and re-raised directly.
        for drv in self.ordered_ext_drivers:
            try:
                getattr(drv.obj, method_name)(session, base_model, result)
            except Exception:
                LOG.error("Extension driver '%(name)s' failed in "
                          "%(method)s",
                          {'name': drv.name, 'method': method_name})
                raise

    def extend_network_dict(self, session, base_model, result):
        """Notify all extension drivers to extend network dictionary."""
        self._call_on_dict_driver("extend_network_dict", session, base_model,
                                  result)

    def extend_subnet_dict(self, session, base_model, result):
        """Notify all extension drivers to extend subnet dictionary."""
        self._call_on_dict_driver("extend_subnet_dict", session, base_model,
                                  result)

    def extend_port_dict(self, session, base_model, result):
        """Notify all extension drivers to extend port dictionary."""
        self._call_on_dict_driver("extend_port_dict", session, base_model,
                                  result)


# === vmware_nsx/common/nsx_constants.py ===================================
# Copyright 2015 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# L2 agent vif type
VIF_TYPE_DVS = 'dvs'

# NSXv3 CORE PLUGIN PATH
VMWARE_NSX_V3_PLUGIN_NAME = 'vmware_nsxv3'

INTERNAL_V3_TENANT_ID = 'v3_internal_project'


# === vmware_nsx/common/nsx_utils.py =======================================
# Copyright 2013 VMware Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron_lib.api.definitions import multiprovidernet as mpnet_apidef
from neutron_lib.api.definitions import provider_net as pnet
from neutron_lib.api import validators
from neutron_lib import constants
from oslo_log import log

from vmware_nsx.api_client import client
from vmware_nsx.common import utils as vmw_utils
from vmware_nsx.db import db as nsx_db
from vmware_nsx import nsx_cluster
from vmware_nsx.nsxlib.mh import switch as switchlib

LOG = log.getLogger(__name__)


def get_nsx_switch_ids(session, cluster, neutron_network_id):
    """Return the NSX switch ids for a given neutron network.

    First lookup for mappings in Neutron database. If no mapping is
    found, query the NSX backend and add the mappings.  Returns None when
    the backend has no switches for the network.
    """
    nsx_switch_ids = nsx_db.get_nsx_switch_ids(session, neutron_network_id)
    if nsx_switch_ids:
        return nsx_switch_ids
    # Find logical switches from backend.
    # This is a rather expensive query, but it won't be executed
    # more than once for each network in Neutron's lifetime
    nsx_switches = switchlib.get_lswitches(cluster, neutron_network_id)
    if not nsx_switches:
        LOG.warning("Unable to find NSX switches for Neutron network "
                    "%s", neutron_network_id)
        return
    nsx_switch_ids = []
    with session.begin(subtransactions=True):
        for nsx_switch in nsx_switches:
            nsx_switch_id = nsx_switch['uuid']
            nsx_switch_ids.append(nsx_switch_id)
            # Create DB mapping
            nsx_db.add_neutron_nsx_network_mapping(
                session, neutron_network_id, nsx_switch_id)
    return nsx_switch_ids


def get_nsx_switch_and_port_id(session, cluster, neutron_port_id):
    """Return the NSX switch and port uuids for a given neutron port.

    First, look up the Neutron database. If not found, execute a query on
    the NSX platform as the mapping might be missing because the port was
    created before upgrading to grizzly.

    This routine also retrieves the identifier of the logical switch in
    the backend where the port is plugged. Prior to Icehouse this
    information was not available in the Neutron Database. For dealing
    with pre-existing records, this routine will query the backend for
    retrieving the correct switch identifier. As of Icehouse release it is
    not indeed anymore possible to assume the backend logical switch
    identifier is equal to the neutron network identifier.
    """
    nsx_switch_id, nsx_port_id = nsx_db.get_nsx_switch_and_port_id(
        session, neutron_port_id)
    if not nsx_switch_id:
        # Find logical switch for port from backend
        # This is a rather expensive query, but it won't be executed
        # more than once for each port in Neutron's lifetime
        nsx_ports = switchlib.query_lswitch_lports(
            cluster, '*', relations='LogicalSwitchConfig',
            filters={'tag': neutron_port_id, 'tag_scope': 'q_port_id'})
        # Only one result expected
        # NOTE(salv-orlando): Not handling the case where more than one
        # port is found with the same neutron port tag
        if not nsx_ports:
            LOG.warning("Unable to find NSX port for Neutron port %s",
                        neutron_port_id)
            # This method is supposed to return a tuple
            return None, None
        nsx_port = nsx_ports[0]
        nsx_switch_id = (nsx_port['_relations']
                         ['LogicalSwitchConfig']['uuid'])
        if nsx_port_id:
            # Mapping already exists. Delete before recreating
            nsx_db.delete_neutron_nsx_port_mapping(session, neutron_port_id)
        else:
            nsx_port_id = nsx_port['uuid']
        # (re)Create DB mapping
        nsx_db.add_neutron_nsx_port_mapping(
            session, neutron_port_id, nsx_switch_id, nsx_port_id)
    return nsx_switch_id, nsx_port_id


def create_nsx_cluster(cluster_opts, concurrent_connections, gen_timeout):
    """Build an NSXCluster and attach a configured API client to it."""
    cluster = nsx_cluster.NSXCluster(**cluster_opts)

    def _ctrl_split(x, y):
        # "host:port" -> (host, port, is_secure)
        return (x, int(y), True)

    api_providers = [_ctrl_split(*ctrl.split(':'))
                     for ctrl in cluster.nsx_controllers]
    cluster.api_client = client.NsxApiClient(
        api_providers, cluster.nsx_user, cluster.nsx_password,
        http_timeout=cluster.http_timeout,
        retries=cluster.retries,
        redirects=cluster.redirects,
        concurrent_connections=concurrent_connections,
        gen_timeout=gen_timeout)
    return cluster


def _convert_bindings_to_nsx_transport_zones(bindings):
    """Translate DB network bindings into NSX transport zone dicts."""
    nsx_transport_zones_config = []
    for binding in bindings:
        transport_entry = {}
        if binding.binding_type in [vmw_utils.NetworkTypes.FLAT,
                                    vmw_utils.NetworkTypes.VLAN]:
            transport_entry['transport_type'] = (
                vmw_utils.NetworkTypes.BRIDGE)
            transport_entry['binding_config'] = {}
            vlan_id = binding.vlan_id
            if vlan_id:
                transport_entry['binding_config'] = (
                    {'vlan_translation': [{'transport': vlan_id}]})
        else:
            transport_entry['transport_type'] = binding.binding_type
        transport_entry['zone_uuid'] = binding.phy_uuid
        nsx_transport_zones_config.append(transport_entry)
    return nsx_transport_zones_config


def _convert_segments_to_nsx_transport_zones(segments, default_tz_uuid):
    """Translate multiprovider segments into NSX transport zone dicts."""
    nsx_transport_zones_config = []
    for transport_zone in segments:
        # Normalize unspecified attributes to None first.
        for value in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
                      pnet.SEGMENTATION_ID]:
            if transport_zone.get(value) == constants.ATTR_NOT_SPECIFIED:
                transport_zone[value] = None
        transport_entry = {}
        transport_type = transport_zone.get(pnet.NETWORK_TYPE)
        if transport_type in [vmw_utils.NetworkTypes.FLAT,
                              vmw_utils.NetworkTypes.VLAN]:
            transport_entry['transport_type'] = (
                vmw_utils.NetworkTypes.BRIDGE)
            transport_entry['binding_config'] = {}
            vlan_id = transport_zone.get(pnet.SEGMENTATION_ID)
            if vlan_id:
                transport_entry['binding_config'] = (
                    {'vlan_translation': [{'transport': vlan_id}]})
        else:
            transport_entry['transport_type'] = transport_type
        transport_entry['zone_uuid'] = (
            transport_zone[pnet.PHYSICAL_NETWORK] or default_tz_uuid)
        nsx_transport_zones_config.append(transport_entry)
    return nsx_transport_zones_config


def convert_to_nsx_transport_zones(
    default_tz_uuid, network=None, bindings=None,
    default_transport_type=None):
    """Build NSX transport zone configuration.

    Priority: a network without explicit segments yields the default zone;
    otherwise DB bindings, and finally multiprovider segments, are
    converted.
    """
    # Convert fields from provider request to nsx format
    if (network and not validators.is_attr_set(
            network.get(mpnet_apidef.SEGMENTS))):
        return [{"zone_uuid": default_tz_uuid,
                 "transport_type": default_transport_type}]

    # Convert fields from db to nsx format
    if bindings:
        return _convert_bindings_to_nsx_transport_zones(bindings)

    # If we end up here we need to convert multiprovider segments into nsx
    # transport zone configurations
    return _convert_segments_to_nsx_transport_zones(
        network.get(mpnet_apidef.SEGMENTS), default_tz_uuid)
# === vmware_nsx/common/nsxv_constants.py ==================================
# Copyright 2014 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Edge size
COMPACT = 'compact'
LARGE = 'large'
XLARGE = 'xlarge'
QUADLARGE = 'quadlarge'

EXCLUSIVE = "exclusive"

# Edge type
SERVICE_EDGE = 'service'
VDR_EDGE = 'vdr'

# Internal element purpose
INTER_EDGE_PURPOSE = 'inter_edge_net'

# etc
INTERNAL_TENANT_ID = 'metadata_internal_project'

# L2 gateway edge name prefix
L2_GATEWAY_EDGE = 'L2 bridging'

# An artificial limit for router name length - subtract 1 for the - separator
ROUTER_NAME_LENGTH = (78 - 1)

# LoadBalancer Certificate constants
# NOTE(abhiraut): Number of days specify the total number of days for which
#                 the certificate will be active. This certificate will
#                 expire in 10 years. Once the backend API allows creation
#                 of certs which do not expire, the following constant
#                 should be removed.
CERT_NUMBER_OF_DAYS = 3650
# NOTE(review): this literal looks like a CSR template whose XML tags were
# stripped by an earlier transformation -- verify against the upstream
# source before relying on its exact contents.
CSR_REQUEST = ("" "CNmetadata.nsx.local" "" "OOrganization" "OUUnit"
               "LLocality" "STState" "CUS" "RSA2048" "")

# Reserved IPs that cannot overlap defined subnets
RESERVED_IPS = ["169.254.128.0/17",
                "169.254.1.0/24",
                "169.254.64.192/26"]

# VPNaaS constants
ENCRYPTION_ALGORITHM_MAP = {
    '3des': '3des',
    'aes-128': 'aes',
    'aes-256': 'aes256',
}

PFS_MAP = {
    'group2': 'dh2',
    'group5': 'dh5',
}

TRANSFORM_PROTOCOL_ALLOWED = ('esp',)

ENCAPSULATION_MODE_ALLOWED = ('tunnel',)


# === vmware_nsx/common/profile.py =========================================
# Copyright 2019 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import time

from oslo_log import log as logging

LOG = logging.getLogger(__name__)


def profile(func):
    """Decorator that logs the wall-clock run time of *func* at DEBUG."""
    def wrap(*args, **kwargs):
        f_name = '{}.{}'.format(func.__module__, func.__name__)
        started_at = time.time()
        result = func(*args, **kwargs)
        LOG.debug(">>>>>>>>>>>>> Method %(method)s execution time %(time)f",
                  {'method': f_name, 'time': time.time() - started_at})
        return result
    return wrap


# === vmware_nsx/common/utils.py ===========================================
# Copyright 2013 VMware, Inc.
# All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import inspect import re from distutils import version import functools import hashlib import xml.etree.ElementTree as et import eventlet import six import tenacity from tenacity import _utils as tenacity_utils from neutron._i18n import _ from neutron import version as n_version from neutron_lib.api.definitions import provider_net from neutron_lib.api import validators from neutron_lib import constants from neutron_lib import exceptions as n_exc from oslo_context import context as common_context from oslo_log import log from vmware_nsxlib.v3 import nsx_constants as v3_const LOG = log.getLogger(__name__) MAX_DISPLAY_NAME_LEN = 40 NEUTRON_VERSION = n_version.version_info.release_string() OS_NEUTRON_ID_SCOPE = 'os-neutron-id' # Allowed network types for the NSX Plugin class NetworkTypes(object): """Allowed provider network types for the NSX Plugin.""" L3_EXT = 'l3_ext' STT = 'stt' GRE = 'gre' FLAT = 'flat' VLAN = 'vlan' BRIDGE = 'bridge' PORTGROUP = 'portgroup' LOCAL = 'local' # Allowed network types for the NSX-v Plugin class NsxVNetworkTypes(object): """Allowed provider network types for the NSX-v Plugin.""" FLAT = 'flat' VLAN = 'vlan' VXLAN = 'vxlan' PORTGROUP = 'portgroup' # Allowed network types for the NSXv3 and NSX-Policy Plugin class NsxV3NetworkTypes(object): """Allowed provider network types for the NSXv3 Plugin.""" FLAT = 'flat' VLAN = 'vlan' GENEVE = 'geneve' NSX_NETWORK = 'nsx-net' def 
is_nsx_version_1_1_0(nsx_version): return (version.LooseVersion(nsx_version) >= version.LooseVersion(v3_const.NSX_VERSION_1_1_0)) def is_nsx_version_2_0_0(nsx_version): return (version.LooseVersion(nsx_version) >= version.LooseVersion(v3_const.NSX_VERSION_2_0_0)) def is_nsx_version_2_1_0(nsx_version): return (version.LooseVersion(nsx_version) >= version.LooseVersion(v3_const.NSX_VERSION_2_1_0)) def is_nsx_version_2_4_0(nsx_version): return (version.LooseVersion(nsx_version) >= version.LooseVersion(v3_const.NSX_VERSION_2_4_0)) def is_nsx_version_2_5_0(nsx_version): return (version.LooseVersion(nsx_version) >= version.LooseVersion(v3_const.NSX_VERSION_2_5_0)) def is_nsx_version_3_0_0(nsx_version): return (version.LooseVersion(nsx_version) >= version.LooseVersion(v3_const.NSX_VERSION_3_0_0)) def is_nsxv_version_6_2(nsx_version): return (version.LooseVersion(nsx_version) >= version.LooseVersion('6.2')) def is_nsxv_version_6_3(nsx_version): return (version.LooseVersion(nsx_version) >= version.LooseVersion('6.3')) def is_nsxv_version_6_4_6(nsx_version): return (version.LooseVersion(nsx_version) >= version.LooseVersion('6.4.6')) def is_nsxv_dhcp_binding_supported(nsx_version): return ((version.LooseVersion(nsx_version) >= version.LooseVersion('6.3.3')) or (version.LooseVersion(nsx_version) >= version.LooseVersion('6.2.8') and version.LooseVersion(nsx_version) < version.LooseVersion('6.3'))) def get_tags(**kwargs): tags = ([dict(tag=value, scope=key) for key, value in six.iteritems(kwargs)]) tags.append({"tag": NEUTRON_VERSION, "scope": "quantum"}) return sorted(tags, key=lambda x: x['tag']) def device_id_to_vm_id(device_id, obfuscate=False): # device_id can be longer than 40 characters, for example # a device_id for a dhcp port is like the following: # # dhcp83b5fdeb-e3b4-5e18-ac5f-55161...80747326-47d7-46c2-a87a-cf6d5194877c # # To fit it into an NSX tag we need to hash it, however device_id # used for ports associated to VM's are small enough so let's skip the # hashing 
if len(device_id) > MAX_DISPLAY_NAME_LEN or obfuscate: return hashlib.sha1(device_id.encode()).hexdigest() else: return device_id or "N/A" def check_and_truncate(display_name): if (validators.is_attr_set(display_name) and len(display_name) > MAX_DISPLAY_NAME_LEN): LOG.debug("Specified name:'%s' exceeds maximum length. " "It will be truncated on NSX", display_name) return display_name[:MAX_DISPLAY_NAME_LEN] return display_name or '' def normalize_xml(data): data = data.encode('ascii', 'ignore') return et.fromstring(data) def _get_bad_request_error_code(e): """Get the error code out of the exception""" try: desc = normalize_xml(e.response) return int(desc.find('errorCode').text) except Exception: pass def _log_before_retry(retry_state): """Before call strategy that logs to some logger the attempt.""" if retry_state.attempt_number > 1: LOG.warning("Retrying call to '%(func)s' for the %(num)s time", {'func': tenacity_utils.get_callback_name( retry_state.fn), 'num': tenacity_utils.to_ordinal( retry_state.attempt_number)}) def _get_args_from_frame(frames, frame_num): if len(frames) > frame_num and frames[frame_num] and frames[frame_num][0]: # pylint: disable=deprecated-method argvalues = inspect.getargvalues(frames[frame_num][0]) # pylint: disable=deprecated-method formated_args = inspect.formatargvalues(*argvalues) # remove the first 'self' arg from the log as it adds no information formated_args = re.sub(r'\(self=.*?, ', "(", formated_args) return formated_args def _log_after_retry(retry_state): """After call strategy that logs to some logger the finished attempt.""" # Using inspect to get arguments of the relevant call frames = inspect.trace() # Look at frame #2 first because of the internal functions _do_X formated_args = _get_args_from_frame(frames, 2) if not formated_args: formated_args = _get_args_from_frame(frames, 1) if not formated_args: formated_args = "Unknown" LOG.warning("Finished retry of %(func)s for the %(num)s time after " "%(time)0.3f(s) with args: 
%(args)s", {'func': tenacity_utils.get_callback_name(retry_state.fn), 'num': tenacity_utils.to_ordinal(retry_state.attempt_number), 'time': retry_state.seconds_since_start, 'args': formated_args}) def retry_upon_exception_exclude_error_codes( exc, excluded_errors, delay, max_delay, max_attempts): """Retry with the configured exponential delay, unless the exception error code is in the given list """ def retry_if_not_error_codes(e): # return True only for BadRequests without error codes or with error # codes not in the exclude list if isinstance(e, exc): error_code = _get_bad_request_error_code(e) if error_code and error_code not in excluded_errors: return True return False return tenacity.retry(reraise=True, retry=tenacity.retry_if_exception( retry_if_not_error_codes), wait=tenacity.wait_exponential( multiplier=delay, max=max_delay), stop=tenacity.stop_after_attempt(max_attempts), before=_log_before_retry, after=_log_after_retry) def retry_upon_exception(exc, delay, max_delay, max_attempts): return tenacity.retry(reraise=True, retry=tenacity.retry_if_exception_type(exc), wait=tenacity.wait_exponential( multiplier=delay, max=max_delay), stop=tenacity.stop_after_attempt(max_attempts), before=_log_before_retry, after=_log_after_retry) def read_file(path): try: with open(path) as file: return file.read().strip() except IOError as e: LOG.error("Error while opening file " "%(path)s: %(err)s", {'path': path, 'err': str(e)}) def get_name_and_uuid(name, uuid, tag=None, maxlen=80): short_uuid = '_' + uuid[:5] + '...' 
+ uuid[-5:] maxlen = maxlen - len(short_uuid) if tag: maxlen = maxlen - len(tag) - 1 return name[:maxlen] + '_' + tag + short_uuid else: return name[:maxlen] + short_uuid def is_ipv4_ip_address(addr): def _valid_part(part): try: int_part = int(part) if int_part < 0 or int_part > 255: return False return True except ValueError: return False parts = str(addr).split('.') if len(parts) != 4: return False for ip_part in parts: if not _valid_part(ip_part): return False return True def is_port_dhcp_configurable(port): owner = port.get('device_owner') return (owner and not owner.startswith(constants.DEVICE_OWNER_NETWORK_PREFIX)) def spawn_n(func, *args, **kwargs): """Passthrough method for eventlet.spawn_n. This utility exists so that it can be stubbed for testing without interfering with the service spawns. It will also grab the context from the threadlocal store and add it to the store on the new thread. This allows for continuity in logging the context when using this method to spawn a new thread. """ _context = common_context.get_current() @functools.wraps(func) def context_wrapper(*args, **kwargs): # NOTE: If update_store is not called after spawn_n it won't be # available for the logger to pull from threadlocal storage. if _context is not None: _context.update_store() func(*args, **kwargs) eventlet.spawn_n(context_wrapper, *args, **kwargs) def raise_if_updates_provider_attributes(attrs): """Raise exception if provider attributes are present. This method is used for plugins that do not support updating provider network attributes. 
""" if any(validators.is_attr_set(attrs.get(a)) for a in provider_net.ATTRIBUTES): msg = _("Plugin does not support updating provider attributes") raise n_exc.InvalidInput(error_message=msg) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1862533 vmware-nsx-15.0.1.dev143/vmware_nsx/db/0000755000175000017500000000000000000000000017755 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/__init__.py0000644000175000017500000000000000000000000022054 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/db.py0000644000175000017500000006762700000000000020736 0ustar00coreycorey00000000000000# Copyright 2012 VMware, Inc. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import six from sqlalchemy.orm import exc from neutron_lib.db import api as db_api from oslo_db import exception as db_exc from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import uuidutils from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.db import nsx_models LOG = logging.getLogger(__name__) def _apply_filters_to_query(query, model, filters, like_filters=None): if filters: for key, value in six.iteritems(filters): column = getattr(model, key, None) if column: query = query.filter(column.in_(value)) if like_filters: for key, search_term in six.iteritems(like_filters): column = getattr(model, key, None) if column: query = query.filter(column.like(search_term)) return query def get_network_bindings(session, network_id): session = session or db_api.get_reader_session() return (session.query(nsx_models.TzNetworkBinding). filter_by(network_id=network_id). all()) def get_network_bindings_by_phy_uuid(session, phy_uuid): session = session or db_api.get_reader_session() return (session.query(nsx_models.TzNetworkBinding). filter_by(phy_uuid=phy_uuid). all()) def get_network_bindings_by_vlanid_and_physical_net(session, vlan_id, phy_uuid): session = session or db_api.get_reader_session() return (session.query(nsx_models.TzNetworkBinding). filter_by(vlan_id=vlan_id, phy_uuid=phy_uuid). all()) def delete_network_bindings(session, network_id): return (session.query(nsx_models.TzNetworkBinding). 
filter_by(network_id=network_id).delete()) def add_network_binding(session, network_id, binding_type, phy_uuid, vlan_id): with session.begin(subtransactions=True): binding = nsx_models.TzNetworkBinding(network_id, binding_type, phy_uuid, vlan_id) session.add(binding) return binding def add_neutron_nsx_network_mapping(session, neutron_id, nsx_switch_id, dvs_id=None): with session.begin(subtransactions=True): mapping = nsx_models.NeutronNsxNetworkMapping( neutron_id=neutron_id, nsx_id=nsx_switch_id, dvs_id=dvs_id) session.add(mapping) return mapping def delete_neutron_nsx_network_mapping(session, neutron_id): return (session.query(nsx_models.NeutronNsxNetworkMapping). filter_by(neutron_id=neutron_id).delete()) def add_neutron_nsx_port_mapping(session, neutron_id, nsx_switch_id, nsx_port_id): session.begin(subtransactions=True) try: mapping = nsx_models.NeutronNsxPortMapping( neutron_id, nsx_switch_id, nsx_port_id) session.add(mapping) session.commit() except db_exc.DBDuplicateEntry: with excutils.save_and_reraise_exception() as ctxt: session.rollback() # do not complain if the same exact mapping is being added, # otherwise re-raise because even though it is possible for the # same neutron port to map to different back-end ports over time, # this should not occur whilst a mapping already exists current = get_nsx_switch_and_port_id(session, neutron_id) if current[1] == nsx_port_id: LOG.debug("Port mapping for %s already available", neutron_id) ctxt.reraise = False except db_exc.DBError: with excutils.save_and_reraise_exception(): # rollback for any other db error session.rollback() return mapping def add_neutron_nsx_router_mapping(session, neutron_id, nsx_router_id): with session.begin(subtransactions=True): mapping = nsx_models.NeutronNsxRouterMapping( neutron_id=neutron_id, nsx_id=nsx_router_id) session.add(mapping) return mapping def add_neutron_nsx_security_group_mapping(session, neutron_id, nsx_id): """Map a Neutron security group to a NSX security profile. 
:param session: a valid database session object :param neutron_id: a neutron security group identifier :param nsx_id: a nsx security profile identifier """ with session.begin(subtransactions=True): mapping = nsx_models.NeutronNsxSecurityGroupMapping( neutron_id=neutron_id, nsx_id=nsx_id) session.add(mapping) return mapping def get_nsx_service_binding(session, network_id, service_type): try: return session.query(nsx_models.NeutronNsxServiceBinding).filter_by( network_id=network_id, nsx_service_type=service_type).one() except exc.NoResultFound: LOG.debug("NSX %s service not enabled on network %s", service_type, network_id) def add_neutron_nsx_service_binding(session, network_id, port_id, service_type, service_id): """Store enabled NSX services on each Neutron network. :param session: database session object :param network_id: identifier of Neutron network enabling the service :param port_id: identifier of Neutron port providing the service :param service_type: type of NSX service :param service_id: identifier of NSX service """ with session.begin(subtransactions=True): binding = nsx_models.NeutronNsxServiceBinding( network_id=network_id, port_id=port_id, nsx_service_type=service_type, nsx_service_id=service_id) session.add(binding) return binding def delete_neutron_nsx_service_binding(session, network_id, service_type): return session.query(nsx_models.NeutronNsxServiceBinding).filter_by( network_id=network_id, nsx_service_type=service_type).delete() def update_nsx_dhcp_bindings(session, port_id, org_ip, new_ip): try: with session.begin(subtransactions=True): binding = (session.query(nsx_models.NeutronNsxDhcpBinding). 
filter_by(port_id=port_id, ip_address=org_ip).one()) binding.ip_address = new_ip except exc.NoResultFound: LOG.debug("Binding not found for port %s", port_id) return def get_nsx_dhcp_bindings(session, port_id): return [binding for binding in session.query( nsx_models.NeutronNsxDhcpBinding).filter_by(port_id=port_id)] def get_nsx_dhcp_bindings_by_service(session, service_id): return [binding for binding in session.query( nsx_models.NeutronNsxDhcpBinding).filter_by(nsx_service_id=service_id)] def add_neutron_nsx_dhcp_binding(session, port_id, subnet_id, ip_address, service_id, binding_id): """Store DHCP binding of each Neutron port. :param session: database session object :param port_id: identifier of Neutron port with DHCP binding :param subnet_id: identifier of Neutron subnet for the port :param ip_address: IP address for the port in this subnet. :param service_id: identifier of NSX DHCP service :param binding_id: identifier of NSX DHCP binding """ with session.begin(subtransactions=True): binding = nsx_models.NeutronNsxDhcpBinding( port_id=port_id, subnet_id=subnet_id, ip_address=ip_address, nsx_service_id=service_id, nsx_binding_id=binding_id) session.add(binding) return binding def delete_neutron_nsx_dhcp_binding(session, port_id, binding_id): return session.query(nsx_models.NeutronNsxDhcpBinding).filter_by( port_id=port_id, nsx_binding_id=binding_id).delete() def delete_neutron_nsx_dhcp_bindings_by_service_id(session, service_id): return session.query(nsx_models.NeutronNsxDhcpBinding).filter_by( nsx_service_id=service_id).delete() def get_nsx_switch_ids(session, neutron_id): # This function returns a list of NSX switch identifiers because of # the possibility of chained logical switches return [mapping['nsx_id'] for mapping in session.query(nsx_models.NeutronNsxNetworkMapping).filter_by( neutron_id=neutron_id)] def get_nsx_network_mappings(session, neutron_id): # This function returns a list of NSX switch identifiers because of # the possibility of chained 
logical switches return session.query(nsx_models.NeutronNsxNetworkMapping).filter_by( neutron_id=neutron_id).all() def get_nsx_switch_id_for_dvs(session, neutron_id, dvs_id): """Retrieve the NSX switch ID for a given DVS ID and neutron network.""" try: mapping = (session.query(nsx_models.NeutronNsxNetworkMapping). filter_by(neutron_id=neutron_id, dvs_id=dvs_id).one()) return mapping['nsx_id'] except exc.NoResultFound: LOG.debug("NSX switch for dvs-id: %s not yet stored in Neutron DB", dvs_id) def get_net_ids(session, nsx_id): return [mapping['neutron_id'] for mapping in get_nsx_network_mapping_for_nsx_id(session, nsx_id)] def get_nsx_network_mapping_for_nsx_id(session, nsx_id): return session.query(nsx_models.NeutronNsxNetworkMapping).filter_by( nsx_id=nsx_id).all() def get_nsx_networks_mapping(session): return session.query(nsx_models.NeutronNsxNetworkMapping).all() def get_nsx_switch_and_port_id(session, neutron_id): try: mapping = (session.query(nsx_models.NeutronNsxPortMapping). filter_by(neutron_id=neutron_id). one()) return mapping['nsx_switch_id'], mapping['nsx_port_id'] except exc.NoResultFound: LOG.debug("NSX identifiers for neutron port %s not yet " "stored in Neutron DB", neutron_id) return None, None def get_nsx_router_id(session, neutron_id): try: mapping = (session.query(nsx_models.NeutronNsxRouterMapping). filter_by(neutron_id=neutron_id).one()) return mapping['nsx_id'] except exc.NoResultFound: LOG.debug("NSX identifiers for neutron router %s not yet " "stored in Neutron DB", neutron_id) def get_neutron_from_nsx_router_id(session, nsx_router_id): try: mapping = (session.query(nsx_models.NeutronNsxRouterMapping). filter_by(nsx_id=nsx_router_id).one()) return mapping['neutron_id'] except exc.NoResultFound: LOG.debug("Couldn't find router with nsx id %s", nsx_router_id) def get_nsx_security_group_id(session, neutron_id, moref=False): """Return the id of a security group in the NSX backend. 
Note: security groups are called 'security profiles' in NSX """ try: mappings = (session.query(nsx_models.NeutronNsxSecurityGroupMapping). filter_by(neutron_id=neutron_id). all()) for mapping in mappings: if moref and not uuidutils.is_uuid_like(mapping['nsx_id']): return mapping['nsx_id'] if not moref and uuidutils.is_uuid_like(mapping['nsx_id']): return mapping['nsx_id'] except exc.NoResultFound: LOG.debug("NSX identifiers for neutron security group %s not yet " "stored in Neutron DB", neutron_id) return None def _delete_by_neutron_id(session, model, neutron_id): return session.query(model).filter_by(neutron_id=neutron_id).delete() def delete_neutron_nsx_port_mapping(session, neutron_id): return _delete_by_neutron_id( session, nsx_models.NeutronNsxPortMapping, neutron_id) def delete_neutron_nsx_router_mapping(session, neutron_id): return _delete_by_neutron_id( session, nsx_models.NeutronNsxRouterMapping, neutron_id) def unset_default_network_gateways(session): with session.begin(subtransactions=True): session.query(nsx_models.NetworkGateway).update( {nsx_models.NetworkGateway.default: False}) def set_default_network_gateway(session, gw_id): with session.begin(subtransactions=True): gw = (session.query(nsx_models.NetworkGateway). filter_by(id=gw_id).one()) gw['default'] = True def set_multiprovider_network(session, network_id): with session.begin(subtransactions=True): multiprovider_network = nsx_models.MultiProviderNetworks( network_id) session.add(multiprovider_network) return multiprovider_network def is_multiprovider_network(session, network_id): with session.begin(subtransactions=True): return bool( session.query(nsx_models.MultiProviderNetworks).filter_by( network_id=network_id).first()) # NSXv3 L2 Gateway DB methods. 
def add_l2gw_connection_mapping(session, connection_id, bridge_endpoint_id, port_id): with session.begin(subtransactions=True): mapping = nsx_models.NsxL2GWConnectionMapping( connection_id=connection_id, port_id=port_id, bridge_endpoint_id=bridge_endpoint_id) session.add(mapping) return mapping def get_l2gw_connection_mapping(session, connection_id): try: return (session.query(nsx_models.NsxL2GWConnectionMapping). filter_by(connection_id=connection_id).one()) except exc.NoResultFound: pass # NSXv3 QoS policy id <-> switch Id mapping def add_qos_policy_profile_mapping(session, qos_policy_id, switch_profile_id): with session.begin(subtransactions=True): mapping = nsx_models.QosPolicySwitchProfile( qos_policy_id=qos_policy_id, switch_profile_id=switch_profile_id) session.add(mapping) return mapping def get_switch_profile_by_qos_policy(session, qos_policy_id): try: entry = (session.query(nsx_models.QosPolicySwitchProfile). filter_by(qos_policy_id=qos_policy_id).one()) return entry.switch_profile_id except exc.NoResultFound: raise nsx_exc.NsxQosPolicyMappingNotFound(policy=qos_policy_id) def delete_qos_policy_profile_mapping(session, qos_policy_id): return (session.query(nsx_models.QosPolicySwitchProfile). filter_by(qos_policy_id=qos_policy_id).delete()) # NSXv3 Port Mirror Sessions DB methods. def add_port_mirror_session_mapping(session, tf_id, pm_session_id): with session.begin(subtransactions=True): mapping = nsx_models.NsxPortMirrorSessionMapping( tap_flow_id=tf_id, port_mirror_session_id=pm_session_id) session.add(mapping) return mapping def get_port_mirror_session_mapping(session, tf_id): try: return (session.query(nsx_models.NsxPortMirrorSessionMapping). filter_by(tap_flow_id=tf_id).one()) except exc.NoResultFound: raise nsx_exc.NsxPortMirrorSessionMappingNotFound(tf=tf_id) def delete_port_mirror_session_mapping(session, tf_id): return (session.query(nsx_models.NsxPortMirrorSessionMapping). 
filter_by(tap_flow_id=tf_id).delete()) @db_api.CONTEXT_WRITER def save_sg_mappings(context, sg_id, nsgroup_id, section_id): context.session.add( nsx_models.NeutronNsxFirewallSectionMapping(neutron_id=sg_id, nsx_id=section_id)) context.session.add( nsx_models.NeutronNsxSecurityGroupMapping(neutron_id=sg_id, nsx_id=nsgroup_id)) def delete_sg_mappings(context, sg_id, nsgroup_id, section_id): context.session.query( nsx_models.NeutronNsxFirewallSectionMapping).filter_by( neutron_id=sg_id, nsx_id=section_id).delete() context.session.query( nsx_models.NeutronNsxSecurityGroupMapping).filter_by( neutron_id=sg_id, nsx_id=nsgroup_id).delete() def get_sg_mappings(session, sg_id, moref=False): nsgroup_mappings = session.query( nsx_models.NeutronNsxSecurityGroupMapping ).filter_by(neutron_id=sg_id).all() nsgroup_mapping = section_mapping = None for mapping in nsgroup_mappings: if moref and not uuidutils.is_uuid_like(mapping['nsx_id']): nsgroup_mapping = mapping['nsx_id'] break if not moref and uuidutils.is_uuid_like(mapping['nsx_id']): nsgroup_mapping = mapping['nsx_id'] break section_mappings = session.query( nsx_models.NeutronNsxFirewallSectionMapping ).filter_by(neutron_id=sg_id).all() for mapping in section_mappings: if moref and not uuidutils.is_uuid_like(mapping['nsx_id']): section_mapping = mapping['nsx_id'] break if not moref and uuidutils.is_uuid_like(mapping['nsx_id']): section_mapping = mapping['nsx_id'] break return nsgroup_mapping, section_mapping def get_sg_rule_mapping(session, rule_id): rule_mapping = session.query( nsx_models.NeutronNsxRuleMapping).filter_by( neutron_id=rule_id).one() return rule_mapping.nsx_id def save_sg_rule_mappings(session, rules): with session.begin(subtransactions=True): for neutron_id, nsx_id in rules: mapping = nsx_models.NeutronNsxRuleMapping( neutron_id=neutron_id, nsx_id=nsx_id) session.add(mapping) def add_nsx_ipam_subnet_pool(session, subnet_id, nsx_pool_id): with session.begin(subtransactions=True): binding = 
nsx_models.NsxSubnetIpam( subnet_id=subnet_id, nsx_pool_id=nsx_pool_id) session.add(binding) return binding def get_nsx_ipam_pool_for_subnet(session, subnet_id): try: entry = session.query( nsx_models.NsxSubnetIpam).filter_by( subnet_id=subnet_id).one() return entry.nsx_pool_id except exc.NoResultFound: return def del_nsx_ipam_subnet_pool(session, subnet_id, nsx_pool_id): return (session.query(nsx_models.NsxSubnetIpam). filter_by(subnet_id=subnet_id, nsx_pool_id=nsx_pool_id).delete()) def get_certificate(session, purpose): try: cert_entry = session.query( nsx_models.NsxCertificateRepository).filter_by( purpose=purpose).one() return cert_entry.certificate, cert_entry.private_key except exc.NoResultFound: return None, None def save_certificate(session, purpose, cert, pk): with session.begin(subtransactions=True): cert_entry = nsx_models.NsxCertificateRepository( purpose=purpose, certificate=cert, private_key=pk) session.add(cert_entry) def delete_certificate(session, purpose): return (session.query(nsx_models.NsxCertificateRepository). filter_by(purpose=purpose).delete()) def add_nsx_lbaas_loadbalancer_binding(session, loadbalancer_id, lb_service_id, lb_router_id, vip_address): with session.begin(subtransactions=True): binding = nsx_models.NsxLbaasLoadbalancer( loadbalancer_id=loadbalancer_id, lb_service_id=lb_service_id, lb_router_id=lb_router_id, vip_address=vip_address) session.add(binding) return binding def get_nsx_lbaas_loadbalancer_binding(session, loadbalancer_id): try: return session.query( nsx_models.NsxLbaasLoadbalancer).filter_by( loadbalancer_id=loadbalancer_id).one() except exc.NoResultFound: return def update_nsx_lbaas_loadbalancer_binding(session, loadbalancer_id, lb_router_id): with session.begin(subtransactions=True): binding = (session.query(nsx_models.NsxLbaasLoadbalancer). 
filter_by(loadbalancer_id=loadbalancer_id).one()) binding.lb_router_id = lb_router_id def get_nsx_lbaas_loadbalancer_bindings(session): return session.query(nsx_models.NsxLbaasLoadbalancer).all() def get_nsx_lbaas_loadbalancer_binding_by_service(session, lb_service_id): return session.query( nsx_models.NsxLbaasLoadbalancer).filter_by( lb_service_id=lb_service_id).all() def has_nsx_lbaas_loadbalancer_binding_by_router(session, nsx_router_id): try: bindings = session.query(nsx_models.NsxLbaasLoadbalancer).filter_by( lb_router_id=nsx_router_id).all() return len(bindings) > 0 except exc.NoResultFound: return False def delete_nsx_lbaas_loadbalancer_binding(session, loadbalancer_id): return (session.query(nsx_models.NsxLbaasLoadbalancer). filter_by(loadbalancer_id=loadbalancer_id).delete()) def add_nsx_lbaas_listener_binding(session, loadbalancer_id, listener_id, app_profile_id, lb_vs_id): with session.begin(subtransactions=True): binding = nsx_models.NsxLbaasListener( loadbalancer_id=loadbalancer_id, listener_id=listener_id, app_profile_id=app_profile_id, lb_vs_id=lb_vs_id) session.add(binding) return binding def get_nsx_lbaas_listener_binding(session, loadbalancer_id, listener_id): try: return session.query( nsx_models.NsxLbaasListener).filter_by( loadbalancer_id=loadbalancer_id, listener_id=listener_id).one() except exc.NoResultFound: return def get_nsx_lbaas_listener_binding_by_lb_and_vs(session, loadbalancer_id, lb_vs_id): try: return session.query( nsx_models.NsxLbaasListener).filter_by( loadbalancer_id=loadbalancer_id, lb_vs_id=lb_vs_id).one() except exc.NoResultFound: return def get_nsx_lbaas_listener_binding_by_vs_id(session, lb_vs_id): try: return session.query( nsx_models.NsxLbaasListener).filter_by( lb_vs_id=lb_vs_id).one() except exc.NoResultFound: return def delete_nsx_lbaas_listener_binding(session, loadbalancer_id, listener_id): return (session.query(nsx_models.NsxLbaasListener). 
filter_by(loadbalancer_id=loadbalancer_id, listener_id=listener_id).delete()) def add_nsx_lbaas_pool_binding(session, loadbalancer_id, pool_id, lb_pool_id, lb_vs_id=None): with session.begin(subtransactions=True): binding = nsx_models.NsxLbaasPool(loadbalancer_id=loadbalancer_id, pool_id=pool_id, lb_pool_id=lb_pool_id, lb_vs_id=lb_vs_id) session.add(binding) return binding def get_nsx_lbaas_pool_binding(session, loadbalancer_id, pool_id): try: return session.query(nsx_models.NsxLbaasPool).filter_by( loadbalancer_id=loadbalancer_id, pool_id=pool_id).one() except exc.NoResultFound: return def get_nsx_lbaas_pool_binding_by_lb_pool(session, loadbalancer_id, lb_pool_id): try: return session.query(nsx_models.NsxLbaasPool).filter_by( loadbalancer_id=loadbalancer_id, lb_pool_id=lb_pool_id).one() except exc.NoResultFound: return def update_nsx_lbaas_pool_binding(session, loadbalancer_id, pool_id, lb_vs_id): try: with session.begin(subtransactions=True): binding = (session.query(nsx_models.NsxLbaasPool). filter_by(loadbalancer_id=loadbalancer_id, pool_id=pool_id).one()) binding.lb_vs_id = lb_vs_id except exc.NoResultFound: LOG.debug("Binding not found for pool %s", pool_id) return def delete_nsx_lbaas_pool_binding(session, loadbalancer_id, pool_id): return (session.query(nsx_models.NsxLbaasPool). 
filter_by(loadbalancer_id=loadbalancer_id, pool_id=pool_id).delete()) def add_nsx_lbaas_monitor_binding(session, loadbalancer_id, pool_id, hm_id, lb_monitor_id, lb_pool_id): with session.begin(subtransactions=True): binding = nsx_models.NsxLbaasMonitor( loadbalancer_id=loadbalancer_id, pool_id=pool_id, hm_id=hm_id, lb_monitor_id=lb_monitor_id, lb_pool_id=lb_pool_id) session.add(binding) return binding def get_nsx_lbaas_monitor_binding(session, loadbalancer_id, pool_id, hm_id): try: return session.query(nsx_models.NsxLbaasMonitor).filter_by( loadbalancer_id=loadbalancer_id, pool_id=pool_id, hm_id=hm_id).one() except exc.NoResultFound: return def delete_nsx_lbaas_monitor_binding(session, loadbalancer_id, pool_id, hm_id): return (session.query(nsx_models.NsxLbaasMonitor). filter_by(loadbalancer_id=loadbalancer_id, pool_id=pool_id, hm_id=hm_id).delete()) def add_nsx_lbaas_l7policy_binding(session, l7policy_id, lb_rule_id, lb_vs_id): with session.begin(subtransactions=True): binding = nsx_models.NsxLbaasL7Policy( l7policy_id=l7policy_id, lb_rule_id=lb_rule_id, lb_vs_id=lb_vs_id) session.add(binding) return binding def get_nsx_lbaas_l7policy_binding(session, l7policy_id): try: return session.query(nsx_models.NsxLbaasL7Policy).filter_by( l7policy_id=l7policy_id).one() except exc.NoResultFound: return def delete_nsx_lbaas_l7policy_binding(session, l7policy_id): return (session.query(nsx_models.NsxLbaasL7Policy). 
filter_by(l7policy_id=l7policy_id).delete()) def add_project_plugin_mapping(session, project, plugin): with session.begin(subtransactions=True): binding = nsx_models.NsxProjectPluginMapping( project=project, plugin=plugin) session.add(binding) return binding def get_project_plugin_mapping(session, project): try: return session.query(nsx_models.NsxProjectPluginMapping).filter_by( project=project).one() except exc.NoResultFound: return def get_project_plugin_mappings(session): return session.query(nsx_models.NsxProjectPluginMapping).all() def get_project_plugin_mappings_by_plugin(session, plugin): return session.query(nsx_models.NsxProjectPluginMapping).filter_by( plugin=plugin).all() def update_project_plugin_mapping(session, project, plugin): with session.begin(subtransactions=True): binding = (session.query(nsx_models.NsxProjectPluginMapping). filter_by(project=project).one()) binding.plugin = plugin def add_nsx_vpn_connection_mapping(session, neutron_id, session_id, dpd_profile_id, ike_profile_id, ipsec_profile_id, peer_ep_id): with session.begin(subtransactions=True): mapping = nsx_models.NsxVpnConnectionMapping( neutron_id=neutron_id, session_id=session_id, dpd_profile_id=dpd_profile_id, ike_profile_id=ike_profile_id, ipsec_profile_id=ipsec_profile_id, peer_ep_id=peer_ep_id) session.add(mapping) return mapping def get_nsx_vpn_connection_mapping(session, neutron_id): try: return (session.query(nsx_models.NsxVpnConnectionMapping). filter_by(neutron_id=neutron_id).one()) except exc.NoResultFound: return def delete_nsx_vpn_connection_mapping(session, neutron_id): return (session.query(nsx_models.NsxVpnConnectionMapping). filter_by(neutron_id=neutron_id).delete()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/distributedrouter.py0000644000175000017500000000175700000000000024124 0ustar00coreycorey00000000000000# Copyright 2013 VMware, Inc. All rights reserved. 
# # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from neutron_lib.api.definitions import dvr as dvr_apidef from vmware_nsx.db import nsxrouter class DistributedRouter_mixin(nsxrouter.NsxRouterMixin): """Mixin class to enable distributed router support.""" nsx_attributes = ( nsxrouter.NsxRouterMixin.nsx_attributes + [{ 'name': dvr_apidef.DISTRIBUTED, 'default': False }]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/extended_security_group.py0000644000175000017500000004367000000000000025304 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from oslo_utils import uuidutils import sqlalchemy as sa from sqlalchemy import orm from sqlalchemy.orm import exc from sqlalchemy import sql from neutron.db.models import securitygroup as securitygroups_db from neutron.extensions import securitygroup as ext_sg from neutron_lib.api.definitions import port as port_def from neutron_lib.api import validators from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants as n_constants from neutron_lib.db import api as db_api from neutron_lib.db import model_base from neutron_lib.db import resource_extend from neutron_lib.objects import registry as obj_reg from neutron_lib.utils import helpers from neutron_lib.utils import net as n_utils from vmware_nsx.extensions import providersecuritygroup as provider_sg from vmware_nsx.extensions import securitygrouplogging as sg_logging from vmware_nsx.extensions import securitygrouppolicy as sg_policy LOG = logging.getLogger(__name__) class NsxExtendedSecurityGroupProperties(model_base.BASEV2): __tablename__ = 'nsx_extended_security_group_properties' security_group_id = sa.Column(sa.String(36), sa.ForeignKey('securitygroups.id', ondelete="CASCADE"), primary_key=True) logging = sa.Column(sa.Boolean, default=False, nullable=False) provider = sa.Column(sa.Boolean, default=False, server_default=sql.false(), nullable=False) policy = sa.Column(sa.String(36)) security_group = orm.relationship( securitygroups_db.SecurityGroup, backref=orm.backref('ext_properties', lazy='joined', uselist=False, cascade='delete')) @resource_extend.has_resource_extenders class ExtendedSecurityGroupPropertiesMixin(object): # NOTE(arosen): here we add a relationship so that from the ports model # it provides us access to SecurityGroupPortBinding and # NsxExtendedSecurityGroupProperties securitygroups_db.SecurityGroupPortBinding.extended_grp = orm.relationship( 
'NsxExtendedSecurityGroupProperties', foreign_keys="SecurityGroupPortBinding.security_group_id", primaryjoin=("NsxExtendedSecurityGroupProperties.security_group_id" "==SecurityGroupPortBinding.security_group_id")) def create_provider_security_group(self, context, security_group): return self.create_security_group_without_rules( context, security_group, False, True) def create_security_group_without_rules(self, context, security_group, default_sg, is_provider): """Create a neutron security group, without any default rules. This method creates a security group that does not by default enable egress traffic which normal neutron security groups do. """ s = security_group['security_group'] kwargs = { 'context': context, 'security_group': s, 'is_default': default_sg, } self._registry_notify(resources.SECURITY_GROUP, events.BEFORE_CREATE, exc_cls=ext_sg.SecurityGroupConflict, payload=events.DBEventPayload( context, metadata={'is_default': default_sg}, request_body=security_group, desired_state=s)) tenant_id = s['tenant_id'] if not default_sg: self._ensure_default_security_group(context, tenant_id) with db_api.CONTEXT_WRITER.using(context): sg = obj_reg.new_instance( 'SecurityGroup', context, id=s.get('id') or uuidutils.generate_uuid(), description=s.get('description', ''), project_id=tenant_id, name=s.get('name', ''), is_default=default_sg) # Note(asarfaty): for unknown reason, removing the 'is_default' # here allows the loading of the ext_properties of the security # group. 
If not - we will get DetachedInstanceError if 'is_default' in sg.fields_no_update: sg.fields_no_update.remove('is_default') sg.create() secgroup_dict = self._make_security_group_dict(sg) secgroup_dict[sg_policy.POLICY] = s.get(sg_policy.POLICY) secgroup_dict[provider_sg.PROVIDER] = is_provider kwargs['security_group'] = secgroup_dict registry.notify(resources.SECURITY_GROUP, events.AFTER_CREATE, self, **kwargs) return secgroup_dict def _process_security_group_properties_create(self, context, sg_res, sg_req, default_sg=False): self._validate_security_group_properties_create( context, sg_req, default_sg) with db_api.CONTEXT_WRITER.using(context): properties = NsxExtendedSecurityGroupProperties( security_group_id=sg_res['id'], logging=sg_req.get(sg_logging.LOGGING, False), provider=sg_req.get(provider_sg.PROVIDER, False), policy=sg_req.get(sg_policy.POLICY)) context.session.add(properties) sg_res[sg_logging.LOGGING] = sg_req.get(sg_logging.LOGGING, False) sg_res[provider_sg.PROVIDER] = sg_req.get(provider_sg.PROVIDER, False) sg_res[sg_policy.POLICY] = sg_req.get(sg_policy.POLICY) def _get_security_group_properties(self, context, security_group_id): with db_api.CONTEXT_READER.using(context): try: prop = context.session.query( NsxExtendedSecurityGroupProperties).filter_by( security_group_id=security_group_id).one() except exc.NoResultFound: raise ext_sg.SecurityGroupNotFound(id=security_group_id) return prop def _process_security_group_properties_update(self, context, sg_res, sg_req): if ((sg_logging.LOGGING in sg_req and (sg_req[sg_logging.LOGGING] != sg_res.get(sg_logging.LOGGING, False))) or (sg_policy.POLICY in sg_req and (sg_req[sg_policy.POLICY] != sg_res.get(sg_policy.POLICY)))): with db_api.CONTEXT_WRITER.using(context): prop = context.session.query( NsxExtendedSecurityGroupProperties).filter_by( security_group_id=sg_res['id']).one() prop.logging = sg_req.get(sg_logging.LOGGING, False) prop.policy = sg_req.get(sg_policy.POLICY) sg_res[sg_logging.LOGGING] = 
sg_req.get(sg_logging.LOGGING, False) sg_res[sg_policy.POLICY] = sg_req.get(sg_policy.POLICY) def _is_security_group_logged(self, context, security_group_id): prop = self._get_security_group_properties(context, security_group_id) return prop.logging def _is_provider_security_group(self, context, security_group_id): sg_prop = self._get_security_group_properties(context, security_group_id) return sg_prop.provider def _is_policy_security_group(self, context, security_group_id): sg_prop = self._get_security_group_properties(context, security_group_id) return True if sg_prop.policy else False def _get_security_group_policy(self, context, security_group_id): sg_prop = self._get_security_group_properties(context, security_group_id) return sg_prop.policy def _check_provider_security_group_exists(self, context, security_group_id): # NOTE(roeyc): We want to retrieve the security-group info by calling # get_security_group, this will also validate that the provider # security-group belongs to the same tenant this request is made for. sg = self.get_security_group(context, security_group_id) if not sg[provider_sg.PROVIDER]: raise provider_sg.SecurityGroupNotProvider(id=security_group_id) def _check_invalid_security_groups_specified(self, context, port, only_warn=False): """Check if the lists of security groups are valid When only_warn is True we do not raise an exception here, because this may fail nova boot. Instead we will later remove provider security groups from the regular security groups list of the port. Since all the provider security groups of the tenant will be on this list anyway, the result will be the same. 
""" if validators.is_attr_set(port.get(ext_sg.SECURITYGROUPS)): for sg in port.get(ext_sg.SECURITYGROUPS, []): # makes sure user doesn't add non-provider secgrp as secgrp if self._is_provider_security_group(context, sg): if only_warn: LOG.warning( "Ignored provider security group %(sg)s in " "security groups list for port %(id)s", {'sg': sg, 'id': port['id']}) else: raise provider_sg.SecurityGroupIsProvider(id=sg) if validators.is_attr_set( port.get(provider_sg.PROVIDER_SECURITYGROUPS)): # also check all provider groups are provider. for sg in port.get(provider_sg.PROVIDER_SECURITYGROUPS, []): self._check_provider_security_group_exists(context, sg) def _get_tenant_provider_security_groups(self, context, tenant_id): res = context.session.query( NsxExtendedSecurityGroupProperties.security_group_id ).join(securitygroups_db.SecurityGroup).filter( securitygroups_db.SecurityGroup.tenant_id == tenant_id, NsxExtendedSecurityGroupProperties.provider == sa.true()).all() return [r[0] for r in res] def _validate_security_group_properties_create(self, context, security_group, default_sg): self._validate_provider_security_group_create(context, security_group, default_sg) def _validate_provider_security_group_create(self, context, security_group, default_sg): if not security_group.get(provider_sg.PROVIDER, False): return if default_sg: raise provider_sg.DefaultSecurityGroupIsNotProvider() def _get_provider_security_groups_on_port(self, context, port): p = port['port'] tenant_id = p['tenant_id'] provider_sgs = p.get(provider_sg.PROVIDER_SECURITYGROUPS, n_constants.ATTR_NOT_SPECIFIED) if p.get('device_owner') and n_utils.is_port_trusted(p): return if not validators.is_attr_set(provider_sgs): if provider_sgs is n_constants.ATTR_NOT_SPECIFIED: provider_sgs = self._get_tenant_provider_security_groups( context, tenant_id) else: # Accept None as indication that this port should not be # associated with any provider security-group. 
provider_sgs = [] return provider_sgs def _get_port_security_groups_lists(self, context, port): """Return 2 lists of this port security groups: 1) Regular security groups for this port 2) Provider security groups for this port """ port_data = port['port'] # First check that the configuration is valid self._check_invalid_security_groups_specified( context, port_data, only_warn=True) # get the 2 separate lists of security groups sgids = self._get_security_groups_on_port( context, port) or [] psgids = self._get_provider_security_groups_on_port( context, port) or [] had_sgs = len(sgids) > 0 # remove provider security groups which were specified also in the # regular sg list sgids = list(set(sgids) - set(psgids)) if not len(sgids) and had_sgs: # Add the default sg of the tenant if no other remained tenant_id = port_data.get('tenant_id') default_sg = self._ensure_default_security_group( context, tenant_id) sgids.append(default_sg) return (sgids, psgids) def _process_port_create_provider_security_group(self, context, p, security_group_ids): if validators.is_attr_set(security_group_ids): for security_group_id in security_group_ids: self._create_port_security_group_binding(context, p['id'], security_group_id) p[provider_sg.PROVIDER_SECURITYGROUPS] = security_group_ids or [] def _process_port_update_provider_security_group(self, context, port, original_port, updated_port): p = port['port'] provider_sg_specified = (provider_sg.PROVIDER_SECURITYGROUPS in p and p[provider_sg.PROVIDER_SECURITYGROUPS] != n_constants.ATTR_NOT_SPECIFIED) provider_sg_changed = ( provider_sg_specified and not helpers.compare_elements( original_port.get(provider_sg.PROVIDER_SECURITYGROUPS, []), p[provider_sg.PROVIDER_SECURITYGROUPS])) sg_changed = ( set(original_port[ext_sg.SECURITYGROUPS]) != set(updated_port[ext_sg.SECURITYGROUPS])) if sg_changed or provider_sg_changed: self._check_invalid_security_groups_specified(context, p) if provider_sg_changed: port['port']['tenant_id'] = original_port['id'] 
port['port']['id'] = original_port['id'] updated_port[provider_sg.PROVIDER_SECURITYGROUPS] = ( self._get_provider_security_groups_on_port(context, port)) else: updated_port[provider_sg.PROVIDER_SECURITYGROUPS] = ( original_port.get(provider_sg.PROVIDER_SECURITYGROUPS, [])) if provider_sg_changed or sg_changed: if not sg_changed: query = context.session.query( securitygroups_db.SecurityGroupPortBinding) for sg in original_port[provider_sg.PROVIDER_SECURITYGROUPS]: binding = query.filter_by( port_id=p['id'], security_group_id=sg).one() context.session.delete(binding) self._process_port_create_provider_security_group( context, updated_port, updated_port[provider_sg.PROVIDER_SECURITYGROUPS]) return provider_sg_changed def _prevent_non_admin_edit_provider_sg(self, context, sg_id): # Only someone who is an admin is allowed to modify a provider sg. if not context.is_admin and self._is_provider_security_group(context, sg_id): raise provider_sg.ProviderSecurityGroupEditNotAdmin(id=sg_id) def _prevent_non_admin_delete_policy_sg(self, context, sg_id): # Only someone who is an admin is allowed to delete this. if not context.is_admin and self._is_policy_security_group(context, sg_id): raise sg_policy.PolicySecurityGroupDeleteNotAdmin(id=sg_id) @staticmethod @resource_extend.extends([ext_sg.SECURITYGROUPS]) def _extend_security_group_with_properties(sg_res, sg_db): if sg_db.ext_properties: sg_res[sg_logging.LOGGING] = sg_db.ext_properties.logging sg_res[provider_sg.PROVIDER] = sg_db.ext_properties.provider sg_res[sg_policy.POLICY] = sg_db.ext_properties.policy @staticmethod @resource_extend.extends([port_def.COLLECTION_NAME]) def _extend_port_dict_provider_security_group(port_res, port_db): # Add the provider sg list to the port. 
# later we will remove those from the regular sg list provider_groups = [] for sec_group_mapping in port_db.security_groups: if (sec_group_mapping.extended_grp and sec_group_mapping.extended_grp.provider is True): provider_groups.append(sec_group_mapping['security_group_id']) port_res[provider_sg.PROVIDER_SECURITYGROUPS] = provider_groups return port_res @staticmethod def _remove_provider_security_groups_from_list(port_res): # Remove provider security groups from the list of regular security # groups of the result port if (ext_sg.SECURITYGROUPS not in port_res or provider_sg.PROVIDER_SECURITYGROUPS not in port_res): return port_res[ext_sg.SECURITYGROUPS] = list( set(port_res[ext_sg.SECURITYGROUPS]) - set(port_res[provider_sg.PROVIDER_SECURITYGROUPS])) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/extended_security_group_rule.py0000644000175000017500000000772300000000000026332 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.db import model_base import sqlalchemy as sa from sqlalchemy import orm from sqlalchemy.orm import exc from neutron.db.models import securitygroup from neutron.extensions import securitygroup as ext_sg from neutron_lib.api import validators from neutron_lib.db import api as db_api from neutron_lib.db import resource_extend from neutron_lib import exceptions as nexception from vmware_nsx._i18n import _ from vmware_nsx.extensions import secgroup_rule_local_ip_prefix as ext_local_ip class NotIngressRule(nexception.BadRequest): message = _("Specifying local_ip_prefix is supported " "with ingress rules only.") class NsxExtendedSecurityGroupRuleProperties(model_base.BASEV2): """Persist security group rule properties for the extended-security-group-rule extension. """ __tablename__ = 'nsx_extended_security_group_rule_properties' rule_id = sa.Column(sa.String(36), sa.ForeignKey('securitygrouprules.id', ondelete='CASCADE'), primary_key=True, nullable=False) local_ip_prefix = sa.Column(sa.String(255), nullable=False) rule = orm.relationship( securitygroup.SecurityGroupRule, backref=orm.backref('ext_properties', lazy='joined', uselist=False, cascade='delete')) @resource_extend.has_resource_extenders class ExtendedSecurityGroupRuleMixin(object): def _check_local_ip_prefix(self, context, rule): rule_specify_local_ip_prefix = validators.is_attr_set( rule.get(ext_local_ip.LOCAL_IP_PREFIX)) if rule_specify_local_ip_prefix and rule['direction'] != 'ingress': raise NotIngressRule() if not rule_specify_local_ip_prefix: # remove ATTR_NOT_SPECIFIED rule[ext_local_ip.LOCAL_IP_PREFIX] = None return rule_specify_local_ip_prefix def _process_security_group_rule_properties(self, context, rule_res, rule_req): rule_res[ext_local_ip.LOCAL_IP_PREFIX] = None if not validators.is_attr_set( rule_req.get(ext_local_ip.LOCAL_IP_PREFIX)): return with db_api.CONTEXT_WRITER.using(context): properties = NsxExtendedSecurityGroupRuleProperties( rule_id=rule_res['id'], 
local_ip_prefix=rule_req[ext_local_ip.LOCAL_IP_PREFIX]) context.session.add(properties) rule_res[ext_local_ip.LOCAL_IP_PREFIX] = ( rule_req[ext_local_ip.LOCAL_IP_PREFIX]) @staticmethod @resource_extend.extends([ext_sg.SECURITYGROUPRULES]) def _extend_security_group_rule_with_params(sg_rule_res, sg_rule_db): if sg_rule_db.ext_properties: sg_rule_res[ext_local_ip.LOCAL_IP_PREFIX] = ( sg_rule_db.ext_properties.local_ip_prefix) else: sg_rule_res[ext_local_ip.LOCAL_IP_PREFIX] = None def _get_security_group_rule_local_ip(self, context, rule_id): with db_api.CONTEXT_READER.using(context): try: prop = context.session.query( NsxExtendedSecurityGroupRuleProperties).filter_by( rule_id=rule_id).one() except exc.NoResultFound: return False return prop[ext_local_ip.LOCAL_IP_PREFIX] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/lsn_db.py0000644000175000017500000000772700000000000021605 0ustar00coreycorey00000000000000# Copyright 2014 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from neutron_lib.db import api as db_api from oslo_db import exception as d_exc from oslo_log import log as logging from sqlalchemy import orm from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as p_exc from vmware_nsx.db import nsx_models LOG = logging.getLogger(__name__) def lsn_add(context, network_id, lsn_id): """Add Logical Service Node information to persistent datastore.""" with db_api.CONTEXT_WRITER.using(context): lsn = nsx_models.Lsn(network_id, lsn_id) context.session.add(lsn) def lsn_remove(context, lsn_id): """Remove Logical Service Node information from datastore given its id.""" with db_api.CONTEXT_WRITER.using(context): context.session.query(nsx_models.Lsn).filter_by(lsn_id=lsn_id).delete() def lsn_remove_for_network(context, network_id): """Remove information about the Logical Service Node given its network.""" with db_api.CONTEXT_WRITER.using(context): context.session.query(nsx_models.Lsn).filter_by( net_id=network_id).delete() def lsn_get_for_network(context, network_id, raise_on_err=True): """Retrieve LSN information given its network id.""" query = context.session.query(nsx_models.Lsn) try: return query.filter_by(net_id=network_id).one() except (orm.exc.NoResultFound, d_exc.DBError): msg = _('Unable to find Logical Service Node for network %s') if raise_on_err: LOG.error(msg, network_id) raise p_exc.LsnNotFound(entity='network', entity_id=network_id) else: LOG.warning(msg, network_id) def lsn_port_add_for_lsn(context, lsn_port_id, subnet_id, mac, lsn_id): """Add Logical Service Node Port information to persistent datastore.""" with db_api.CONTEXT_WRITER.using(context): lsn_port = nsx_models.LsnPort(lsn_port_id, subnet_id, mac, lsn_id) context.session.add(lsn_port) def lsn_port_get_for_subnet(context, subnet_id, raise_on_err=True): """Return Logical Service Node Port information given its subnet id.""" with db_api.CONTEXT_READER.using(context): try: return (context.session.query(nsx_models.LsnPort). 
filter_by(sub_id=subnet_id).one()) except (orm.exc.NoResultFound, d_exc.DBError): if raise_on_err: raise p_exc.LsnPortNotFound(lsn_id=None, entity='subnet', entity_id=subnet_id) def lsn_port_get_for_mac(context, mac_address, raise_on_err=True): """Return Logical Service Node Port information given its mac address.""" with db_api.CONTEXT_READER.using(context): try: return (context.session.query(nsx_models.LsnPort). filter_by(mac_addr=mac_address).one()) except (orm.exc.NoResultFound, d_exc.DBError): if raise_on_err: raise p_exc.LsnPortNotFound(lsn_id=None, entity='mac', entity_id=mac_address) def lsn_port_remove(context, lsn_port_id): """Remove Logical Service Node port from the given Logical Service Node.""" with db_api.CONTEXT_WRITER.using(context): (context.session.query(nsx_models.LsnPort). filter_by(lsn_port_id=lsn_port_id).delete()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/maclearning.py0000644000175000017500000000564000000000000022614 0ustar00coreycorey00000000000000# Copyright 2013 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from sqlalchemy.orm import exc from neutron_lib.api.definitions import port as port_def from neutron_lib.db import api as db_api from neutron_lib.db import model_query from neutron_lib.db import resource_extend from neutron_lib.db import utils as db_utils from oslo_log import log as logging from vmware_nsx.db import nsx_models from vmware_nsx.extensions import maclearning as mac LOG = logging.getLogger(__name__) @resource_extend.has_resource_extenders class MacLearningDbMixin(object): """Mixin class for mac learning.""" def _make_mac_learning_state_dict(self, port, fields=None): res = {'port_id': port['port_id'], mac.MAC_LEARNING: port[mac.MAC_LEARNING]} return db_utils.resource_fields(res, fields) @staticmethod @resource_extend.extends([port_def.COLLECTION_NAME]) def _extend_port_mac_learning_state(port_res, port_db): state = port_db.mac_learning_state if state: port_res[mac.MAC_LEARNING] = state.mac_learning_enabled def _update_mac_learning_state(self, context, port_id, enabled): try: query = model_query.query_with_hooks( context, nsx_models.MacLearningState) state = query.filter( nsx_models.MacLearningState.port_id == port_id).one() state.update({mac.MAC_LEARNING: enabled}) except exc.NoResultFound: self._create_mac_learning_state(context, {'id': port_id, mac.MAC_LEARNING: enabled}) def _create_mac_learning_state(self, context, port): with db_api.CONTEXT_WRITER.using(context): enabled = port[mac.MAC_LEARNING] state = nsx_models.MacLearningState( port_id=port['id'], mac_learning_enabled=enabled) context.session.add(state) return self._make_mac_learning_state_dict(state) def get_mac_learning_state(self, context, port_id): try: query = model_query.query_with_hooks( context, nsx_models.MacLearningState) state = query.filter( nsx_models.MacLearningState.port_id == port_id).one() return state.mac_learning_enabled except exc.NoResultFound: return None ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1862533 
vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/0000755000175000017500000000000000000000000021746 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/__init__.py0000644000175000017500000000000000000000000024045 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1862533 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/0000755000175000017500000000000000000000000025576 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/__init__.py0000644000175000017500000000120400000000000027704 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. VERSION_TABLE = 'vmware_alembic_version' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/env.py0000644000175000017500000000667700000000000026760 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from logging import config as logging_config from alembic import context from neutron_lib.db import model_base from oslo_config import cfg from oslo_db.sqlalchemy import session import sqlalchemy as sa from sqlalchemy import event from neutron.db.migration.alembic_migrations import external from neutron.db.migration.models import head # noqa from vmware_nsx.db.migration import alembic_migrations MYSQL_ENGINE = None # this is the Alembic Config object, which provides # access to the values within the .ini file in use. config = context.config neutron_config = config.neutron_config # Interpret the config file for Python logging. # This line sets up loggers basically. logging_config.fileConfig(config.config_file_name) # set the target for 'autogenerate' support target_metadata = model_base.BASEV2.metadata def set_mysql_engine(): try: mysql_engine = neutron_config.command.mysql_engine except cfg.NoSuchOptError: mysql_engine = None global MYSQL_ENGINE MYSQL_ENGINE = (mysql_engine or model_base.BASEV2.__table_args__['mysql_engine']) def include_object(object, name, type_, reflected, compare_to): if (type_ == 'table' and name in set(external.TABLES) - set(external.REPO_VMWARE_TABLES)): return False else: return True def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with either a URL or an Engine. Calls to context.execute() here emit the given string to the script output. 
""" set_mysql_engine() kwargs = dict() if neutron_config.database.connection: kwargs['url'] = neutron_config.database.connection else: kwargs['dialect_name'] = neutron_config.database.engine kwargs['include_object'] = include_object kwargs['version_table'] = alembic_migrations.VERSION_TABLE context.configure(**kwargs) with context.begin_transaction(): context.run_migrations() @event.listens_for(sa.Table, 'after_parent_attach') def set_storage_engine(target, parent): if MYSQL_ENGINE: target.kwargs['mysql_engine'] = MYSQL_ENGINE def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ set_mysql_engine() engine = session.create_engine(neutron_config.database.connection) connection = engine.connect() context.configure( connection=connection, target_metadata=target_metadata, include_object=include_object, version_table=alembic_migrations.VERSION_TABLE ) try: with context.begin_transaction(): context.run_migrations() finally: connection.close() engine.dispose() if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/script.py.mako0000644000175000017500000000201400000000000030377 0ustar00coreycorey00000000000000# Copyright ${create_date.year} VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """${message} Revision ID: ${up_revision} Revises: ${down_revision} Create Date: ${create_date} """ # revision identifiers, used by Alembic. revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} % if branch_labels: branch_labels = ${repr(branch_labels)} %endif from alembic import op import sqlalchemy as sa ${imports if imports else ""} def upgrade(): ${upgrades if upgrades else "pass"} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1862533 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/0000755000175000017500000000000000000000000027446 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/CONTRACT_HEAD0000644000175000017500000000001500000000000031363 0ustar00coreycorey00000000000000717f7f63a219 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/EXPAND_HEAD0000644000175000017500000000001500000000000031125 0ustar00coreycorey0000000000000099bfcb6003c6 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/kilo_release.py0000644000175000017500000000153100000000000032456 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """kilo Revision ID: kilo Revises: None Create Date: 2015-04-16 00:00:00.000000 """ # revision identifiers, used by Alembic. revision = 'kilo' down_revision = None def upgrade(): """A no-op migration for marking the Kilo release.""" pass ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1542525 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/liberty/0000755000175000017500000000000000000000000031120 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1862533 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/liberty/contract/0000755000175000017500000000000000000000000032735 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000026200000000000011455 xustar0000000000000000156 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/liberty/contract/393bf843b96_initial_liberty_no_op_contract_script.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/liberty/contract/393bf80000644000175000017500000000164000000000000033577 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Initial Liberty no-op contract script. Revision ID: 393bf843b96 Revises: kilo Create Date: 2015-08-13 07:26:21.891165 """ from neutron.db.migration import cli # revision identifiers, used by Alembic. revision = '393bf843b96' down_revision = 'kilo' branch_labels = (cli.CONTRACT_BRANCH,) def upgrade(): pass ././@PaxHeader0000000000000000000000000000024300000000000011454 xustar0000000000000000141 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/liberty/contract/3c88bdea3054_nsxv_vdr_dhcp_binding.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/liberty/contract/3c88bd0000644000175000017500000000231000000000000033647 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """nsxv_vdr_dhcp_binding.py Revision ID: 3c88bdea3054 Revises: 393bf843b96 Create Date: 2015-09-23 14:59:15.102609 """ from alembic import op from neutron.db import migration # revision identifiers, used by Alembic. 
revision = '3c88bdea3054' down_revision = '393bf843b96' # milestone identifier, used by neutron-db-manage neutron_milestone = [migration.LIBERTY, migration.MITAKA] def upgrade(): op.drop_constraint('unique_nsxv_vdr_dhcp_bindings0dhcp_router_id', 'nsxv_vdr_dhcp_bindings', 'unique') op.drop_column('nsxv_vdr_dhcp_bindings', 'dhcp_router_id') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1902535 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/liberty/expand/0000755000175000017500000000000000000000000032377 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000025200000000000011454 xustar0000000000000000148 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/liberty/expand/279b70ac3ae8_nsxv3_add_l2gwconnection_table.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/liberty/expand/279b70ac0000644000175000017500000000273500000000000033467 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """NSXv3 Add l2gwconnection table Revision ID: 279b70ac3ae8 Revises: 28430956782d Create Date: 2015-08-14 02:04:09.807926 """ from alembic import op import sqlalchemy as sa from neutron.db import migration # revision identifiers, used by Alembic. 
revision = '279b70ac3ae8' down_revision = '28430956782d' # milestone identifier, used by neutron-db-manage neutron_milestone = [migration.LIBERTY] def upgrade(): op.create_table( 'nsx_l2gw_connection_mappings', sa.Column('connection_id', sa.String(length=36), nullable=False), sa.Column('port_id', sa.String(length=36), nullable=False), sa.Column('bridge_endpoint_id', sa.String(length=36), nullable=False), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('connection_id'), ) ././@PaxHeader0000000000000000000000000000024100000000000011452 xustar0000000000000000139 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/liberty/expand/28430956782d_nsxv3_security_groups.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/liberty/expand/284309560000644000175000017500000000312500000000000033247 0ustar00coreycorey00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """nsxv3_security_groups Revision ID: 28430956782d Revises: 53a3254aa95e Create Date: 2015-08-24 18:19:09.397813 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. 
revision = '28430956782d' down_revision = '53a3254aa95e' def upgrade(): op.create_table( 'neutron_nsx_firewall_section_mappings', sa.Column('neutron_id', sa.String(36), nullable=False), sa.Column('nsx_id', sa.String(36), nullable=False), sa.ForeignKeyConstraint(['neutron_id'], ['securitygroups.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('neutron_id')) op.create_table( 'neutron_nsx_rule_mappings', sa.Column('neutron_id', sa.String(36), nullable=False), sa.Column('nsx_id', sa.String(36), nullable=False), sa.ForeignKeyConstraint(['neutron_id'], ['securitygrouprules.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('neutron_id')) ././@PaxHeader0000000000000000000000000000025700000000000011461 xustar0000000000000000153 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/liberty/expand/53a3254aa95e_initial_liberty_no_op_expand_script.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/liberty/expand/53a3254a0000644000175000017500000000163600000000000033377 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Initial Liberty no-op expand script. Revision ID: 53a3254aa95e Revises: kilo Create Date: 2015-08-13 06:34:29.842396 """ from neutron.db.migration import cli # revision identifiers, used by Alembic. 
revision = '53a3254aa95e' down_revision = 'kilo' branch_labels = (cli.EXPAND_BRANCH,) def upgrade(): pass ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1542525 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/mitaka/0000755000175000017500000000000000000000000030714 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1902535 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/mitaka/expand/0000755000175000017500000000000000000000000032173 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000024500000000000011456 xustar0000000000000000143 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/mitaka/expand/20483029f1ff_update_tz_network_bindings.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/mitaka/expand/20483029f0000644000175000017500000000266200000000000033205 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """update nsx_v3 tz_network_bindings_binding_type Revision ID: 20483029f1ff Revises: 69fb78b33d41 Create Date: 2016-02-09 13:57:01.590154 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. 
revision = '20483029f1ff' down_revision = '69fb78b33d41' old_tz_binding_type_enum = sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext', name='tz_network_bindings_binding_type') new_tz_binding_type_enum = sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext', 'vxlan', name='tz_network_bindings_binding_type') def upgrade(): op.alter_column( 'tz_network_bindings', 'binding_type', type_=new_tz_binding_type_enum, existing_type=old_tz_binding_type_enum, existing_nullable=False) ././@PaxHeader0000000000000000000000000000024600000000000011457 xustar0000000000000000144 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/mitaka/expand/2af850eb3970_update_nsxv_tz_binding_type.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/mitaka/expand/2af850eb30000644000175000017500000000260400000000000033417 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """update nsxv tz binding type Revision ID: 2af850eb3970 Revises: 312211a5725f Create Date: 2015-11-24 13:44:08.664653 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. 
revision = '2af850eb3970' down_revision = '312211a5725f' tz_binding_type_enum = sa.Enum('flat', 'vlan', 'portgroup', name='nsxv_tz_network_bindings_binding_type') new_tz_binding_type_enum = sa.Enum( 'flat', 'vlan', 'portgroup', 'vxlan', name='nsxv_tz_network_bindings_binding_type') def upgrade(): op.alter_column( 'nsxv_tz_network_bindings', 'binding_type', type_=new_tz_binding_type_enum, existing_type=tz_binding_type_enum, existing_nullable=False) ././@PaxHeader0000000000000000000000000000022400000000000011453 xustar0000000000000000126 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/mitaka/expand/312211a5725f_nsxv_lbv2.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/mitaka/expand/312211a570000644000175000017500000000577600000000000033203 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """nsxv_lbv2 Revision ID: 312211a5725f Revises: 279b70ac3ae8 Create Date: 2015-09-09 02:02:59.990122 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. 
revision = '312211a5725f' down_revision = '279b70ac3ae8' def upgrade(): op.create_table( 'nsxv_lbaas_loadbalancer_bindings', sa.Column('loadbalancer_id', sa.String(length=36), nullable=False), sa.Column('edge_id', sa.String(length=36), nullable=False), sa.Column('edge_fw_rule_id', sa.String(length=36), nullable=False), sa.Column('vip_address', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('loadbalancer_id')) op.create_table( 'nsxv_lbaas_listener_bindings', sa.Column('loadbalancer_id', sa.String(length=36), nullable=False), sa.Column('listener_id', sa.String(length=36), nullable=False), sa.Column('app_profile_id', sa.String(length=36), nullable=False), sa.Column('vse_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('loadbalancer_id', 'listener_id')) op.create_table( 'nsxv_lbaas_pool_bindings', sa.Column('loadbalancer_id', sa.String(length=36), nullable=False), sa.Column('listener_id', sa.String(length=36), nullable=False), sa.Column('pool_id', sa.String(length=36), nullable=False), sa.Column('edge_pool_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('loadbalancer_id', 'listener_id', 'pool_id')) op.create_table( 'nsxv_lbaas_monitor_bindings', sa.Column('loadbalancer_id', sa.String(length=36), nullable=False), sa.Column('listener_id', sa.String(length=36), nullable=False), sa.Column('pool_id', sa.String(length=36), nullable=False), sa.Column('hm_id', sa.String(length=36), nullable=False), sa.Column('edge_id', sa.String(length=36), nullable=False), sa.Column('edge_mon_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('loadbalancer_id', 'listener_id', 'pool_id', 'hm_id', 'edge_id')) op.create_table( 'nsxv_lbaas_certificate_bindings', sa.Column('cert_id', sa.String(length=128), nullable=False), sa.Column('edge_id', sa.String(length=36), nullable=False), sa.Column('edge_cert_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('cert_id', 'edge_id')) 
././@PaxHeader0000000000000000000000000000023700000000000011457 xustar0000000000000000137 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/mitaka/expand/4c45bcadccf9_extend_secgroup_rule.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/mitaka/expand/4c45bcadc0000644000175000017500000000247300000000000033560 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """extend_secgroup_rule Revision ID: 4c45bcadccf9 Revises: 20483029f1ff Create Date: 2016-03-01 06:12:09.450116 """ from alembic import op import sqlalchemy as sa from neutron.db import migration # revision identifiers, used by Alembic. 
revision = '4c45bcadccf9' down_revision = '20483029f1ff' neutron_milestone = [migration.MITAKA] def upgrade(): op.create_table( 'nsxv_extended_security_group_rule_properties', sa.Column('rule_id', sa.String(36), nullable=False), sa.Column('local_ip_prefix', sa.String(255), nullable=False), sa.ForeignKeyConstraint(['rule_id'], ['securitygrouprules.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('rule_id')) ././@PaxHeader0000000000000000000000000000025400000000000011456 xustar0000000000000000150 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/mitaka/expand/69fb78b33d41_nsxv_add_search_domain_to_subnets.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/mitaka/expand/69fb78b330000644000175000017500000000243700000000000033361 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """NSXv add dns search domain to subnets Revision ID: 69fb78b33d41 Revises: 2af850eb3970 Create Date: 2016-01-27 07:28:35.369938 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. 
revision = '69fb78b33d41' down_revision = '2af850eb3970' def upgrade(): op.create_table( 'nsxv_subnet_ext_attributes', sa.Column('subnet_id', sa.String(length=36), nullable=False), sa.Column('dns_search_domain', sa.String(length=255), nullable=False), sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('subnet_id') ) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1542525 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/0000755000175000017500000000000000000000000030760 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1902535 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/contract/0000755000175000017500000000000000000000000032575 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000025300000000000011455 xustar0000000000000000149 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/contract/081af0e396d7_nsx_extended_rule_table_rename.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/contract/081af0e0000644000175000017500000000176000000000000033570 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""nsxv3_secgroup_local_ip_prefix Revision ID: 081af0e396d7 Revises: 5ed1ffbc0d2a Create Date: 2016-03-24 07:11:30.300482 """ from alembic import op # revision identifiers, used by Alembic. revision = '081af0e396d7' down_revision = '5ed1ffbc0d2a' def upgrade(): op.rename_table('nsxv_extended_security_group_rule_properties', 'nsx_extended_security_group_rule_properties') ././@PaxHeader0000000000000000000000000000024700000000000011460 xustar0000000000000000145 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/contract/5ed1ffbc0d2a_nsx_security_group_logging.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/contract/5ed1ffb0000644000175000017500000000503600000000000033740 0ustar00coreycorey00000000000000# Copyright 2016 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """nsxv_security_group_logging Revision ID: 5ed1ffbc0d2a Revises: 3e4dccfe6fb4 Create Date: 2016-03-24 06:06:06.680092 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. 
revision = '5ed1ffbc0d2a' down_revision = '3c88bdea3054' depends_on = ('3e4dccfe6fb4',) def upgrade(): secgroup_prop_table = sa.Table( 'nsx_extended_security_group_properties', sa.MetaData(), sa.Column('security_group_id', sa.String(36), nullable=False), sa.Column('logging', sa.Boolean(), nullable=False)) op.bulk_insert(secgroup_prop_table, get_values()) op.drop_column('nsxv_security_group_section_mappings', 'logging') def get_values(): values = [] session = sa.orm.Session(bind=op.get_bind()) section_mapping_table = sa.Table('nsxv_security_group_section_mappings', sa.MetaData(), sa.Column('neutron_id', sa.String(36)), sa.Column('logging', sa.Boolean(), nullable=False)) secgroup_table = sa.Table('securitygroups', sa.MetaData(), sa.Column('id', sa.String(36))) # If we run NSX-V plugin then we want the current values for security-group # logging, taken from the section mapping table. for row in session.query(section_mapping_table).all(): values.append({'security_group_id': row.neutron_id, 'logging': row.logging}) # If we run NSX-V3 plugin then previous table is empty, since # security-group logging isn't supported on previous versions, we set the # current value to false (the default). if not values: for row in session.query(secgroup_table).all(): values.append({'security_group_id': row.id, 'logging': False}) session.commit() return values ././@PaxHeader0000000000000000000000000000024600000000000011457 xustar0000000000000000144 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/contract/d49ac91b560e_nsxv_lbaasv2_shared_pools.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/contract/d49ac910000644000175000017500000000343700000000000033605 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Support shared pools with NSXv LBaaSv2 driver Revision ID: d49ac91b560e Revises: dbe29d208ac6 Create Date: 2016-07-21 05:03:35.369938 """ from alembic import op from sqlalchemy.engine import reflection from neutron.db import migration # revision identifiers, used by Alembic. revision = 'd49ac91b560e' down_revision = 'dbe29d208ac6' # milestone identifier, used by neutron-db-manage neutron_milestone = [migration.NEWTON] def upgrade(): change_pk_constraint('nsxv_lbaas_pool_bindings', ['loadbalancer_id', 'pool_id']) change_pk_constraint('nsxv_lbaas_monitor_bindings', ['loadbalancer_id', 'pool_id', 'hm_id', 'edge_id']) def change_pk_constraint(table_name, columns): inspector = reflection.Inspector.from_engine(op.get_bind()) pk_constraint = inspector.get_pk_constraint(table_name) op.drop_constraint(pk_constraint.get('name'), table_name, type_='primary') op.drop_column(table_name, 'listener_id') op.create_primary_key(None, table_name, columns) ././@PaxHeader0000000000000000000000000000025100000000000011453 xustar0000000000000000147 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/contract/dbe29d208ac6_nsxv_add_dhcp_mtu_to_subnets.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/contract/dbe29d20000644000175000017500000000244500000000000033660 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """NSXv add DHCP MTU to subnets Revision ID: dbe29d208ac6 Revises: 081af0e396d7 Create Date: 2016-07-21 05:03:35.369938 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'dbe29d208ac6' down_revision = '081af0e396d7' def upgrade(): # Add a new column and make the previous column nullable, # because it is enough that one of them is non-null op.add_column('nsxv_subnet_ext_attributes', sa.Column('dhcp_mtu', sa.Integer, nullable=True)) op.alter_column('nsxv_subnet_ext_attributes', 'dns_search_domain', nullable=True, existing_type=sa.String(length=255), existing_nullable=False) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1902535 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/0000755000175000017500000000000000000000000032237 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000024600000000000011457 xustar0000000000000000144 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/1b4eaffe4f31_nsx_provider_security_group.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/1b4eaffe40000644000175000017500000000213500000000000033624 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """NSX Adds a 'provider' attribute to security-group Revision ID: 1b4eaffe4f31 Revises: 633514d94b93 Create Date: 2016-07-17 11:30:31.263918 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '1b4eaffe4f31' down_revision = '633514d94b93' def upgrade(): op.add_column('nsx_extended_security_group_properties', sa.Column('provider', sa.Boolean(), default=False, server_default=sa.false(), nullable=False)) ././@PaxHeader0000000000000000000000000000024600000000000011457 xustar0000000000000000144 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/2c87aedb206f_nsxv_security_group_logging.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/2c87aedb20000644000175000017500000000201000000000000033534 0ustar00coreycorey00000000000000# Copyright 2016 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# """nsxv_security_group_logging Revision ID: 2c87aedb206f Revises: 4c45bcadccf9 Create Date: 2016-03-15 06:06:06.680092 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '2c87aedb206f' down_revision = '4c45bcadccf9' def upgrade(): op.add_column('nsxv_security_group_section_mappings', sa.Column('logging', sa.Boolean(), nullable=False)) ././@PaxHeader0000000000000000000000000000024500000000000011456 xustar0000000000000000143 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/3e4dccfe6fb4_nsx_security_group_logging.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/3e4dccfe60000644000175000017500000000246000000000000033632 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """NSXv add dns search domain to subnets Revision ID: 3e4dccfe6fb4 Revises: 2c87aedb206f Create Date: 2016-03-20 07:28:35.369938 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. 
revision = '3e4dccfe6fb4' down_revision = '2c87aedb206f' def upgrade(): op.create_table( 'nsx_extended_security_group_properties', sa.Column('security_group_id', sa.String(36), nullable=False), sa.Column('logging', sa.Boolean(), nullable=False), sa.ForeignKeyConstraint(['security_group_id'], ['securitygroups.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('security_group_id') ) ././@PaxHeader0000000000000000000000000000023700000000000011457 xustar0000000000000000137 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/5e564e781d77_add_nsx_binding_type.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/5e564e7810000644000175000017500000000270100000000000033337 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add nsx binding type Revision ID: 5e564e781d77 Revises: c644ec62c585 Create Date: 2016-06-27 23:58:22.003350 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. 
revision = '5e564e781d77' down_revision = 'c644ec62c585' tz_binding_type_enum = sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext', 'vxlan', name='tz_network_bindings_binding_type') new_tz_binding_type_enum = sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext', 'vxlan', 'portgroup', name='tz_network_bindings_binding_type') def upgrade(): op.alter_column( 'tz_network_bindings', 'binding_type', type_=new_tz_binding_type_enum, existing_type=tz_binding_type_enum, existing_nullable=False) ././@PaxHeader0000000000000000000000000000023700000000000011457 xustar0000000000000000137 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/633514d94b93_add_support_for_taas.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/633514d940000644000175000017500000000216200000000000033251 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Add support for TaaS Revision ID: 633514d94b93 Revises: 86a55205337c Create Date: 2016-05-09 14:11:31.940021 """ from alembic import op import sqlalchemy as sa revision = '633514d94b93' down_revision = '86a55205337c' def upgrade(): op.create_table( 'nsx_port_mirror_session_mappings', sa.Column('tap_flow_id', sa.String(length=36), nullable=False), sa.Column('port_mirror_session_id', sa.String(length=36), nullable=False), sa.PrimaryKeyConstraint('tap_flow_id'), ) ././@PaxHeader0000000000000000000000000000023000000000000011450 xustar0000000000000000130 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/6e6da8296c0e_add_nsxv_ipam.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/6e6da82960000644000175000017500000000231100000000000033415 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Add support for IPAM in NSXv Revision ID: 6e6da8296c0e Revises: 1b4eaffe4f31 Create Date: 2016-09-01 10:17:16.770021 """ from alembic import op import sqlalchemy as sa revision = '6e6da8296c0e' down_revision = '1b4eaffe4f31' def upgrade(): op.create_table( 'nsxv_subnet_ipam', sa.Column('subnet_id', sa.String(length=36), nullable=False), sa.Column('nsx_pool_id', sa.String(length=36), nullable=False), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('created_at', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('nsx_pool_id'), ) ././@PaxHeader0000000000000000000000000000023600000000000011456 xustar0000000000000000136 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/7b5ec3caa9a4_nsxv_fix_az_default.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/7b5ec3caa0000644000175000017500000000232100000000000033615 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Fix the availability zones default value in the router bindings table Revision ID: 7b5ec3caa9a4 Revises: 6e6da8296c0e Create Date: 2016-09-07 11:38:35.369938 """ from alembic import op from neutron.db import migration # revision identifiers, used by Alembic. 
revision = '7b5ec3caa9a4' down_revision = '6e6da8296c0e' # milestone identifier, used by neutron-db-manage neutron_milestone = [migration.NEWTON] def upgrade(): #previous migration left this column empty instead of 'default' op.execute("UPDATE nsxv_router_bindings SET availability_zone='default' " "where availability_zone is NULL") ././@PaxHeader0000000000000000000000000000023400000000000011454 xustar0000000000000000134 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/7e46906f8997_lbaas_foreignkeys.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/7e46906f80000644000175000017500000000532000000000000033344 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """lbaas foreignkeys Revision ID: 7e46906f8997 Revises: aede17d51d0f Create Date: 2016-04-21 10:45:32.278433 """ from alembic import op from neutron.db import migration # revision identifiers, used by Alembic. 
revision = '7e46906f8997' down_revision = 'aede17d51d0f' def upgrade(): if (migration.schema_has_table('lbaas_loadbalancers') and migration.schema_has_table('nsxv_lbaas_loadbalancer_bindings')): op.execute('delete from nsxv_lbaas_loadbalancer_bindings ' 'where loadbalancer_id not in ' '(select id from lbaas_loadbalancers)') op.create_foreign_key( 'fk_lbaas_loadbalancers_id', 'nsxv_lbaas_loadbalancer_bindings', 'lbaas_loadbalancers', ['loadbalancer_id'], ['id'], ondelete='CASCADE') if (migration.schema_has_table('lbaas_listeners') and migration.schema_has_table('nsxv_lbaas_listener_bindings')): op.execute('delete from nsxv_lbaas_listener_bindings ' 'where listener_id not in ' '(select id from lbaas_listeners)') op.create_foreign_key( 'fk_lbaas_listeners_id', 'nsxv_lbaas_listener_bindings', 'lbaas_listeners', ['listener_id'], ['id'], ondelete='CASCADE') if (migration.schema_has_table('lbaas_pools') and migration.schema_has_table('nsxv_lbaas_pool_bindings')): op.execute('delete from nsxv_lbaas_pool_bindings ' 'where pool_id not in (select id from lbaas_pools)') op.create_foreign_key( 'fk_lbaas_pools_id', 'nsxv_lbaas_pool_bindings', 'lbaas_pools', ['pool_id'], ['id'], ondelete='CASCADE') if (migration.schema_has_table('lbaas_healthmonitors') and migration.schema_has_table('nsxv_lbaas_monitor_bindings')): op.execute('delete from nsxv_lbaas_monitor_bindings ' 'where hm_id not in (select id from lbaas_healthmonitors)') op.create_foreign_key( 'fk_lbaas_healthmonitors_id', 'nsxv_lbaas_monitor_bindings', 'lbaas_healthmonitors', ['hm_id'], ['id'], ondelete='CASCADE') ././@PaxHeader0000000000000000000000000000026000000000000011453 xustar0000000000000000154 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/86a55205337c_nsxv_availability_zone_router_mapping.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/86a5520530000644000175000017500000000236500000000000033252 
0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """NSXv add availability zone to the router bindings table instead of the resource pool column Revision ID: 86a55205337c Revises: 7e46906f8997 Create Date: 2016-07-12 09:18:44.450116 """ from alembic import op import sqlalchemy as sa from vmware_nsx.common import config # noqa # revision identifiers, used by Alembic. revision = '86a55205337c' down_revision = '7e46906f8997' def upgrade(): op.alter_column('nsxv_router_bindings', 'resource_pool', new_column_name='availability_zone', existing_type=sa.String(36), existing_nullable=True, existing_server_default='default') ././@PaxHeader0000000000000000000000000000025000000000000011452 xustar0000000000000000146 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/967462f585e1_add_dvs_id_to_switch_mappings.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/967462f580000644000175000017500000000201300000000000033262 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """add dvs_id column to neutron_nsx_network_mappings Revision ID: 967462f585e1 Revises: 3e4dccfe6fb4 Create Date: 2016-02-23 18:22:01.998540 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '967462f585e1' down_revision = '3e4dccfe6fb4' def upgrade(): op.add_column('neutron_nsx_network_mappings', sa.Column('dvs_id', sa.String(36), nullable=True)) ././@PaxHeader0000000000000000000000000000022500000000000011454 xustar0000000000000000127 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/aede17d51d0f_timestamps.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/aede17d510000644000175000017500000000476700000000000033560 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""add timestamp Revision ID: aede17d51d0f Revises: 5e564e781d77 Create Date: 2016-04-21 10:45:32.278433 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'aede17d51d0f' down_revision = '5e564e781d77' tables = [ 'nsxv_router_bindings', 'nsxv_edge_vnic_bindings', 'nsxv_edge_dhcp_static_bindings', 'nsxv_internal_networks', 'nsxv_internal_edges', 'nsxv_security_group_section_mappings', 'nsxv_rule_mappings', 'nsxv_port_vnic_mappings', 'nsxv_router_ext_attributes', 'nsxv_tz_network_bindings', 'nsxv_port_index_mappings', 'nsxv_firewall_rule_bindings', 'nsxv_spoofguard_policy_network_mappings', 'nsxv_vdr_dhcp_bindings', 'nsxv_lbaas_loadbalancer_bindings', 'nsxv_lbaas_listener_bindings', 'nsxv_lbaas_pool_bindings', 'nsxv_lbaas_monitor_bindings', 'nsxv_lbaas_certificate_bindings', 'nsxv_subnet_ext_attributes', 'tz_network_bindings', 'neutron_nsx_network_mappings', 'neutron_nsx_security_group_mappings', 'neutron_nsx_firewall_section_mappings', 'neutron_nsx_rule_mappings', 'neutron_nsx_port_mappings', 'neutron_nsx_router_mappings', 'neutron_nsx_service_bindings', 'neutron_nsx_dhcp_bindings', 'multi_provider_networks', 'networkconnections', 'networkgatewaydevicereferences', 'networkgatewaydevices', 'networkgateways', 'maclearningstates', 'lsn_port', 'lsn', 'qosqueues', 'portqueuemappings', 'networkqueuemappings', 'nsx_l2gw_connection_mappings', 'neutron_nsx_qos_policy_mappings', 'vcns_router_bindings'] def upgrade(): for table in tables: op.add_column( table, sa.Column(u'created_at', sa.DateTime(), nullable=True) ) op.add_column( table, sa.Column(u'updated_at', sa.DateTime(), nullable=True) ) ././@PaxHeader0000000000000000000000000000024300000000000011454 xustar0000000000000000141 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/b7f41687cbad_nsxv3_qos_policy_mapping.py 22 mtime=1586542525.0 
vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/b7f41687c0000644000175000017500000000236300000000000033421 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """nsxv3_qos_policy_mapping Revision ID: b7f41687cbad Revises: 967462f585e1 Create Date: 2016-03-17 06:12:09.450116 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'b7f41687cbad' down_revision = '967462f585e1' def upgrade(): op.create_table( 'neutron_nsx_qos_policy_mappings', sa.Column('qos_policy_id', sa.String(36), nullable=False), sa.Column('switch_profile_id', sa.String(36), nullable=False), sa.ForeignKeyConstraint(['qos_policy_id'], ['qos_policies.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('qos_policy_id')) ././@PaxHeader0000000000000000000000000000026300000000000011456 xustar0000000000000000157 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/c288bb6a7252_nsxv_add_resource_pool_to_router_mapping.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/c288bb6a70000644000175000017500000000224000000000000033466 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """NSXv add resource pool to the router bindings table Revision ID: c288bb6a7252 Revises: b7f41687cbad Create Date: 2016-05-15 06:12:09.450116 """ from alembic import op from oslo_config import cfg import sqlalchemy as sa from vmware_nsx.common import config # noqa # revision identifiers, used by Alembic. revision = 'c288bb6a7252' down_revision = 'b7f41687cbad' def upgrade(): op.add_column('nsxv_router_bindings', sa.Column('resource_pool', sa.String(36), nullable=True, server_default=cfg.CONF.nsxv.resource_pool_id)) ././@PaxHeader0000000000000000000000000000025400000000000011456 xustar0000000000000000150 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/c644ec62c585_nsxv3_add_nsx_dhcp_service_tables.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/c644ec62c0000644000175000017500000000412300000000000033465 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""NSXv3 add nsx_service_bindings and nsx_dhcp_bindings tables Revision ID: c644ec62c585 Revises: c288bb6a7252 Create Date: 2016-04-29 23:19:39.523196 """ from alembic import op import sqlalchemy as sa from vmware_nsxlib.v3 import nsx_constants # revision identifiers, used by Alembic. revision = 'c644ec62c585' down_revision = 'c288bb6a7252' nsx_service_type_enum = sa.Enum( nsx_constants.SERVICE_DHCP, name='neutron_nsx_service_bindings_service_type') def upgrade(): op.create_table( 'neutron_nsx_service_bindings', sa.Column('network_id', sa.String(36), nullable=False), sa.Column('port_id', sa.String(36), nullable=True), sa.Column('nsx_service_type', nsx_service_type_enum, nullable=False), sa.Column('nsx_service_id', sa.String(36), nullable=False), sa.ForeignKeyConstraint(['network_id'], ['networks.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('network_id', 'nsx_service_type')) op.create_table( 'neutron_nsx_dhcp_bindings', sa.Column('port_id', sa.String(36), nullable=False), sa.Column('subnet_id', sa.String(36), nullable=False), sa.Column('ip_address', sa.String(64), nullable=False), sa.Column('nsx_service_id', sa.String(36), nullable=False), sa.Column('nsx_binding_id', sa.String(36), nullable=False), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('port_id', 'nsx_binding_id')) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1542525 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/ocata/0000755000175000017500000000000000000000000030535 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1902535 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/ocata/contract/0000755000175000017500000000000000000000000032352 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000024300000000000011454 
xustar0000000000000000141 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/ocata/contract/14a89ddf96e2_add_az_internal_network.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/ocata/contract/14a89ddf0000644000175000017500000000322000000000000033516 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """NSX Adds a 'availability_zone' attribute to internal-networks table Revision ID: 14a89ddf96e2 Revises: 5c8f451290b7 Create Date: 2017-02-05 14:34:21.163418 """ from alembic import op import sqlalchemy as sa from sqlalchemy.engine import reflection from neutron.db import migration # revision identifiers, used by Alembic. 
revision = '14a89ddf96e2' down_revision = '5c8f451290b7' # milestone identifier, used by neutron-db-manage neutron_milestone = [migration.OCATA] def upgrade(): table_name = 'nsxv_internal_networks' # Add the new column op.add_column(table_name, sa.Column( 'availability_zone', sa.String(36), server_default='default')) # replace the old primary key constraint with a new one for both # purpose & az inspector = reflection.Inspector.from_engine(op.get_bind()) pk_constraint = inspector.get_pk_constraint(table_name) op.drop_constraint(pk_constraint.get('name'), table_name, type_='primary') op.create_primary_key(None, table_name, ['network_purpose', 'availability_zone']) ././@PaxHeader0000000000000000000000000000024100000000000011452 xustar0000000000000000139 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/ocata/contract/5c8f451290b7_nsx_ipam_table_rename.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/ocata/contract/5c8f45120000644000175000017500000000174300000000000033363 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """nsxv_subnet_ipam rename to nsx_subnet_ipam Revision ID: 5c8f451290b7 Revises: d49ac91b560e Create Date: 2016-12-25 11:08:30.300482 """ from alembic import op # revision identifiers, used by Alembic. 
revision = '5c8f451290b7' down_revision = 'd49ac91b560e' depends_on = ('6e6da8296c0e',) def upgrade(): op.rename_table('nsxv_subnet_ipam', 'nsx_subnet_ipam') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1902535 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/ocata/expand/0000755000175000017500000000000000000000000032014 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000023100000000000011451 xustar0000000000000000131 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/ocata/expand/01a33f93f5fd_nsxv_lbv2_l7pol.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/ocata/expand/01a33f93f50000644000175000017500000000330600000000000033245 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """nsxv_lbv2_l7policy Revision ID: 01a33f93f5fd Revises: dd9fe5a3a526 Create Date: 2017-01-04 10:10:59.990122 """ from alembic import op import sqlalchemy as sa from neutron.db import migration # revision identifiers, used by Alembic. 
revision = '01a33f93f5fd' down_revision = 'dd9fe5a3a526' # milestone identifier, used by neutron-db-manage neutron_milestone = [migration.OCATA] def upgrade(): if migration.schema_has_table('lbaas_l7policies'): op.create_table( 'nsxv_lbaas_l7policy_bindings', sa.Column('policy_id', sa.String(length=36), nullable=False), sa.Column('edge_id', sa.String(length=36), nullable=False), sa.Column('edge_app_rule_id', sa.String(length=36), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('policy_id'), sa.ForeignKeyConstraint(['policy_id'], ['lbaas_l7policies.id'], ondelete='CASCADE')) ././@PaxHeader0000000000000000000000000000024300000000000011454 xustar0000000000000000141 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/ocata/expand/dd9fe5a3a526_nsx_add_certificate_table.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/ocata/expand/dd9fe5a3a50000644000175000017500000000251200000000000033471 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """NSX Adds certificate table for client certificate management Revision ID: dd9fe5a3a526 Revises: e816d4fe9d4f Create Date: 2017-01-06 12:30:01.070022 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. 
revision = 'dd9fe5a3a526' down_revision = 'e816d4fe9d4f' def upgrade(): op.create_table('nsx_certificates', sa.Column('purpose', sa.String(length=32), nullable=False), sa.Column('certificate', sa.String(length=9216), nullable=False), sa.Column('private_key', sa.String(length=5120), nullable=False), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('created_at', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('purpose')) ././@PaxHeader0000000000000000000000000000024700000000000011460 xustar0000000000000000145 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/ocata/expand/e816d4fe9d4f_nsx_add_policy_security_group.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/ocata/expand/e816d4fe9d0000644000175000017500000000200400000000000033416 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """NSX Adds a 'policy' attribute to security-group Revision ID: e816d4fe9d4f Revises: 7b5ec3caa9a4 Create Date: 2016-10-06 11:30:31.263918 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. 
revision = 'e816d4fe9d4f' down_revision = '7b5ec3caa9a4' def upgrade(): op.add_column('nsx_extended_security_group_properties', sa.Column('policy', sa.String(36))) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1542525 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/pike/0000755000175000017500000000000000000000000030376 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1942537 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/pike/contract/0000755000175000017500000000000000000000000032213 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000025200000000000011454 xustar0000000000000000148 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/pike/contract/84ceffa27115_nsxv3_qos_policy_no_foreign_key.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/pike/contract/84ceffa270000644000175000017500000000250500000000000033531 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""remove the foreign key constrain from nsxv3_qos_policy_mapping Revision ID: 84ceffa27115 Revises: 8c0a81a07691 Create Date: 2017-03-15 11:47:09.450116 """ from alembic import op from sqlalchemy.engine import reflection from neutron.db import migration # revision identifiers, used by Alembic. revision = '84ceffa27115' down_revision = '8c0a81a07691' # milestone identifier, used by neutron-db-manage neutron_milestone = [migration.PIKE] def upgrade(): table_name = 'neutron_nsx_qos_policy_mappings' inspector = reflection.Inspector.from_engine(op.get_bind()) fk_constraint = inspector.get_foreign_keys(table_name)[0] op.drop_constraint(fk_constraint.get('name'), table_name, type_='foreignkey') ././@PaxHeader0000000000000000000000000000023100000000000011451 xustar0000000000000000131 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/pike/contract/8c0a81a07691_fix_ipam_table.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/pike/contract/8c0a81a070000644000175000017500000000253100000000000033353 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Update the primary key constraint of nsx_subnet_ipam Revision ID: 8c0a81a07691 Revises: 14a89ddf96e2 Create Date: 2017-02-15 15:25:21.163418 """ from alembic import op from sqlalchemy.engine import reflection # revision identifiers, used by Alembic. 
revision = '8c0a81a07691' down_revision = '14a89ddf96e2' def upgrade(): table_name = 'nsx_subnet_ipam' # replace the old primary key constraint with a new one for both # subnet and nsx-pool inspector = reflection.Inspector.from_engine(op.get_bind()) pk_constraint = inspector.get_pk_constraint(table_name) op.drop_constraint(pk_constraint.get('name'), table_name, type_='primary') op.create_primary_key(None, table_name, ['subnet_id', 'nsx_pool_id']) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1942537 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/pike/expand/0000755000175000017500000000000000000000000031655 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000023700000000000011457 xustar0000000000000000137 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/pike/expand/53eb497903a4_drop_vdr_dhcp_bindings.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/pike/expand/53eb497903a0000644000175000017500000000162300000000000033201 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Drop VDR DHCP bindings table Revision ID: 53eb497903a4 Revises: 8699700cd95c Create Date: 2017-02-22 10:10:59.990122 """ from alembic import op # revision identifiers, used by Alembic. 
revision = '53eb497903a4' down_revision = '8699700cd95c' def upgrade(): op.drop_table('nsxv_vdr_dhcp_bindings') ././@PaxHeader0000000000000000000000000000023400000000000011454 xustar0000000000000000134 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/pike/expand/7c4704ad37df_nsxv_lbv2_l7pol_fix.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/pike/expand/7c4704ad37d0000644000175000017500000000360300000000000033255 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Fix NSX Lbaas L7 policy table creation Revision ID: 7c4704ad37df Revises: e4c503f4133f Create Date: 2017-02-22 10:10:59.990122 """ from alembic import op import sqlalchemy as sa from neutron.db import migration # revision identifiers, used by Alembic. revision = '7c4704ad37df' down_revision = 'e4c503f4133f' def upgrade(): # On a previous upgrade this table was created conditionally. 
# It should always be created, and just the ForeignKeyConstraint # should be conditional if not migration.schema_has_table('nsxv_lbaas_l7policy_bindings'): op.create_table( 'nsxv_lbaas_l7policy_bindings', sa.Column('policy_id', sa.String(length=36), nullable=False), sa.Column('edge_id', sa.String(length=36), nullable=False), sa.Column('edge_app_rule_id', sa.String(length=36), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('policy_id')) if migration.schema_has_table('lbaas_l7policies'): op.create_foreign_key( 'fk_lbaas_l7policies_id', 'nsxv_lbaas_l7policy_bindings', 'lbaas_l7policies', ['policy_id'], ['id'], ondelete='CASCADE') ././@PaxHeader0000000000000000000000000000024100000000000011452 xustar0000000000000000139 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/pike/expand/8699700cd95c_nsxv_bgp_speaker_mapping.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/pike/expand/8699700cd950000644000175000017500000000356700000000000033146 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """nsxv_bgp_speaker_mapping Revision ID: 8699700cd95c Revises: 7c4704ad37df Create Date: 2017-02-16 03:13:39.775670 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. 
revision = '8699700cd95c' down_revision = '7c4704ad37df' def upgrade(): op.create_table( 'nsxv_bgp_speaker_bindings', sa.Column('edge_id', sa.String(36), nullable=False), sa.Column('bgp_speaker_id', sa.String(36), nullable=False), sa.Column('bgp_identifier', sa.String(64), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.ForeignKeyConstraint(['bgp_speaker_id'], ['bgp_speakers.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('edge_id')) op.create_table( 'nsxv_bgp_peer_edge_bindings', sa.Column('peer_id', sa.String(36), nullable=False), sa.Column('edge_id', sa.String(36), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.ForeignKeyConstraint(['peer_id'], ['bgp_peers.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('peer_id')) ././@PaxHeader0000000000000000000000000000023700000000000011457 xustar0000000000000000137 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/pike/expand/e4c503f4133f_port_vnic_type_support.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/pike/expand/e4c503f41330000644000175000017500000000260100000000000033163 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Port vnic_type support Revision ID: e4c503f4133f Revises: 01a33f93f5fd Create Date: 2017-02-20 00:05:30.894680 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'e4c503f4133f' down_revision = '01a33f93f5fd' def upgrade(): op.create_table( 'nsxv_port_ext_attributes', sa.Column('port_id', sa.String(length=36), nullable=False), sa.Column('vnic_type', sa.String(length=64), nullable=False, server_default='normal'), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('created_at', sa.DateTime(), nullable=True), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('port_id')) ././@PaxHeader0000000000000000000000000000023400000000000011454 xustar0000000000000000134 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/pike/expand/ea7a72ab9643_nsxv3_lbaas_mapping.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/pike/expand/ea7a72ab9640000644000175000017500000001140000000000000033330 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from alembic import op import sqlalchemy as sa from neutron.db import migration """nsxv3_lbaas_mapping Revision ID: ea7a72ab9643 Revises: 53eb497903a4 Create Date: 2017-06-12 16:59:48.021909 """ # revision identifiers, used by Alembic. 
revision = 'ea7a72ab9643' down_revision = '53eb497903a4' # milestone identifier, used by neutron-db-manage neutron_milestone = [migration.PIKE] def upgrade(): op.create_table( 'nsxv3_lbaas_loadbalancers', sa.Column('loadbalancer_id', sa.String(36), nullable=False), sa.Column('lb_router_id', sa.String(36), nullable=False), sa.Column('lb_service_id', sa.String(36), nullable=False), sa.Column('vip_address', sa.String(36), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('loadbalancer_id')) op.create_table( 'nsxv3_lbaas_listeners', sa.Column('loadbalancer_id', sa.String(36), nullable=False), sa.Column('listener_id', sa.String(36), nullable=False), sa.Column('app_profile_id', sa.String(36), nullable=False), sa.Column('lb_vs_id', sa.String(36), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('loadbalancer_id', 'listener_id')) op.create_table( 'nsxv3_lbaas_pools', sa.Column('loadbalancer_id', sa.String(36), nullable=False), sa.Column('pool_id', sa.String(36), nullable=False), sa.Column('lb_pool_id', sa.String(36), nullable=False), sa.Column('lb_vs_id', sa.String(36), nullable=True), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('loadbalancer_id', 'pool_id')) op.create_table( 'nsxv3_lbaas_monitors', sa.Column('loadbalancer_id', sa.String(36), nullable=False), sa.Column('pool_id', sa.String(36), nullable=False), sa.Column('hm_id', sa.String(36), nullable=False), sa.Column('lb_monitor_id', sa.String(36), nullable=False), sa.Column('lb_pool_id', sa.String(36), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('loadbalancer_id', 'pool_id', 'hm_id')) op.create_table( 'nsxv3_lbaas_l7rules', 
sa.Column('loadbalancer_id', sa.String(36), nullable=False), sa.Column('l7policy_id', sa.String(36), nullable=False), sa.Column('l7rule_id', sa.String(36), nullable=False), sa.Column('lb_rule_id', sa.String(36), nullable=False), sa.Column('lb_vs_id', sa.String(36), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('loadbalancer_id', 'l7policy_id', 'l7rule_id')) if migration.schema_has_table('lbaas_loadbalancers'): op.create_foreign_key( 'fk_nsxv3_lbaas_loadbalancers_id', 'nsxv3_lbaas_loadbalancers', 'lbaas_loadbalancers', ['loadbalancer_id'], ['id'], ondelete='CASCADE') if migration.schema_has_table('lbaas_listeners'): op.create_foreign_key( 'fk_nsxv3_lbaas_listeners_id', 'nsxv3_lbaas_listeners', 'lbaas_listeners', ['listener_id'], ['id'], ondelete='CASCADE') if migration.schema_has_table('lbaas_pools'): op.create_foreign_key( 'fk_nsxv3_lbaas_pools_id', 'nsxv3_lbaas_pools', 'lbaas_pools', ['pool_id'], ['id'], ondelete='CASCADE') if migration.schema_has_table('lbaas_healthmonitors'): op.create_foreign_key( 'fk_nsxv3_lbaas_healthmonitors_id', 'nsxv3_lbaas_monitors', 'lbaas_healthmonitors', ['hm_id'], ['id'], ondelete='CASCADE') if migration.schema_has_table('lbaas_l7rules'): op.create_foreign_key( 'fk_nsxv3_lbaas_l7rules_id', 'nsxv3_lbaas_l7rules', 'lbaas_l7rules', ['l7rule_id'], ['id'], ondelete='CASCADE') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1542525 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/queens/0000755000175000017500000000000000000000000030746 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1942537 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/queens/contract/0000755000175000017500000000000000000000000032563 
5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000024100000000000011452 xustar0000000000000000139 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/queens/contract/717f7f63a219_nsxv3_lbaas_l7policy.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/queens/contract/717f7f60000644000175000017500000000473700000000000033530 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """nsxv3_lbaas_l7policy Revision ID: 717f7f63a219 Revises: a1be06050b41 Create Date: 2017-10-26 08:32:40.846088 """ from alembic import op import sqlalchemy as sa from neutron.db import migration # revision identifiers, used by Alembic. 
revision = '717f7f63a219' down_revision = 'a1be06050b41' # milestone identifier, used by neutron-db-manage neutron_milestone = [migration.QUEENS, migration.ROCKY, migration.STEIN, migration.TRAIN] def upgrade(): if migration.schema_has_table('nsxv3_lbaas_l7rules'): op.drop_constraint('fk_nsxv3_lbaas_l7rules_id', 'nsxv3_lbaas_l7rules', 'foreignkey') op.drop_constraint('l7rule_id', 'nsxv3_lbaas_l7rules', 'primary') op.drop_column('nsxv3_lbaas_l7rules', 'loadbalancer_id') op.drop_column('nsxv3_lbaas_l7rules', 'l7rule_id') op.rename_table('nsxv3_lbaas_l7rules', 'nsxv3_lbaas_l7policies') if migration.schema_has_table('lbaas_l7policies'): op.create_foreign_key( 'fk_nsxv3_lbaas_l7policies_id', 'nsxv3_lbaas_l7policies', 'lbaas_l7policies', ['l7policy_id'], ['id'], ondelete='CASCADE') else: op.create_table( 'nsxv3_lbaas_l7policies', sa.Column('l7policy_id', sa.String(36), nullable=False), sa.Column('lb_rule_id', sa.String(36), nullable=False), sa.Column('lb_vs_id', sa.String(36), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('l7policy_id')) if migration.schema_has_table('lbaas_l7policies'): op.create_foreign_key( 'fk_nsxv3_lbaas_l7policies_id', 'nsxv3_lbaas_l7policies', 'lbaas_l7policies', ['l7policy_id'], ['id'], ondelete='CASCADE') ././@PaxHeader0000000000000000000000000000024500000000000011456 xustar0000000000000000143 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/queens/contract/a1be06050b41_update_nsx_binding_types.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/queens/contract/a1be0600000644000175000017500000000373600000000000033555 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """update nsx binding types Revision ID: a1be06050b41 Revises: 84ceffa27115 Create Date: 2017-09-04 23:58:22.003350 """ from alembic import op import sqlalchemy as sa from neutron.db import migration as neutron_op # revision identifiers, used by Alembic. revision = 'a1be06050b41' down_revision = '84ceffa27115' depends_on = ('aede17d51d0f') all_tz_binding_type_enum = sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext', 'vxlan', 'geneve', 'portgroup', 'nsx-net', name='tz_network_bindings_binding_type') new_tz_binding_type_enum = sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext', 'geneve', 'portgroup', 'nsx-net', name='tz_network_bindings_binding_type') def upgrade(): # add the new network types to the enum neutron_op.alter_enum_add_value( 'tz_network_bindings', 'binding_type', all_tz_binding_type_enum, False) # change existing entries with type 'vxlan' to 'geneve' op.execute("UPDATE tz_network_bindings SET binding_type='geneve' " "where binding_type='vxlan'") # remove 'vxlan' from the enum op.alter_column( 'tz_network_bindings', 'binding_type', type_=new_tz_binding_type_enum, existing_type=all_tz_binding_type_enum, existing_nullable=False) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1942537 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/queens/expand/0000755000175000017500000000000000000000000032225 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000023400000000000011454 xustar0000000000000000134 
path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/queens/expand/0dbeda408e41_nsxv3_vpn_mapping.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/queens/expand/0dbeda4080000644000175000017500000000276400000000000033534 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """nsxv3_vpn_mapping Revision ID: 0dbeda408e41 Revises: 9799427fc0e1 Create Date: 2017-11-26 12:27:40.846088 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. 
revision = '0dbeda408e41' down_revision = '9799427fc0e1' def upgrade(): op.create_table( 'neutron_nsx_vpn_connection_mappings', sa.Column('neutron_id', sa.String(36), nullable=False), sa.Column('session_id', sa.String(36), nullable=False), sa.Column('dpd_profile_id', sa.String(36), nullable=False), sa.Column('ike_profile_id', sa.String(36), nullable=False), sa.Column('ipsec_profile_id', sa.String(36), nullable=False), sa.Column('peer_ep_id', sa.String(36), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('neutron_id')) ././@PaxHeader0000000000000000000000000000022500000000000011454 xustar0000000000000000127 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/queens/expand/9799427fc0e1_nsx_tv_map.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/queens/expand/9799427fc0000644000175000017500000000272200000000000033342 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """nsx map project to plugin Revision ID: 9799427fc0e1 Revises: ea7a72ab9643 Create Date: 2017-06-12 16:59:48.021909 """ from alembic import op import sqlalchemy as sa from neutron.db import migration # revision identifiers, used by Alembic. 
revision = '9799427fc0e1' down_revision = 'ea7a72ab9643' plugin_type_enum = sa.Enum('dvs', 'nsx-v', 'nsx-t', name='nsx_plugin_type') # milestone identifier, used by neutron-db-manage neutron_milestone = [migration.QUEENS, migration.ROCKY] def upgrade(): op.create_table( 'nsx_project_plugin_mappings', sa.Column('project', sa.String(36), nullable=False), sa.Column('plugin', plugin_type_enum, nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('project')) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1542525 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/stein/0000755000175000017500000000000000000000000030570 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1942537 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/stein/expand/0000755000175000017500000000000000000000000032047 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000023700000000000011457 xustar0000000000000000137 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/stein/expand/99bfcb6003c6_lbaas_error_no_member.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/stein/expand/99bfcb60030000644000175000017500000000255700000000000033372 0ustar00coreycorey00000000000000# Copyright 2019 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """lbaas_error_no_member Revision ID: 99bfcb6003c6 Revises: fc6308289aca Create Date: 2019-03-07 11:27:00.000000 """ from alembic import op from neutron.db import migration # revision identifiers, used by Alembic. revision = '99bfcb6003c6' down_revision = 'fc6308289aca' # milestone identifier, used by neutron-db-manage neutron_milestone = [migration.STEIN] def upgrade(): if (migration.schema_has_table('nsxv3_lbaas_loadbalancers') and migration.schema_has_table('lbaas_loadbalancers')): # Mark as ERROR loadbalancers without nsx mapping op.execute("UPDATE lbaas_loadbalancers " "SET provisioning_status='ERROR' " "WHERE id not in (SELECT loadbalancer_id FROM " "nsxv3_lbaas_loadbalancers)") ././@PaxHeader0000000000000000000000000000023600000000000011456 xustar0000000000000000136 path=vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/stein/expand/fc6308289aca_lbaas_no_foreign_key.py 22 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/alembic_migrations/versions/stein/expand/fc6308289a0000644000175000017500000000355200000000000033314 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """lbaas_no_foreign_key Revision ID: fc6308289aca Revises: 0dbeda408e41 Create Date: 2018-06-04 13:47:09.450116 """ from alembic import op from sqlalchemy.engine import reflection from neutron.db import migration # revision identifiers, used by Alembic. revision = 'fc6308289aca' down_revision = '0dbeda408e41' depends_on = ('717f7f63a219') def upgrade(): for table_name in ['nsxv3_lbaas_loadbalancers', 'nsxv3_lbaas_listeners', 'nsxv3_lbaas_pools', 'nsxv3_lbaas_monitors', 'nsxv3_lbaas_l7rules', 'nsxv3_lbaas_l7policies', 'nsxv_lbaas_loadbalancer_bindings', 'nsxv_lbaas_listener_bindings', 'nsxv_lbaas_pool_bindings', 'nsxv_lbaas_monitor_bindings', 'nsxv_lbaas_l7policy_bindings']: if migration.schema_has_table(table_name): inspector = reflection.Inspector.from_engine(op.get_bind()) fks = inspector.get_foreign_keys(table_name) if fks: fk_constraint = fks[0] op.drop_constraint(fk_constraint.get('name'), table_name, type_='foreignkey') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1942537 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/models/0000755000175000017500000000000000000000000023231 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/models/__init__.py0000644000175000017500000000000000000000000025330 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/migration/models/head.py0000644000175000017500000000171500000000000024510 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.db.migration.models import head from vmware_nsx.db import extended_security_group # noqa from vmware_nsx.db import extended_security_group_rule # noqa from vmware_nsx.db import nsx_models # noqa from vmware_nsx.db import nsxv_models # noqa from vmware_nsx.db import vcns_models # noqa def get_metadata(): return head.model_base.BASEV2.metadata ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/nsx_models.py0000644000175000017500000004441700000000000022514 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ NSX data models. This module defines data models used by the VMware NSX plugin family. 
""" from neutron_lib.db import model_base import sqlalchemy as sa from sqlalchemy import orm from neutron.db import models_v2 from oslo_db.sqlalchemy import models from vmware_nsxlib.v3 import nsx_constants class TzNetworkBinding(model_base.BASEV2, models.TimestampMixin): """Represents a binding of a virtual network with a transport zone. This model class associates a Neutron network with a transport zone; optionally a vlan ID might be used if the binding type is 'bridge' """ __tablename__ = 'tz_network_bindings' # TODO(arosen) - it might be worth while refactoring the how this data # is stored later so every column does not need to be a primary key. network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', ondelete="CASCADE"), primary_key=True) # 'flat', 'vlan', 'stt', 'gre', 'l3_ext', 'geneve', 'portgroup', 'nsx-net' binding_type = sa.Column(sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext', 'geneve', 'portgroup', 'nsx-net', name='tz_network_bindings_binding_type'), nullable=False, primary_key=True) phy_uuid = sa.Column(sa.String(36), primary_key=True, default='') vlan_id = sa.Column(sa.Integer, primary_key=True, autoincrement=False, default=0) def __init__(self, network_id, binding_type, phy_uuid, vlan_id): self.network_id = network_id self.binding_type = binding_type self.phy_uuid = phy_uuid self.vlan_id = vlan_id def __repr__(self): return "" % (self.network_id, self.binding_type, self.phy_uuid, self.vlan_id) class NeutronNsxNetworkMapping(model_base.BASEV2, models.TimestampMixin): """Maps neutron network identifiers to NSX identifiers. Because of chained logical switches more than one mapping might exist for a single Neutron network. For a VLAN network, one neutron network may map to multiple logical switches(port groups) created on multiple DVSes in the backend for NSX-V plugin. DVS-ID will store the moref of the DVS where the nsx id is being created. For other types and plugins, this value will remain null. 
""" __tablename__ = 'neutron_nsx_network_mappings' neutron_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', ondelete='CASCADE'), primary_key=True) nsx_id = sa.Column(sa.String(36), primary_key=True) dvs_id = sa.Column(sa.String(36), nullable=True) class NeutronNsxSecurityGroupMapping(model_base.BASEV2, models.TimestampMixin): """Backend mappings for Neutron Security Group identifiers. This class maps a neutron security group identifier to the corresponding NSX security profile identifier. """ __tablename__ = 'neutron_nsx_security_group_mappings' neutron_id = sa.Column(sa.String(36), sa.ForeignKey('securitygroups.id', ondelete="CASCADE"), primary_key=True) nsx_id = sa.Column(sa.String(36), primary_key=True) class NeutronNsxFirewallSectionMapping(model_base.BASEV2, models.TimestampMixin): """Backend mappings for Neutron Security-group associated fw sections.""" __tablename__ = 'neutron_nsx_firewall_section_mappings' neutron_id = sa.Column(sa.String(36), sa.ForeignKey('securitygroups.id', ondelete='CASCADE'), primary_key=True, nullable=False) nsx_id = sa.Column(sa.String(36), nullable=False) class NeutronNsxRuleMapping(model_base.BASEV2, models.TimestampMixin): """Backend mappings for firewall rules. This class maps a neutron security group rule with NSX firewall rule. 
""" __tablename__ = 'neutron_nsx_rule_mappings' neutron_id = sa.Column(sa.String(36), sa.ForeignKey('securitygrouprules.id', ondelete="CASCADE"), primary_key=True, nullable=False) nsx_id = sa.Column(sa.String(36), nullable=False) class NeutronNsxPortMapping(model_base.BASEV2, models.TimestampMixin): """Represents the mapping between neutron and nsx port uuids.""" __tablename__ = 'neutron_nsx_port_mappings' neutron_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), primary_key=True) nsx_switch_id = sa.Column(sa.String(36)) nsx_port_id = sa.Column(sa.String(36), nullable=False) def __init__(self, neutron_id, nsx_switch_id, nsx_port_id): self.neutron_id = neutron_id self.nsx_switch_id = nsx_switch_id self.nsx_port_id = nsx_port_id class NeutronNsxRouterMapping(model_base.BASEV2, models.TimestampMixin): """Maps neutron router identifiers to NSX identifiers.""" __tablename__ = 'neutron_nsx_router_mappings' neutron_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id', ondelete='CASCADE'), primary_key=True) nsx_id = sa.Column(sa.String(36)) class NeutronNsxServiceBinding(model_base.BASEV2, models.TimestampMixin): """Represents a binding of a Neutron network with enabled NSX services.""" __tablename__ = 'neutron_nsx_service_bindings' network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', ondelete='CASCADE'), nullable=False, primary_key=True) port_id = sa.Column(sa.String(36), nullable=True) nsx_service_type = sa.Column( sa.Enum(nsx_constants.SERVICE_DHCP, name='neutron_nsx_service_bindings_service_type'), nullable=False, primary_key=True) nsx_service_id = sa.Column(sa.String(36), nullable=False) class NeutronNsxDhcpBinding(model_base.BASEV2, models.TimestampMixin): """Represents a binding of a Neutron port with DHCP address binding.""" __tablename__ = 'neutron_nsx_dhcp_bindings' port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), nullable=False, primary_key=True) subnet_id = sa.Column(sa.String(36), 
nullable=False) ip_address = sa.Column(sa.String(64), nullable=False) nsx_service_id = sa.Column(sa.String(36), nullable=False) nsx_binding_id = sa.Column(sa.String(36), nullable=False, primary_key=True) class MultiProviderNetworks(model_base.BASEV2, models.TimestampMixin): """Networks provisioned through multiprovider extension.""" __tablename__ = 'multi_provider_networks' network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', ondelete="CASCADE"), primary_key=True) def __init__(self, network_id): self.network_id = network_id class NetworkConnection(model_base.BASEV2, model_base.HasProject, models.TimestampMixin): """Defines a connection between a network gateway and a network.""" # We use port_id as the primary key as one can connect a gateway # to a network in multiple ways (and we cannot use the same port form # more than a single gateway) network_gateway_id = sa.Column(sa.String(36), sa.ForeignKey('networkgateways.id', ondelete='CASCADE')) network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', ondelete='CASCADE')) segmentation_type = sa.Column( sa.Enum('flat', 'vlan', name='networkconnections_segmentation_type')) segmentation_id = sa.Column(sa.Integer) __table_args__ = (sa.UniqueConstraint(network_gateway_id, segmentation_type, segmentation_id), model_base.BASEV2.__table_args__) # Also, storing port id comes back useful when disconnecting a network # from a gateway port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete='CASCADE'), primary_key=True) class NetworkGatewayDeviceReference(model_base.BASEV2, models.TimestampMixin): id = sa.Column(sa.String(36), primary_key=True) network_gateway_id = sa.Column(sa.String(36), sa.ForeignKey('networkgateways.id', ondelete='CASCADE'), primary_key=True) interface_name = sa.Column(sa.String(64), primary_key=True) class NetworkGatewayDevice(model_base.BASEV2, model_base.HasId, model_base.HasProject, models.TimestampMixin): nsx_id = sa.Column(sa.String(36)) # Optional name for the 
gateway device name = sa.Column(sa.String(255)) # Transport connector type. Not using enum as range of # connector types might vary with backend version connector_type = sa.Column(sa.String(10)) # Transport connector IP Address connector_ip = sa.Column(sa.String(64)) # operational status status = sa.Column(sa.String(16)) class NetworkGateway(model_base.BASEV2, model_base.HasId, model_base.HasProject, models.TimestampMixin): """Defines the data model for a network gateway.""" name = sa.Column(sa.String(255)) default = sa.Column(sa.Boolean()) devices = orm.relationship(NetworkGatewayDeviceReference, backref='networkgateways', cascade='all,delete') network_connections = orm.relationship(NetworkConnection, lazy='joined') class MacLearningState(model_base.BASEV2, models.TimestampMixin): port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), primary_key=True) mac_learning_enabled = sa.Column(sa.Boolean(), nullable=False) # Add a relationship to the Port model using the backref attribute. # This will instruct SQLAlchemy to eagerly load this association. 
port = orm.relationship( models_v2.Port, backref=orm.backref("mac_learning_state", lazy='joined', uselist=False, cascade='delete')) class LsnPort(models_v2.model_base.BASEV2, models.TimestampMixin): __tablename__ = 'lsn_port' lsn_port_id = sa.Column(sa.String(36), primary_key=True) lsn_id = sa.Column(sa.String(36), sa.ForeignKey('lsn.lsn_id', ondelete="CASCADE"), nullable=False) sub_id = sa.Column(sa.String(36), nullable=False, unique=True) mac_addr = sa.Column(sa.String(32), nullable=False, unique=True) def __init__(self, lsn_port_id, subnet_id, mac_address, lsn_id): self.lsn_port_id = lsn_port_id self.lsn_id = lsn_id self.sub_id = subnet_id self.mac_addr = mac_address class Lsn(models_v2.model_base.BASEV2, models.TimestampMixin): __tablename__ = 'lsn' lsn_id = sa.Column(sa.String(36), primary_key=True) net_id = sa.Column(sa.String(36), nullable=False) def __init__(self, net_id, lsn_id): self.net_id = net_id self.lsn_id = lsn_id class NsxL2GWConnectionMapping(model_base.BASEV2, models.TimestampMixin): """Define a mapping between L2 gateway connection and bridge endpoint.""" __tablename__ = 'nsx_l2gw_connection_mappings' connection_id = sa.Column(sa.String(36), nullable=False, primary_key=True) port_id = sa.Column(sa.String(36), sa.ForeignKey("ports.id", ondelete="CASCADE"), nullable=False) bridge_endpoint_id = sa.Column(sa.String(36), nullable=False) class QosPolicySwitchProfile(model_base.BASEV2, models.TimestampMixin): # Maps neutron qos policy identifiers to NSX-V3 switch profile identifiers __tablename__ = 'neutron_nsx_qos_policy_mappings' qos_policy_id = sa.Column(sa.String(36), primary_key=True) switch_profile_id = sa.Column(sa.String(36), nullable=False) class NsxPortMirrorSessionMapping(model_base.BASEV2): """Define a mapping between Tap Flow and PortMirrorSession object.""" __tablename__ = 'nsx_port_mirror_session_mappings' tap_flow_id = sa.Column(sa.String(36), nullable=False, primary_key=True) port_mirror_session_id = sa.Column(sa.String(36), 
nullable=False) class NsxSubnetIpam(model_base.BASEV2, models.TimestampMixin): """Map Subnets with their backend pool id.""" __tablename__ = 'nsx_subnet_ipam' # the Subnet id is not a foreign key because the subnet is deleted # before the pool does subnet_id = sa.Column(sa.String(36), primary_key=True) nsx_pool_id = sa.Column(sa.String(36), primary_key=True) class NsxCertificateRepository(model_base.BASEV2, models.TimestampMixin): """Stores certificate and private key per logical purpose. For now, will have zero or one rows with nsxv3 client certificate """ __tablename__ = 'nsx_certificates' purpose = sa.Column(sa.String(32), nullable=False, primary_key=True) certificate = sa.Column(sa.String(9216), nullable=False) private_key = sa.Column(sa.String(5120), nullable=False) class NsxLbaasLoadbalancer(model_base.BASEV2, models.TimestampMixin): """Stores mapping of LBaaS loadbalancer and NSX LB service and router Since in NSXv3, multiple loadbalancers may share the same LB service on NSX backend. And the in turn LB service attaches to a logical router. This stores the mapping between LBaaS loadbalancer and NSX LB service id and NSX logical router id. 
""" __tablename__ = 'nsxv3_lbaas_loadbalancers' loadbalancer_id = sa.Column(sa.String(36), primary_key=True) lb_router_id = sa.Column(sa.String(36), nullable=False) lb_service_id = sa.Column(sa.String(36), nullable=False) vip_address = sa.Column(sa.String(36), nullable=False) class NsxLbaasListener(model_base.BASEV2, models.TimestampMixin): """Stores the mapping between LBaaS listener and NSX LB virtual server""" __tablename__ = 'nsxv3_lbaas_listeners' loadbalancer_id = sa.Column(sa.String(36), primary_key=True) listener_id = sa.Column(sa.String(36), primary_key=True) app_profile_id = sa.Column(sa.String(36), nullable=False) lb_vs_id = sa.Column(sa.String(36), nullable=False) class NsxLbaasPool(model_base.BASEV2, models.TimestampMixin): """Stores the mapping between LBaaS pool and NSX LB Pool""" __tablename__ = 'nsxv3_lbaas_pools' loadbalancer_id = sa.Column(sa.String(36), primary_key=True) pool_id = sa.Column(sa.String(36), primary_key=True) lb_pool_id = sa.Column(sa.String(36), nullable=False) lb_vs_id = sa.Column(sa.String(36)) class NsxLbaasMonitor(model_base.BASEV2, models.TimestampMixin): """Stores the mapping between LBaaS monitor and NSX LB monitor""" __tablename__ = 'nsxv3_lbaas_monitors' loadbalancer_id = sa.Column(sa.String(36), primary_key=True) pool_id = sa.Column(sa.String(36), primary_key=True) hm_id = sa.Column(sa.String(36), primary_key=True) lb_monitor_id = sa.Column(sa.String(36), nullable=False) lb_pool_id = sa.Column(sa.String(36), nullable=False) class NsxLbaasL7Rule(model_base.BASEV2, models.TimestampMixin): """Stores the mapping between LBaaS monitor and NSX LB monitor This table is only used in Pike and obsoleted since Queen as the mapping has been stored in nsxv3_lbaas_l7policies table instead. This original table was added in pike so that we cannot change DB migration script there, but instead we update the table with a new db migration script in Queen. 
""" __tablename__ = 'nsxv3_lbaas_l7rules' loadbalancer_id = sa.Column(sa.String(36), primary_key=True) l7policy_id = sa.Column(sa.String(36), primary_key=True) l7rule_id = sa.Column(sa.String(36), primary_key=True) lb_rule_id = sa.Column(sa.String(36), nullable=False) lb_vs_id = sa.Column(sa.String(36), nullable=False) class NsxLbaasL7Policy(model_base.BASEV2, models.TimestampMixin): """Stores the mapping between LBaaS l7policy and NSX LB rule""" __tablename__ = 'nsxv3_lbaas_l7policies' l7policy_id = sa.Column(sa.String(36), primary_key=True) lb_rule_id = sa.Column(sa.String(36), nullable=False) lb_vs_id = sa.Column(sa.String(36), nullable=False) class NsxProjectPluginMapping(model_base.BASEV2, models.TimestampMixin): """Stores the mapping between the neutron plugin and the project id""" __tablename__ = 'nsx_project_plugin_mappings' project = sa.Column(sa.String(36), primary_key=True) plugin = sa.Column(sa.Enum('dvs', 'nsx-v', 'nsx-t', name='nsx_plugin_type'), nullable=False) class NsxVpnConnectionMapping(model_base.BASEV2, models.TimestampMixin): """Stores the mapping between VPNaaS connections and NSX objects""" __tablename__ = 'neutron_nsx_vpn_connection_mappings' neutron_id = sa.Column(sa.String(36), primary_key=True) session_id = sa.Column(sa.String(36), nullable=False) dpd_profile_id = sa.Column(sa.String(36), nullable=False) ike_profile_id = sa.Column(sa.String(36), nullable=False) ipsec_profile_id = sa.Column(sa.String(36), nullable=False) peer_ep_id = sa.Column(sa.String(36), nullable=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/nsx_portbindings_db.py0000644000175000017500000001753100000000000024375 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from oslo_serialization import jsonutils from neutron_lib.api.definitions import port as port_def from neutron_lib.api.definitions import portbindings as pbin from neutron_lib.api.definitions import provider_net as pnet from neutron_lib.api import validators from neutron_lib import constants from neutron_lib.db import api as db_api from neutron_lib.db import resource_extend from neutron_lib import exceptions from neutron_lib.plugins import directory from neutron_lib.plugins import utils as p_utils from neutron.db import portbindings_db as pbin_db from neutron.plugins.ml2 import models as pbin_model from vmware_nsx._i18n import _ from vmware_nsx.common import nsx_constants from vmware_nsx.common import utils as c_utils from vmware_nsx.db import nsxv_db from vmware_nsx.extensions import projectpluginmap LOG = logging.getLogger(__name__) FLAT_VLAN = 0 SUPPORTED_VNIC_TYPES = (pbin.VNIC_NORMAL, pbin.VNIC_DIRECT, pbin.VNIC_DIRECT_PHYSICAL) VNIC_TYPES_DIRECT_PASSTHROUGH = (pbin.VNIC_DIRECT, pbin.VNIC_DIRECT_PHYSICAL) SUPPORTED_V_NETWORK_TYPES = (c_utils.NsxVNetworkTypes.VLAN, c_utils.NsxVNetworkTypes.FLAT, c_utils.NsxVNetworkTypes.PORTGROUP) SUPPORTED_T_NETWORK_TYPES = (c_utils.NsxV3NetworkTypes.VLAN, c_utils.NsxV3NetworkTypes.FLAT) #Note(asarfaty): This class is currently used also by the NSX-V3 plugin, # although it uses the NsxvPortExtAttributes DB table (which can be renamed # in the future) @resource_extend.has_resource_extenders class NsxPortBindingMixin(pbin_db.PortBindingMixin): def _validate_port_vnic_type( self, context, 
port_data, network_id, plugin_type=projectpluginmap.NsxPlugins.NSX_V): vnic_type = port_data.get(pbin.VNIC_TYPE) if vnic_type and vnic_type not in SUPPORTED_VNIC_TYPES: err_msg = _("Invalid port vnic-type '%(vnic_type)s'." "Supported vnic-types are %(valid_types)s" ) % {'vnic_type': vnic_type, 'valid_types': SUPPORTED_VNIC_TYPES} raise exceptions.InvalidInput(error_message=err_msg) direct_vnic_type = vnic_type in VNIC_TYPES_DIRECT_PASSTHROUGH if direct_vnic_type: self._validate_vnic_type_direct_passthrough_for_network( context, network_id, plugin_type) return direct_vnic_type def _validate_vnic_type_direct_passthrough_for_network(self, context, network_id, plugin_type): supported_network_types = SUPPORTED_V_NETWORK_TYPES if plugin_type == projectpluginmap.NsxPlugins.NSX_T: supported_network_types = SUPPORTED_T_NETWORK_TYPES if not self._validate_network_type(context, network_id, supported_network_types): msg_info = { 'vnic_types': VNIC_TYPES_DIRECT_PASSTHROUGH, 'networks': supported_network_types} err_msg = _("%(vnic_types)s port vnic-types are only supported " "for ports on networks of types " "%(networks)s") % msg_info raise exceptions.InvalidInput(error_message=err_msg) def _process_portbindings_create_and_update( self, context, port, port_res, vif_type=nsx_constants.VIF_TYPE_DVS): # Allow clearing the host id if pbin.HOST_ID in port and port[pbin.HOST_ID] is None: port[pbin.HOST_ID] = '' super(NsxPortBindingMixin, self)._process_portbindings_create_and_update( context, port, port_res) port_id = port_res['id'] org_vnic_type = nsxv_db.get_nsxv_ext_attr_port_vnic_type( context.session, port_id) vnic_type = port.get(pbin.VNIC_TYPE, org_vnic_type) cap_port_filter = (port.get(pbin.VNIC_TYPE, org_vnic_type) == pbin.VNIC_NORMAL) vif_details = {pbin.CAP_PORT_FILTER: cap_port_filter} network = self.get_network(context, port_res['network_id']) if network.get(pnet.NETWORK_TYPE) == c_utils.NsxVNetworkTypes.FLAT: vif_details[pbin.VIF_DETAILS_VLAN] = FLAT_VLAN elif 
network.get(pnet.NETWORK_TYPE) == c_utils.NsxVNetworkTypes.VLAN: vif_details[pbin.VIF_DETAILS_VLAN] = network[pnet.SEGMENTATION_ID] with db_api.CONTEXT_WRITER.using(context): port_binding = context.session.query( pbin_model.PortBinding).filter_by(port_id=port_id).first() if not port_binding: port_binding = pbin_model.PortBinding( port_id=port_id, vif_type=vif_type) context.session.add(port_binding) port_binding.host = port_res[pbin.HOST_ID] or '' port_binding.vnic_type = vnic_type port_binding.vif_details = jsonutils.dumps(vif_details) nsxv_db.update_nsxv_port_ext_attributes( context.session, port_id, vnic_type) profile = port.get(pbin.PROFILE, constants.ATTR_NOT_SPECIFIED) if validators.is_attr_set(profile) or profile is None: port_binding.profile = (jsonutils.dumps(profile) if profile else "") port_res[pbin.VNIC_TYPE] = vnic_type self.extend_port_portbinding(port_res, port_binding) def extend_port_portbinding(self, port_res, binding): port_res[pbin.PROFILE] = self._get_profile(binding) port_res[pbin.VIF_TYPE] = binding.vif_type port_res[pbin.VIF_DETAILS] = self._get_vif_details(binding) def _get_vif_details(self, binding): if binding.vif_details: try: return jsonutils.loads(binding.vif_details) except Exception: LOG.error("Serialized vif_details DB value '%(value)s' " "for port %(port)s is invalid", {'value': binding.vif_details, 'port': binding.port_id}) return {} def _get_profile(self, binding): if binding.profile: try: return jsonutils.loads(binding.profile) except Exception: LOG.error("Serialized profile DB value '%(value)s' for " "port %(port)s is invalid", {'value': binding.profile, 'port': binding.port_id}) return {} @staticmethod @resource_extend.extends([port_def.COLLECTION_NAME]) def _extend_port_portbinding(port_res, port_db): plugin = directory.get_plugin() plugin.extend_port_dict_binding(port_res, port_db) if port_db.nsx_port_attributes: port_res[pbin.VNIC_TYPE] = port_db.nsx_port_attributes.vnic_type if hasattr(port_db, 'port_bindings'): binding = 
p_utils.get_port_binding_by_status_and_host( port_db.port_bindings, constants.ACTIVE) if binding: plugin.extend_port_portbinding(port_res, binding) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/nsxrouter.py0000644000175000017500000000456100000000000022406 0ustar00coreycorey00000000000000# Copyright 2013 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from neutron_lib.db import resource_extend from oslo_log import log as logging from vmware_nsx.db import nsxv_models LOG = logging.getLogger(__name__) @resource_extend.has_resource_extenders class NsxRouterMixin(object): """Mixin class to enable nsx router support.""" nsx_attributes = [] @staticmethod def _extend_nsx_router_dict(router_res, router_db, nsx_attributes): nsx_attrs = router_db['nsx_attributes'] for attr in nsx_attributes: name = attr['name'] default = attr['default'] router_res[name] = ( nsx_attrs and nsx_attrs[name] or default) def _process_nsx_router_create( self, context, router_db, router_req): if not router_db['nsx_attributes']: kwargs = {} for attr in self.nsx_attributes: name = attr['name'] default = attr['default'] kwargs[name] = router_req.get(name, default) nsx_attributes = nsxv_models.NsxvRouterExtAttributes( router_id=router_db['id'], **kwargs) context.session.add(nsx_attributes) router_db['nsx_attributes'] = nsx_attributes else: # The situation where the record already 
exists will # be likely once the NSXRouterExtAttributes model # will allow for defining several attributes pertaining # to different extensions for attr in self.nsx_attributes: name = attr['name'] default = attr['default'] router_db['nsx_attributes'][name] = router_req.get( name, default) LOG.debug("Nsx router extension successfully processed " "for router:%s", router_db['id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/nsxv_db.py0000644000175000017500000011046600000000000022002 0ustar00coreycorey00000000000000# Copyright 2013 VMware, Inc. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import decorator from neutron_lib.api.definitions import portbindings as pbin from neutron_lib import constants as lib_const from neutron_lib.db import api as db_api from oslo_db import exception as db_exc from oslo_log import log as logging from oslo_utils import excutils import six from sqlalchemy import func from sqlalchemy.orm import exc from sqlalchemy.sql import expression as expr from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import nsxv_constants from vmware_nsx.db import db as nsx_db from vmware_nsx.db import nsxv_models from vmware_nsx.extensions import dhcp_mtu as ext_dhcp_mtu from vmware_nsx.extensions import dns_search_domain as ext_dns_search_domain from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az from vmware_nsx.plugins.nsx_v.vshield.common import constants NsxvEdgeDhcpStaticBinding = nsxv_models.NsxvEdgeDhcpStaticBinding LOG = logging.getLogger(__name__) def add_nsxv_router_binding(session, router_id, vse_id, lswitch_id, status, appliance_size=nsxv_constants.LARGE, edge_type=nsxv_constants.SERVICE_EDGE, availability_zone=None): with session.begin(subtransactions=True): binding = nsxv_models.NsxvRouterBinding( router_id=router_id, edge_id=vse_id, lswitch_id=lswitch_id, status=status, appliance_size=appliance_size, edge_type=edge_type, availability_zone=availability_zone) session.add(binding) return binding @decorator.decorator def warn_on_binding_status_error(f, *args, **kwargs): result = f(*args, **kwargs) if result is None: return # we support functions that return a single entry or a list if isinstance(result, list): bindings = result else: bindings = [result] for binding in bindings: if binding and binding['status'] == lib_const.ERROR: LOG.warning("Found NSXV router binding entry with status " "%(status)s: router %(router)s, " "edge %(edge)s, lswitch %(lswitch)s, " "status description: %(desc)s ", {'status': binding['status'], 'router': binding['router_id'], 'edge': 
binding['edge_id'], 'lswitch': binding['lswitch_id'], 'desc': binding['status_description']}) return result @warn_on_binding_status_error def get_nsxv_router_binding(session, router_id): return session.query(nsxv_models.NsxvRouterBinding).filter_by( router_id=router_id).first() @warn_on_binding_status_error def get_nsxv_router_binding_by_edge(session, edge_id): return session.query(nsxv_models.NsxvRouterBinding).filter_by( edge_id=edge_id).first() @warn_on_binding_status_error def get_nsxv_router_bindings_by_edge(session, edge_id): return session.query(nsxv_models.NsxvRouterBinding).filter_by( edge_id=edge_id).all() @warn_on_binding_status_error def get_nsxv_router_bindings(session, filters=None, like_filters=None): session = db_api.get_reader_session() query = session.query(nsxv_models.NsxvRouterBinding) return nsx_db._apply_filters_to_query(query, nsxv_models.NsxvRouterBinding, filters, like_filters).all() def update_nsxv_router_binding(session, router_id, **kwargs): with session.begin(subtransactions=True): binding = (session.query(nsxv_models.NsxvRouterBinding). filter_by(router_id=router_id).one()) for key, value in six.iteritems(kwargs): binding[key] = value return binding def delete_nsxv_router_binding(session, router_id): with session.begin(subtransactions=True): binding = (session.query(nsxv_models.NsxvRouterBinding). filter_by(router_id=router_id).first()) if binding: session.delete(binding) def get_edge_availability_zone(session, edge_id): binding = get_nsxv_router_binding_by_edge(session, edge_id) if binding: return binding['availability_zone'] def get_router_availability_zone(session, router_id): binding = get_nsxv_router_binding(session, router_id) if binding: return binding['availability_zone'] def clean_edge_router_binding(session, edge_id): with session.begin(subtransactions=True): (session.query(nsxv_models.NsxvRouterBinding). 
filter_by(edge_id=edge_id).delete()) def get_edge_vnic_bindings_with_networks(session): query = session.query(nsxv_models.NsxvEdgeVnicBinding) return query.filter( nsxv_models.NsxvEdgeVnicBinding.network_id != expr.null()).all() def get_edge_vnic_binding(session, edge_id, network_id): return session.query(nsxv_models.NsxvEdgeVnicBinding).filter_by( edge_id=edge_id, network_id=network_id).first() def get_edge_vnic_bindings_by_edge(session, edge_id): query = session.query(nsxv_models.NsxvEdgeVnicBinding) return query.filter( nsxv_models.NsxvEdgeVnicBinding.edge_id == edge_id, nsxv_models.NsxvEdgeVnicBinding.network_id != expr.null()).all() def get_edge_vnic_bindings_by_int_lswitch(session, lswitch_id): return session.query(nsxv_models.NsxvEdgeVnicBinding).filter_by( network_id=lswitch_id).all() def create_edge_vnic_binding(session, edge_id, vnic_index, network_id, tunnel_index=-1): with session.begin(subtransactions=True): binding = nsxv_models.NsxvEdgeVnicBinding( edge_id=edge_id, vnic_index=vnic_index, tunnel_index=tunnel_index, network_id=network_id) session.add(binding) return binding def delete_edge_vnic_binding_by_network(session, edge_id, network_id): with session.begin(subtransactions=True): binding = (session.query(nsxv_models.NsxvEdgeVnicBinding). 
filter_by(edge_id=edge_id, network_id=network_id).one()) session.delete(binding) def init_edge_vnic_binding(session, edge_id): """Init edge vnic binding to preallocated 10 available edge vnics.""" with session.begin(subtransactions=True): for vnic_index in range(constants.MAX_VNIC_NUM)[1:]: start = (vnic_index - 1) * constants.MAX_TUNNEL_NUM stop = vnic_index * constants.MAX_TUNNEL_NUM for tunnel_index in range(start, stop): binding = nsxv_models.NsxvEdgeVnicBinding( edge_id=edge_id, vnic_index=vnic_index, tunnel_index=tunnel_index + 1) session.add(binding) def clean_edge_vnic_binding(session, edge_id): """Clean edge vnic binding.""" with session.begin(subtransactions=True): (session.query(nsxv_models.NsxvEdgeVnicBinding). filter_by(edge_id=edge_id).delete()) def allocate_edge_vnic(session, edge_id, network_id): """Allocate an available edge vnic to network.""" # get vnic count of specific edge bindings = (session.query(nsxv_models.NsxvEdgeVnicBinding). filter_by(edge_id=edge_id, vnic_index=1).all()) vnic_tunnels_per_index = len(bindings) with session.begin(subtransactions=True): bindings = (session.query(nsxv_models.NsxvEdgeVnicBinding). 
filter_by(edge_id=edge_id, network_id=None).all()) for binding in bindings: if binding['tunnel_index'] % vnic_tunnels_per_index == 1: binding['network_id'] = network_id session.add(binding) return binding msg = (_("Edge VNIC: Failed to allocate one available vnic on edge_id: " ":%(edge_id)s to network_id: %(network_id)s") % {'edge_id': edge_id, 'network_id': network_id}) LOG.error(msg) raise nsx_exc.NsxPluginException(err_msg=msg) def allocate_edge_vnic_with_tunnel_index(session, edge_id, network_id, availability_zone): """Allocate an available edge vnic with tunnel index to network.""" # TODO(berlin): temporary solution to let metadata and dhcp use # different vnics int_net = get_nsxv_internal_network( session, constants.InternalEdgePurposes.INTER_EDGE_PURPOSE, availability_zone) metadata_net_id = int_net['network_id'] if int_net else None with session.begin(subtransactions=True): query = session.query(nsxv_models.NsxvEdgeVnicBinding) query = query.filter( nsxv_models.NsxvEdgeVnicBinding.edge_id == edge_id, nsxv_models.NsxvEdgeVnicBinding.network_id == expr.null()) if metadata_net_id: vnic_binding = get_edge_vnic_binding( session, edge_id, metadata_net_id) if vnic_binding: vnic_index = vnic_binding.vnic_index query = query.filter( nsxv_models.NsxvEdgeVnicBinding.vnic_index != vnic_index) binding = query.first() if not binding: msg = (_("Failed to allocate one available vnic on edge_id: " ":%(edge_id)s to network_id: %(network_id)s") % {'edge_id': edge_id, 'network_id': network_id}) LOG.error(msg) raise nsx_exc.NsxPluginException(err_msg=msg) binding['network_id'] = network_id session.add(binding) return binding def allocate_specific_edge_vnic(session, edge_id, vnic_index, tunnel_index, network_id): """Allocate an specific edge vnic to network.""" with session.begin(subtransactions=True): binding = (session.query(nsxv_models.NsxvEdgeVnicBinding). 
filter_by(edge_id=edge_id, vnic_index=vnic_index, tunnel_index=tunnel_index).one()) binding['network_id'] = network_id session.add(binding) return binding def get_dhcp_edge_network_binding(session, network_id): with session.begin(subtransactions=True): dhcp_router_edges = [binding['edge_id'] for binding in get_nsxv_router_bindings(session) if binding['router_id'].startswith( constants.DHCP_EDGE_PREFIX)] bindings = (session.query(nsxv_models.NsxvEdgeVnicBinding). filter_by(network_id=network_id)) for binding in bindings: edge_id = binding['edge_id'] if edge_id in dhcp_router_edges: return binding def free_edge_vnic_by_network(session, edge_id, network_id): """Free an edge vnic.""" with session.begin(subtransactions=True): binding = (session.query(nsxv_models.NsxvEdgeVnicBinding). filter_by(edge_id=edge_id, network_id=network_id).one()) binding['network_id'] = None session.add(binding) return binding def _create_edge_dhcp_static_binding(session, edge_id, mac_address, binding_id): with session.begin(subtransactions=True): binding = nsxv_models.NsxvEdgeDhcpStaticBinding( edge_id=edge_id, mac_address=mac_address, binding_id=binding_id) session.add(binding) return binding def create_edge_dhcp_static_binding(session, edge_id, mac_address, binding_id): try: return _create_edge_dhcp_static_binding(session, edge_id, mac_address, binding_id) except db_exc.DBDuplicateEntry: LOG.warning('Conflicting DHCP binding entry for ' '%(edge_id)s:%(mac_address)s. 
Overwriting!', {'edge_id': edge_id, 'mac_address': mac_address}) delete_edge_dhcp_static_binding(session, edge_id, mac_address) return _create_edge_dhcp_static_binding(session, edge_id, mac_address, binding_id) def get_edge_dhcp_static_binding(session, edge_id, mac_address): return session.query(nsxv_models.NsxvEdgeDhcpStaticBinding).filter_by( edge_id=edge_id, mac_address=mac_address).first() def get_dhcp_static_bindings_by_edge(session, edge_id): return session.query(nsxv_models.NsxvEdgeDhcpStaticBinding).filter_by( edge_id=edge_id).all() def delete_edge_dhcp_static_binding(session, edge_id, mac_address): with session.begin(subtransactions=True): session.query(nsxv_models.NsxvEdgeDhcpStaticBinding).filter_by( edge_id=edge_id, mac_address=mac_address).delete() def delete_edge_dhcp_static_binding_id(session, edge_id, binding_id): with session.begin(subtransactions=True): session.query(nsxv_models.NsxvEdgeDhcpStaticBinding).filter_by( edge_id=edge_id, binding_id=binding_id).delete() def get_nsxv_dhcp_bindings_count_per_edge(session): return ( session.query( NsxvEdgeDhcpStaticBinding.edge_id, func.count(NsxvEdgeDhcpStaticBinding.mac_address)).group_by( NsxvEdgeDhcpStaticBinding.edge_id).all()) def clean_edge_dhcp_static_bindings_by_edge(session, edge_id): with session.begin(subtransactions=True): session.query(nsxv_models.NsxvEdgeDhcpStaticBinding).filter_by( edge_id=edge_id).delete() def create_nsxv_internal_network(session, network_purpose, availability_zone, network_id): with session.begin(subtransactions=True): try: network = nsxv_models.NsxvInternalNetworks( network_purpose=network_purpose, network_id=network_id, availability_zone=availability_zone) session.add(network) except db_exc.DBDuplicateEntry: with excutils.save_and_reraise_exception(): LOG.exception("Duplicate internal network for purpose " "%(p)s and availabiltiy zone %(az)s", {'p': network_purpose, 'az': availability_zone}) def get_nsxv_internal_network(session, network_purpose, availability_zone, 
default_fallback=True): with session.begin(subtransactions=True): net_list = (session.query(nsxv_models.NsxvInternalNetworks). filter_by(network_purpose=network_purpose, availability_zone=availability_zone).all()) if net_list: # Should have only one results as purpose+az are the keys return net_list[0] elif default_fallback and availability_zone != nsx_az.DEFAULT_NAME: # try the default availability zone, since this zone does not # have his own internal edge net_list = (session.query(nsxv_models.NsxvInternalNetworks). filter_by(network_purpose=network_purpose, availability_zone=nsx_az.DEFAULT_NAME).all()) if net_list: return net_list[0] def get_nsxv_internal_network_for_az(session, network_purpose, availability_zone): return get_nsxv_internal_network(session, network_purpose, availability_zone, default_fallback=False) def get_nsxv_internal_networks(session, network_purpose): with session.begin(subtransactions=True): return (session.query(nsxv_models.NsxvInternalNetworks). filter_by(network_purpose=network_purpose).all()) def get_nsxv_internal_network_by_id(session, network_id): with session.begin(subtransactions=True): return (session.query(nsxv_models.NsxvInternalNetworks). filter_by(network_id=network_id).first()) def delete_nsxv_internal_network(session, network_purpose, network_id): with session.begin(subtransactions=True): return (session.query(nsxv_models.NsxvInternalNetworks). 
filter_by(network_purpose=network_purpose, network_id=network_id).delete()) def create_nsxv_internal_edge(session, ext_ip_address, purpose, router_id): with session.begin(subtransactions=True): try: internal_edge = nsxv_models.NsxvInternalEdges( ext_ip_address=ext_ip_address, purpose=purpose, router_id=router_id) session.add(internal_edge) except db_exc.DBDuplicateEntry: with excutils.save_and_reraise_exception(): LOG.exception("Duplicate internal Edge IP %s", ext_ip_address) def get_nsxv_internal_edge(session, ext_ip_address): with session.begin(subtransactions=True): return (session.query(nsxv_models.NsxvInternalEdges). filter_by(ext_ip_address=ext_ip_address).all()) def update_nsxv_internal_edge(session, ext_ip_address, router_id): with session.begin(subtransactions=True): edges = get_nsxv_internal_edge(session, ext_ip_address) for edge in edges: edge['router_id'] = router_id def get_nsxv_internal_edges_by_purpose(session, purpose): with session.begin(subtransactions=True): return (session.query(nsxv_models.NsxvInternalEdges). filter_by(purpose=purpose).all()) def get_nsxv_internal_edge_by_router(session, router_id): with session.begin(subtransactions=True): return (session.query(nsxv_models.NsxvInternalEdges). filter_by(router_id=router_id).first()) def delete_nsxv_internal_edge(session, ext_ip_address): with session.begin(subtransactions=True): return (session.query(nsxv_models.NsxvInternalEdges). 
filter_by(ext_ip_address=ext_ip_address).delete()) def add_neutron_nsx_section_mapping(session, neutron_id, section_id): with session.begin(subtransactions=True): mapping = nsxv_models.NsxvSecurityGroupSectionMapping( neutron_id=neutron_id, ip_section_id=section_id) session.add(mapping) return mapping def add_neutron_nsx_rule_mapping(session, neutron_id, nsx_rule_id): with session.begin(subtransactions=True): mapping = nsxv_models.NsxvRuleMapping(neutron_id=neutron_id, nsx_rule_id=nsx_rule_id) session.add(mapping) return mapping def add_neutron_nsx_port_vnic_mapping(session, neutron_id, nsx_id): with session.begin(subtransactions=True): mapping = nsxv_models.NsxvPortVnicMapping( neutron_id=neutron_id, nsx_id=nsx_id) session.add(mapping) return mapping def get_nsx_section(session, neutron_id): try: mapping = (session.query(nsxv_models.NsxvSecurityGroupSectionMapping). filter_by(neutron_id=neutron_id). one()) return mapping except exc.NoResultFound: LOG.debug("NSX identifiers for neutron security group %s not yet " "stored in Neutron DB", neutron_id) def delete_neutron_nsx_section_mapping(session, neutron_id): with session.begin(subtransactions=True): return (session.query(nsxv_models.NsxvSecurityGroupSectionMapping). filter_by(neutron_id=neutron_id).delete()) def get_nsx_rule_id(session, neutron_id): try: mapping = (session.query(nsxv_models.NsxvRuleMapping). filter_by(neutron_id=neutron_id). one()) return mapping['nsx_rule_id'] except exc.NoResultFound: LOG.debug("NSX identifiers for neutron rule %s not yet " "stored in Neutron DB", neutron_id) def get_nsx_vnic_id(session, neutron_id): try: mapping = (session.query(nsxv_models.NsxvPortVnicMapping). filter_by(neutron_id=neutron_id). 
one()) return mapping['nsx_id'] except exc.NoResultFound: LOG.debug("NSX identifiers for neutron port %s not yet " "stored in Neutron DB", neutron_id) def get_network_bindings(session, network_id): session = session or db_api.get_reader_session() return (session.query(nsxv_models.NsxvTzNetworkBinding). filter_by(network_id=network_id). all()) def get_network_bindings_by_vlanid_and_physical_net(session, vlan_id, phy_uuid): session = session or db_api.get_reader_session() return (session.query(nsxv_models.NsxvTzNetworkBinding). filter_by(vlan_id=vlan_id, phy_uuid=phy_uuid). all()) def get_network_bindings_by_ids(session, vlan_id, phy_uuid): return get_network_bindings_by_vlanid_and_physical_net( session, vlan_id, phy_uuid) def get_network_bindings_by_physical_net(session, phy_uuid): session = session or db_api.get_reader_session() return (session.query(nsxv_models.NsxvTzNetworkBinding). filter_by(phy_uuid=phy_uuid). all()) def get_network_bindings_by_physical_net_and_type(session, phy_uuid, binding_type): session = session or db_api.get_reader_session() return (session.query(nsxv_models.NsxvTzNetworkBinding). filter_by(phy_uuid=phy_uuid, binding_type=binding_type). all()) def delete_network_bindings(session, network_id): return (session.query(nsxv_models.NsxvTzNetworkBinding). filter_by(network_id=network_id).delete()) def add_network_binding(session, network_id, binding_type, phy_uuid, vlan_id): with session.begin(subtransactions=True): binding = nsxv_models.NsxvTzNetworkBinding(network_id, binding_type, phy_uuid, vlan_id) session.add(binding) return binding def get_network_bindings_by_vlanid(session, vlan_id): session = session or db_api.get_reader_session() return (session.query(nsxv_models.NsxvTzNetworkBinding). filter_by(vlan_id=vlan_id). 
all()) def update_network_binding_phy_uuid(session, network_id, binding_type, vlan_id, phy_uuid): with session.begin(subtransactions=True): bindings = (session.query(nsxv_models.NsxvTzNetworkBinding).filter_by( vlan_id=vlan_id, network_id=network_id, binding_type=binding_type).all()) for binding in bindings: binding['phy_uuid'] = phy_uuid # # Edge Firewall binding methods # def add_nsxv_edge_firewallrule_binding(session, map_info): with session.begin(subtransactions=True): binding = nsxv_models.NsxvEdgeFirewallRuleBinding( rule_id=map_info['rule_id'], rule_vse_id=map_info['rule_vseid'], edge_id=map_info['edge_id']) session.add(binding) return binding def delete_nsxv_edge_firewallrule_binding(session, id): with session.begin(subtransactions=True): if not (session.query(nsxv_models.NsxvEdgeFirewallRuleBinding). filter_by(rule_id=id).delete()): msg = _("Rule Resource binding with id:%s not found!") % id raise nsx_exc.NsxPluginException(err_msg=msg) def get_nsxv_edge_firewallrule_binding(session, id, edge_id): with session.begin(subtransactions=True): return (session.query(nsxv_models.NsxvEdgeFirewallRuleBinding). filter_by(rule_id=id, edge_id=edge_id).first()) def get_nsxv_edge_firewallrule_binding_by_vseid( session, edge_id, rule_vseid): with session.begin(subtransactions=True): try: return (session.query(nsxv_models.NsxvEdgeFirewallRuleBinding). 
filter_by(edge_id=edge_id, rule_vse_id=rule_vseid).one()) except exc.NoResultFound: return def cleanup_nsxv_edge_firewallrule_binding(session, edge_id): with session.begin(subtransactions=True): session.query( nsxv_models.NsxvEdgeFirewallRuleBinding).filter_by( edge_id=edge_id).delete() def map_spoofguard_policy_for_network(session, network_id, policy_id): with session.begin(subtransactions=True): mapping = nsxv_models.NsxvSpoofGuardPolicyNetworkMapping( network_id=network_id, policy_id=policy_id) session.add(mapping) return mapping def get_spoofguard_policy_id(session, network_id): try: mapping = (session.query( nsxv_models.NsxvSpoofGuardPolicyNetworkMapping). filter_by(network_id=network_id).one()) return mapping['policy_id'] except exc.NoResultFound: LOG.debug("SpoofGuard Policy for network %s was not found", network_id) def get_spoofguard_policy_network_id(session, policy_id): try: mapping = (session.query( nsxv_models.NsxvSpoofGuardPolicyNetworkMapping). filter_by(policy_id=policy_id).one()) return mapping['network_id'] except exc.NoResultFound: LOG.debug("SpoofGuard Policy %s was not found in Neutron DB", policy_id) def get_nsxv_spoofguard_policy_network_mappings(session, filters=None, like_filters=None): session = db_api.get_reader_session() query = session.query(nsxv_models.NsxvSpoofGuardPolicyNetworkMapping) return nsx_db._apply_filters_to_query( query, nsxv_models.NsxvSpoofGuardPolicyNetworkMapping, filters, like_filters).all() def del_nsxv_spoofguard_binding(session, policy_id): return (session.query(nsxv_models.NsxvSpoofGuardPolicyNetworkMapping). 
filter_by(policy_id=policy_id).delete()) def add_nsxv_lbaas_loadbalancer_binding( session, loadbalancer_id, edge_id, edge_fw_rule_id, vip_address): with session.begin(subtransactions=True): binding = nsxv_models.NsxvLbaasLoadbalancerBinding( loadbalancer_id=loadbalancer_id, edge_id=edge_id, edge_fw_rule_id=edge_fw_rule_id, vip_address=vip_address) session.add(binding) return binding def get_nsxv_lbaas_loadbalancer_bindings(session, filters=None, like_filters=None): session = db_api.get_reader_session() query = session.query(nsxv_models.NsxvLbaasLoadbalancerBinding) return nsx_db._apply_filters_to_query( query, nsxv_models.NsxvLbaasLoadbalancerBinding, filters, like_filters).all() def get_nsxv_lbaas_loadbalancer_binding(session, loadbalancer_id): try: return session.query( nsxv_models.NsxvLbaasLoadbalancerBinding).filter_by( loadbalancer_id=loadbalancer_id).one() except exc.NoResultFound: return def get_nsxv_lbaas_loadbalancer_binding_by_edge(session, edge_id): return session.query( nsxv_models.NsxvLbaasLoadbalancerBinding).filter_by( edge_id=edge_id).all() def del_nsxv_lbaas_loadbalancer_binding(session, loadbalancer_id): return (session.query(nsxv_models.NsxvLbaasLoadbalancerBinding). filter_by(loadbalancer_id=loadbalancer_id).delete()) def add_nsxv_lbaas_listener_binding(session, loadbalancer_id, listener_id, app_profile_id, vse_id): with session.begin(subtransactions=True): binding = nsxv_models.NsxvLbaasListenerBinding( loadbalancer_id=loadbalancer_id, listener_id=listener_id, app_profile_id=app_profile_id, vse_id=vse_id) session.add(binding) return binding def get_nsxv_lbaas_listener_binding(session, loadbalancer_id, listener_id): try: return session.query( nsxv_models.NsxvLbaasListenerBinding).filter_by( loadbalancer_id=loadbalancer_id, listener_id=listener_id).one() except exc.NoResultFound: return def del_nsxv_lbaas_listener_binding(session, loadbalancer_id, listener_id): return (session.query(nsxv_models.NsxvLbaasListenerBinding). 
filter_by(loadbalancer_id=loadbalancer_id, listener_id=listener_id).delete()) def get_nsxv_lbaas_listener_binding_by_vse(session, loadbalancer_id, vse_id): try: return session.query( nsxv_models.NsxvLbaasListenerBinding).filter_by( loadbalancer_id=loadbalancer_id, vse_id=vse_id).one() except exc.NoResultFound: return def add_nsxv_lbaas_pool_binding(session, loadbalancer_id, pool_id, edge_pool_id): with session.begin(subtransactions=True): binding = nsxv_models.NsxvLbaasPoolBinding( loadbalancer_id=loadbalancer_id, pool_id=pool_id, edge_pool_id=edge_pool_id) session.add(binding) return binding def get_nsxv_lbaas_pool_binding(session, loadbalancer_id, pool_id): try: return session.query( nsxv_models.NsxvLbaasPoolBinding).filter_by( loadbalancer_id=loadbalancer_id, pool_id=pool_id).one() except exc.NoResultFound: return def del_nsxv_lbaas_pool_binding(session, loadbalancer_id, pool_id): return (session.query(nsxv_models.NsxvLbaasPoolBinding). filter_by(loadbalancer_id=loadbalancer_id, pool_id=pool_id).delete()) def add_nsxv_lbaas_monitor_binding(session, loadbalancer_id, pool_id, hm_id, edge_id, edge_mon_id): with session.begin(subtransactions=True): binding = nsxv_models.NsxvLbaasMonitorBinding( loadbalancer_id=loadbalancer_id, pool_id=pool_id, hm_id=hm_id, edge_id=edge_id, edge_mon_id=edge_mon_id) session.add(binding) return binding def get_nsxv_lbaas_monitor_binding(session, loadbalancer_id, pool_id, hm_id, edge_id): try: return session.query( nsxv_models.NsxvLbaasMonitorBinding).filter_by( loadbalancer_id=loadbalancer_id, pool_id=pool_id, hm_id=hm_id, edge_id=edge_id).one() except exc.NoResultFound: return def del_nsxv_lbaas_monitor_binding(session, loadbalancer_id, pool_id, hm_id, edge_id): return (session.query(nsxv_models.NsxvLbaasMonitorBinding). 
filter_by(loadbalancer_id=loadbalancer_id, pool_id=pool_id, hm_id=hm_id, edge_id=edge_id).delete()) def add_nsxv_lbaas_certificate_binding(session, cert_id, edge_id, edge_cert_id): with session.begin(subtransactions=True): binding = nsxv_models.NsxvLbaasCertificateBinding( cert_id=cert_id, edge_id=edge_id, edge_cert_id=edge_cert_id) session.add(binding) return binding def get_nsxv_lbaas_certificate_binding(session, cert_id, edge_id): try: return session.query( nsxv_models.NsxvLbaasCertificateBinding).filter_by( cert_id=cert_id, edge_id=edge_id).one() except exc.NoResultFound: return def del_nsxv_lbaas_certificate_binding(session, cert_id, edge_id): return (session.query(nsxv_models.NsxvLbaasCertificateBinding). filter_by(cert_id=cert_id, edge_id=edge_id).delete()) def add_nsxv_lbaas_l7policy_binding(session, policy_id, edge_id, edge_app_rule_id): with session.begin(subtransactions=True): binding = nsxv_models.NsxvLbaasL7PolicyBinding( policy_id=policy_id, edge_id=edge_id, edge_app_rule_id=edge_app_rule_id) session.add(binding) return binding def get_nsxv_lbaas_l7policy_binding(session, policy_id): try: return session.query( nsxv_models.NsxvLbaasL7PolicyBinding).filter_by( policy_id=policy_id).one() except exc.NoResultFound: return def del_nsxv_lbaas_l7policy_binding(session, policy_id): try: return (session.query(nsxv_models.NsxvLbaasL7PolicyBinding). 
filter_by(policy_id=policy_id).delete()) except exc.NoResultFound: return def add_nsxv_subnet_ext_attributes(session, subnet_id, dns_search_domain=None, dhcp_mtu=None): with session.begin(subtransactions=True): binding = nsxv_models.NsxvSubnetExtAttributes( subnet_id=subnet_id, dns_search_domain=dns_search_domain, dhcp_mtu=dhcp_mtu) session.add(binding) return binding def get_nsxv_subnet_ext_attributes(session, subnet_id): try: return session.query( nsxv_models.NsxvSubnetExtAttributes).filter_by( subnet_id=subnet_id).one() except exc.NoResultFound: return def update_nsxv_subnet_ext_attributes(session, subnet_id, dns_search_domain=None, dhcp_mtu=None): with session.begin(subtransactions=True): binding = (session.query(nsxv_models.NsxvSubnetExtAttributes). filter_by(subnet_id=subnet_id).one()) binding[ext_dns_search_domain.DNS_SEARCH_DOMAIN] = dns_search_domain binding[ext_dhcp_mtu.DHCP_MTU] = dhcp_mtu return binding def add_nsxv_port_ext_attributes(session, port_id, vnic_type=pbin.VNIC_NORMAL): with session.begin(subtransactions=True): binding = nsxv_models.NsxvPortExtAttributes( port_id=port_id, vnic_type=vnic_type) session.add(binding) return binding def get_nsxv_ext_attr_port_vnic_type(session, port_id): try: binding = session.query(nsxv_models.NsxvPortExtAttributes).filter_by( port_id=port_id).one() return binding['vnic_type'] except exc.NoResultFound: return pbin.VNIC_NORMAL def update_nsxv_port_ext_attributes(session, port_id, vnic_type=pbin.VNIC_NORMAL): try: binding = session.query( nsxv_models.NsxvPortExtAttributes).filter_by( port_id=port_id).one() binding['vnic_type'] = vnic_type return binding except exc.NoResultFound: return add_nsxv_port_ext_attributes( session, port_id, vnic_type=vnic_type) def add_nsxv_bgp_speaker_binding(session, edge_id, speaker_id, bgp_identifier): with session.begin(subtransactions=True): binding = nsxv_models.NsxvBgpSpeakerBinding( edge_id=edge_id, bgp_speaker_id=speaker_id, bgp_identifier=bgp_identifier) session.add(binding) 
return binding def get_nsxv_bgp_speaker_binding(session, edge_id): try: binding = (session.query(nsxv_models.NsxvBgpSpeakerBinding). filter_by(edge_id=edge_id). one()) return binding except exc.NoResultFound: LOG.debug("No dynamic routing enabled on edge %s.", edge_id) def get_nsxv_bgp_speaker_bindings(session, speaker_id): try: return (session.query(nsxv_models.NsxvBgpSpeakerBinding). filter_by(bgp_speaker_id=speaker_id).all()) except exc.NoResultFound: return [] def delete_nsxv_bgp_speaker_binding(session, edge_id): binding = session.query( nsxv_models.NsxvBgpSpeakerBinding).filter_by(edge_id=edge_id) if binding: binding.delete() def add_nsxv_bgp_peer_edge_binding(session, peer_id, edge_id): with session.begin(subtransactions=True): binding = nsxv_models.NsxvBgpPeerEdgeBinding(edge_id=edge_id, peer_id=peer_id) session.add(binding) return binding def get_nsxv_bgp_peer_edge_binding(session, peer_id): try: binding = (session.query(nsxv_models.NsxvBgpPeerEdgeBinding). filter_by(peer_id=peer_id).one()) return binding except exc.NoResultFound: pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/nsxv_models.py0000644000175000017500000003470100000000000022675 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.api.definitions import portbindings from neutron_lib.db import model_base import sqlalchemy as sa from sqlalchemy import orm from neutron.db.models import l3 as l3_db from neutron.db import models_v2 from oslo_db.sqlalchemy import models from vmware_nsx.common import nsxv_constants class NsxvRouterBinding(model_base.BASEV2, model_base.HasStatusDescription, models.TimestampMixin): """Represents the mapping between neutron router and vShield Edge.""" __tablename__ = 'nsxv_router_bindings' # no ForeignKey to routers.id because for now, a router can be removed # from routers when delete_router is executed, but the binding is only # removed after the Edge is deleted router_id = sa.Column(sa.String(36), primary_key=True) edge_id = sa.Column(sa.String(36), nullable=True) lswitch_id = sa.Column(sa.String(36), nullable=True) appliance_size = sa.Column(sa.Enum( nsxv_constants.COMPACT, nsxv_constants.LARGE, nsxv_constants.XLARGE, nsxv_constants.QUADLARGE, name='nsxv_router_bindings_appliance_size')) edge_type = sa.Column(sa.Enum(nsxv_constants.SERVICE_EDGE, nsxv_constants.VDR_EDGE, name='nsxv_router_bindings_edge_type')) availability_zone = sa.Column(sa.String(36), nullable=True) class NsxvEdgeVnicBinding(model_base.BASEV2, models.TimestampMixin): """Represents mapping between vShield Edge vnic and neutron netowrk.""" __tablename__ = 'nsxv_edge_vnic_bindings' edge_id = sa.Column(sa.String(36), primary_key=True) vnic_index = sa.Column(sa.Integer(), primary_key=True) tunnel_index = sa.Column(sa.Integer(), primary_key=True) network_id = sa.Column(sa.String(36), nullable=True) class NsxvEdgeDhcpStaticBinding(model_base.BASEV2, models.TimestampMixin): """Represents mapping between mac addr and bindingId.""" __tablename__ = 'nsxv_edge_dhcp_static_bindings' edge_id = sa.Column(sa.String(36), primary_key=True) mac_address = sa.Column(sa.String(32), primary_key=True) binding_id = sa.Column(sa.String(36), nullable=False) class NsxvInternalNetworks(model_base.BASEV2, 
models.TimestampMixin): """Represents internal networks between NSXV plugin elements.""" __tablename__ = 'nsxv_internal_networks' network_purpose = sa.Column( sa.Enum(nsxv_constants.INTER_EDGE_PURPOSE, name='nsxv_internal_networks_purpose'), primary_key=True) network_id = sa.Column(sa.String(36), sa.ForeignKey("networks.id", ondelete="CASCADE"), nullable=True) availability_zone = sa.Column(sa.String(36), primary_key=True) class NsxvInternalEdges(model_base.BASEV2, models.TimestampMixin): """Represents internal Edge appliances for NSXV plugin operations.""" __tablename__ = 'nsxv_internal_edges' ext_ip_address = sa.Column(sa.String(64), primary_key=True) router_id = sa.Column(sa.String(36), nullable=True) purpose = sa.Column( sa.Enum(nsxv_constants.INTER_EDGE_PURPOSE, name='nsxv_internal_edges_purpose')) class NsxvSecurityGroupSectionMapping(model_base.BASEV2, models.TimestampMixin): """Backend mappings for Neutron Rule Sections. This class maps a neutron security group identifier to the corresponding NSX layer 3 section. """ __tablename__ = 'nsxv_security_group_section_mappings' neutron_id = sa.Column(sa.String(36), sa.ForeignKey('securitygroups.id', ondelete="CASCADE"), primary_key=True) ip_section_id = sa.Column(sa.String(100)) class NsxvRuleMapping(model_base.BASEV2, models.TimestampMixin): """Backend mappings for Neutron Rule Sections. This class maps a neutron security group identifier to the corresponding NSX layer 3 and layer 2 sections. 
""" __tablename__ = 'nsxv_rule_mappings' neutron_id = sa.Column(sa.String(36), sa.ForeignKey('securitygrouprules.id', ondelete="CASCADE"), primary_key=True) nsx_rule_id = sa.Column(sa.String(36), primary_key=True) class NsxvPortVnicMapping(model_base.BASEV2, models.TimestampMixin): """Maps neutron port to NSXv VM Vnic Id.""" __tablename__ = 'nsxv_port_vnic_mappings' neutron_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), primary_key=True) nsx_id = sa.Column(sa.String(42), primary_key=True) class NsxvRouterExtAttributes(model_base.BASEV2, models.TimestampMixin): """Router attributes managed by NSX plugin extensions.""" __tablename__ = 'nsxv_router_ext_attributes' router_id = sa.Column(sa.String(36), sa.ForeignKey('routers.id', ondelete="CASCADE"), primary_key=True) distributed = sa.Column(sa.Boolean, default=False, nullable=False) router_type = sa.Column( sa.Enum('shared', 'exclusive', name='nsxv_router_type'), default='exclusive', nullable=False) service_router = sa.Column(sa.Boolean, default=False, nullable=False) # Add a relationship to the Router model in order to instruct # SQLAlchemy to eagerly load this association router = orm.relationship( l3_db.Router, backref=orm.backref("nsx_attributes", lazy='joined', uselist=False, cascade='delete')) class NsxvTzNetworkBinding(model_base.BASEV2, models.TimestampMixin): """Represents a binding of a virtual network with a transport zone. 
This model class associates a Neutron network with a transport zone; optionally a vlan ID might be used if the binding type is 'bridge' """ __tablename__ = 'nsxv_tz_network_bindings' network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', ondelete="CASCADE"), primary_key=True) binding_type = sa.Column( sa.Enum('flat', 'vlan', 'portgroup', 'vxlan', name='nsxv_tz_network_bindings_binding_type'), nullable=False, primary_key=True) phy_uuid = sa.Column(sa.String(36), primary_key=True, nullable=True) vlan_id = sa.Column(sa.Integer, primary_key=True, nullable=True, autoincrement=False) def __init__(self, network_id, binding_type, phy_uuid, vlan_id): self.network_id = network_id self.binding_type = binding_type self.phy_uuid = phy_uuid self.vlan_id = vlan_id def __repr__(self): return "" % (self.network_id, self.binding_type, self.phy_uuid, self.vlan_id) class NsxvPortIndexMapping(model_base.BASEV2, models.TimestampMixin): """Associates attached Neutron ports with the instance VNic index.""" __tablename__ = 'nsxv_port_index_mappings' port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), primary_key=True) device_id = sa.Column(sa.String(255), nullable=False) index = sa.Column(sa.Integer, nullable=False) __table_args__ = (sa.UniqueConstraint(device_id, index), model_base.BASEV2.__table_args__) # Add a relationship to the Port model in order to instruct SQLAlchemy to # eagerly read port vnic-index port = orm.relationship( models_v2.Port, backref=orm.backref("vnic_index", lazy='joined', uselist=False, cascade='delete')) class NsxvEdgeFirewallRuleBinding(model_base.BASEV2, models.TimestampMixin): """Mapping between firewall rule and edge firewall rule_id.""" __tablename__ = 'nsxv_firewall_rule_bindings' rule_id = sa.Column(sa.String(36), primary_key=True) edge_id = sa.Column(sa.String(36), primary_key=True) rule_vse_id = sa.Column(sa.String(36)) class NsxvSpoofGuardPolicyNetworkMapping(model_base.BASEV2, models.TimestampMixin): """Mapping 
between SpoofGuard and neutron networks""" __tablename__ = 'nsxv_spoofguard_policy_network_mappings' network_id = sa.Column(sa.String(36), sa.ForeignKey('networks.id', ondelete='CASCADE'), primary_key=True, nullable=False) policy_id = sa.Column(sa.String(36), nullable=False) class NsxvLbaasLoadbalancerBinding(model_base.BASEV2, models.TimestampMixin): """Mapping between Edge LB and LBaaSv2""" __tablename__ = 'nsxv_lbaas_loadbalancer_bindings' loadbalancer_id = sa.Column(sa.String(36), primary_key=True) edge_id = sa.Column(sa.String(36), nullable=False) edge_fw_rule_id = sa.Column(sa.String(36), nullable=False) vip_address = sa.Column(sa.String(36), nullable=False) class NsxvLbaasListenerBinding(model_base.BASEV2, models.TimestampMixin): """Mapping between Edge VSE and LBaaSv2""" __tablename__ = 'nsxv_lbaas_listener_bindings' loadbalancer_id = sa.Column(sa.String(36), primary_key=True) listener_id = sa.Column(sa.String(36), primary_key=True) app_profile_id = sa.Column(sa.String(36), nullable=False) vse_id = sa.Column(sa.String(36), nullable=False) class NsxvLbaasPoolBinding(model_base.BASEV2, models.TimestampMixin): """Mapping between Edge Pool and LBaaSv2""" __tablename__ = 'nsxv_lbaas_pool_bindings' loadbalancer_id = sa.Column(sa.String(36), primary_key=True) pool_id = sa.Column(sa.String(36), primary_key=True) edge_pool_id = sa.Column(sa.String(36), nullable=False) class NsxvLbaasMonitorBinding(model_base.BASEV2, models.TimestampMixin): """Mapping between Edge Monitor and LBaaSv2""" __tablename__ = 'nsxv_lbaas_monitor_bindings' loadbalancer_id = sa.Column(sa.String(36), primary_key=True) pool_id = sa.Column(sa.String(36), primary_key=True) hm_id = sa.Column(sa.String(36), primary_key=True) edge_id = sa.Column(sa.String(36), primary_key=True) edge_mon_id = sa.Column(sa.String(36), nullable=False) class NsxvLbaasCertificateBinding(model_base.BASEV2, models.TimestampMixin): """Mapping between Edge certificate and LBaaSv2 object""" __tablename__ = 
'nsxv_lbaas_certificate_bindings' cert_id = sa.Column(sa.String(128), primary_key=True) edge_id = sa.Column(sa.String(36), primary_key=True) edge_cert_id = sa.Column(sa.String(36), nullable=False) class NsxvLbaasL7PolicyBinding(model_base.BASEV2, models.TimestampMixin): """Mapping between NSX Edge and LBaaSv2 L7 policy """ __tablename__ = 'nsxv_lbaas_l7policy_bindings' policy_id = sa.Column(sa.String(36), primary_key=True) edge_id = sa.Column(sa.String(36), nullable=False) edge_app_rule_id = sa.Column(sa.String(36), nullable=False) class NsxvSubnetExtAttributes(model_base.BASEV2, models.TimestampMixin): """Subnet attributes managed by NSX plugin extensions.""" __tablename__ = 'nsxv_subnet_ext_attributes' subnet_id = sa.Column(sa.String(36), sa.ForeignKey('subnets.id', ondelete="CASCADE"), primary_key=True) dns_search_domain = sa.Column(sa.String(255), nullable=True) dhcp_mtu = sa.Column(sa.Integer, nullable=True) # Add a relationship to the Subnet model in order to instruct # SQLAlchemy to eagerly load this association subnet = orm.relationship( models_v2.Subnet, backref=orm.backref("nsxv_subnet_attributes", lazy='joined', uselist=False, cascade='delete')) class NsxvPortExtAttributes(model_base.BASEV2, models.TimestampMixin): """Port attributes managed by NSX plugin extensions.""" __tablename__ = 'nsxv_port_ext_attributes' port_id = sa.Column(sa.String(36), sa.ForeignKey('ports.id', ondelete="CASCADE"), primary_key=True) vnic_type = sa.Column(sa.String(64), nullable=False, default=portbindings.VNIC_NORMAL, server_default=portbindings.VNIC_NORMAL) # Add a relationship to the port model in order to instruct # SQLAlchemy to eagerly load this association port = orm.relationship( models_v2.Port, backref=orm.backref("nsx_port_attributes", lazy='joined', uselist=False, cascade='delete')) class NsxvBgpSpeakerBinding(model_base.BASEV2, models.TimestampMixin): # Maps bgp_speaker_id to NSXv edge id __tablename__ = 'nsxv_bgp_speaker_bindings' edge_id = sa.Column(sa.String(36), 
primary_key=True) bgp_speaker_id = sa.Column(sa.String(36), sa.ForeignKey('bgp_speakers.id', ondelete='CASCADE'), nullable=False) # A given BGP speaker sets the value of its BGP Identifier to an IP address # that is assigned to that BGP speaker. bgp_identifier = sa.Column(sa.String(64), nullable=False) class NsxvBgpPeerEdgeBinding(model_base.BASEV2, models.TimestampMixin): # Maps between bgp-peer and edges service gateway. __tablename__ = 'nsxv_bgp_peer_edge_bindings' peer_id = sa.Column(sa.String(36), sa.ForeignKey('bgp_peers.id', ondelete='CASCADE'), primary_key=True, nullable=False) edge_id = sa.Column(sa.String(36), nullable=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/routertype.py0000644000175000017500000000206200000000000022551 0ustar00coreycorey00000000000000# Copyright 2014 VMware, Inc. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from neutron_lib import constants from vmware_nsx.db import ( distributedrouter as dist_rtr) from vmware_nsx.extensions import routertype as rt_rtr class RouterType_mixin(dist_rtr.DistributedRouter_mixin): """Mixin class to enable Router type support.""" nsx_attributes = ( dist_rtr.DistributedRouter_mixin.nsx_attributes + [{ 'name': rt_rtr.ROUTER_TYPE, 'default': constants.SHARED }]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/vcns_models.py0000644000175000017500000000260600000000000022647 0ustar00coreycorey00000000000000# Copyright 2013 VMware, Inc. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.db import model_base import sqlalchemy as sa from oslo_db.sqlalchemy import models class VcnsRouterBinding(model_base.BASEV2, model_base.HasStatusDescription, models.TimestampMixin): """Represents the mapping between neutron router and vShield Edge.""" __tablename__ = 'vcns_router_bindings' # no ForeignKey to routers.id because for now, a router can be removed # from routers when delete_router is executed, but the binding is only # removed after the Edge is deleted router_id = sa.Column(sa.String(36), primary_key=True) edge_id = sa.Column(sa.String(16), nullable=True) lswitch_id = sa.Column(sa.String(36), nullable=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/db/vnic_index_db.py0000644000175000017500000001035300000000000023124 0ustar00coreycorey00000000000000# Copyright 2014 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from sqlalchemy.orm import exc from neutron_lib.api.definitions import port as port_def from neutron_lib.db import resource_extend from oslo_db import exception as db_exc from oslo_log import log as logging from vmware_nsx.db import nsxv_models from vmware_nsx.extensions import vnicindex as vnicidx LOG = logging.getLogger(__name__) @resource_extend.has_resource_extenders class VnicIndexDbMixin(object): @staticmethod @resource_extend.extends([port_def.COLLECTION_NAME]) def _extend_port_vnic_index_binding(port_res, port_db): state = port_db.vnic_index port_res[vnicidx.VNIC_INDEX] = state.index if state else None def _get_port_vnic_index(self, context, port_id): """Returns the vnic index for the given port. If the port is not associated with any vnic then return None """ session = context.session try: mapping = (session.query(nsxv_models.NsxvPortIndexMapping). filter_by(port_id=port_id).one()) return mapping['index'] except exc.NoResultFound: LOG.debug("No record in DB for vnic-index of port %s", port_id) def _get_mappings_for_device_id(self, context, device_id): session = context.session mappings = (session.query(nsxv_models.NsxvPortIndexMapping). filter_by(device_id=device_id)) return mappings def _create_port_vnic_index_mapping(self, context, port_id, device_id, index): """Save the port vnic-index to DB.""" session = context.session with session.begin(subtransactions=True): index_mapping_model = nsxv_models.NsxvPortIndexMapping( port_id=port_id, device_id=device_id, index=index) session.add(index_mapping_model) def _update_port_vnic_index_mapping(self, context, port_id, device_id, index): session = context.session # delete original entry query = (session.query(nsxv_models.NsxvPortIndexMapping). 
filter_by(device_id=device_id, index=index)) query.delete() # create a new one self._create_port_vnic_index_mapping(context, port_id, device_id, index) def _set_port_vnic_index_mapping(self, context, port_id, device_id, index): """Save the port vnic-index to DB.""" try: self._create_port_vnic_index_mapping(context, port_id, device_id, index) except db_exc.DBDuplicateEntry: # A retry for the nova scheduling could result in this error. LOG.debug("Entry already exists for %s %s %s", port_id, device_id, index) mappings = self._get_mappings_for_device_id(context, device_id) for mapping in mappings: if (mapping['port_id'] != port_id and mapping['index'] == index): # a new port is using this device - update! self._update_port_vnic_index_mapping(context, port_id, device_id, index) return if (mapping['port_id'] == port_id and mapping['index'] != index): raise def _delete_port_vnic_index_mapping(self, context, port_id): """Delete the port vnic-index association.""" session = context.session query = (session.query(nsxv_models.NsxvPortIndexMapping). filter_by(port_id=port_id)) query.delete() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1942537 vmware-nsx-15.0.1.dev143/vmware_nsx/dhcp_meta/0000755000175000017500000000000000000000000021314 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/dhcp_meta/__init__.py0000644000175000017500000000000000000000000023413 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/dhcp_meta/combined.py0000644000175000017500000001016600000000000023452 0ustar00coreycorey00000000000000# Copyright 2014 VMware, Inc. 
# # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api from neutron_lib.agent import topics from neutron_lib import constants as const from vmware_nsx.dhcp_meta import nsx as nsx_svc from vmware_nsx.dhcp_meta import rpc as nsx_rpc class DhcpAgentNotifyAPI(dhcp_rpc_agent_api.DhcpAgentNotifyAPI): def __init__(self, plugin, manager): super(DhcpAgentNotifyAPI, self).__init__(topic=topics.DHCP_AGENT) self.agentless_notifier = nsx_svc.DhcpAgentNotifyAPI(plugin, manager) def notify(self, context, data, methodname): [resource, action, _e] = methodname.split('.') lsn_manager = self.agentless_notifier.plugin.lsn_manager plugin = self.agentless_notifier.plugin if resource == 'network': net_id = data['network']['id'] elif resource in ['port', 'subnet']: net_id = data[resource]['network_id'] else: # no valid resource return lsn_exists = lsn_manager.lsn_exists(context, net_id) treat_dhcp_owner_specially = False if lsn_exists: # if lsn exists, the network is one created with the new model if (resource == 'subnet' and action == 'create' and const.DEVICE_OWNER_DHCP not in plugin.port_special_owners): # network/subnet provisioned in the new model have a plain # nsx lswitch port, no vif attachment plugin.port_special_owners.append(const.DEVICE_OWNER_DHCP) treat_dhcp_owner_specially = True if (resource == 'port' and action == 'update' or resource == 'subnet'): self.agentless_notifier.notify(context, data, methodname) 
elif not lsn_exists and resource in ['port', 'subnet']: # call notifier for the agent-based mode super(DhcpAgentNotifyAPI, self).notify(context, data, methodname) if treat_dhcp_owner_specially: # if subnets belong to networks created with the old model # dhcp port does not need to be special cased, so put things # back, since they were modified plugin.port_special_owners.remove(const.DEVICE_OWNER_DHCP) def handle_network_dhcp_access(plugin, context, network, action): nsx_svc.handle_network_dhcp_access(plugin, context, network, action) def handle_port_dhcp_access(plugin, context, port, action): if plugin.lsn_manager.lsn_exists(context, port['network_id']): nsx_svc.handle_port_dhcp_access(plugin, context, port, action) else: nsx_rpc.handle_port_dhcp_access(plugin, context, port, action) def handle_port_metadata_access(plugin, context, port, is_delete=False): if plugin.lsn_manager.lsn_exists(context, port['network_id']): nsx_svc.handle_port_metadata_access(plugin, context, port, is_delete) else: nsx_rpc.handle_port_metadata_access(plugin, context, port, is_delete) def handle_router_metadata_access(plugin, context, router_id, interface=None): if interface: subnet = plugin.get_subnet(context, interface['subnet_id']) network_id = subnet['network_id'] if plugin.lsn_manager.lsn_exists(context, network_id): nsx_svc.handle_router_metadata_access( plugin, context, router_id, interface) else: nsx_rpc.handle_router_metadata_access( plugin, context, router_id, interface) else: nsx_rpc.handle_router_metadata_access( plugin, context, router_id, interface) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/dhcp_meta/constants.py0000644000175000017500000000216000000000000023701 0ustar00coreycorey00000000000000# Copyright 2014 VMware, Inc. 
# # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from neutron.db import l3_db from neutron_lib import constants as const # A unique MAC to quickly identify the LSN port used for metadata services # when dhcp on the subnet is off. Inspired by leet-speak for 'metadata'. METADATA_MAC = "fa:15:73:74:d4:74" METADATA_PORT_ID = 'metadata:id' METADATA_PORT_NAME = 'metadata:name' METADATA_DEVICE_ID = 'metadata:device' SPECIAL_OWNERS = (const.DEVICE_OWNER_DHCP, const.DEVICE_OWNER_ROUTER_GW, l3_db.DEVICE_OWNER_ROUTER_INTF) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/dhcp_meta/lsnmanager.py0000644000175000017500000005265100000000000024026 0ustar00coreycorey00000000000000# Copyright 2014 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from neutron_lib import exceptions as n_exc from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging from oslo_utils import excutils from vmware_nsx._i18n import _ from vmware_nsx.api_client import exception as api_exc from vmware_nsx.common import exceptions as p_exc from vmware_nsx.common import nsx_utils from vmware_nsx.db import lsn_db from vmware_nsx.dhcp_meta import constants as const from vmware_nsx.nsxlib.mh import lsn as lsn_api from vmware_nsx.nsxlib.mh import switch as switch_api LOG = logging.getLogger(__name__) META_CONF = 'metadata-proxy' DHCP_CONF = 'dhcp' lsn_opts = [ cfg.BoolOpt('sync_on_missing_data', default=False, help=_('Pull LSN information from NSX in case it is missing ' 'from the local data store. This is useful to rebuild ' 'the local store in case of server recovery.')) ] def register_lsn_opts(config): config.CONF.register_opts(lsn_opts, "NSX_LSN") class LsnManager(object): """Manage LSN entities associated with networks.""" def __init__(self, plugin): self.plugin = plugin @property def cluster(self): return self.plugin.cluster def lsn_exists(self, context, network_id): """Return True if a Logical Service Node exists for the network.""" return self.lsn_get( context, network_id, raise_on_err=False) is not None def lsn_get(self, context, network_id, raise_on_err=True): """Retrieve the LSN id associated to the network.""" try: return lsn_api.lsn_for_network_get(self.cluster, network_id) except (n_exc.NotFound, api_exc.NsxApiException): if raise_on_err: LOG.error('Unable to find Logical Service Node for ' 'network %s.', network_id) raise p_exc.LsnNotFound(entity='network', entity_id=network_id) else: LOG.warning('Unable to find Logical Service Node for ' 'the requested network %s.', network_id) def lsn_create(self, context, network_id): """Create a LSN associated to the network.""" try: return lsn_api.lsn_for_network_create(self.cluster, network_id) except api_exc.NsxApiException: err_msg = 
_('Unable to create LSN for network %s') % network_id raise p_exc.NsxPluginException(err_msg=err_msg) def lsn_delete(self, context, lsn_id): """Delete a LSN given its id.""" try: lsn_api.lsn_delete(self.cluster, lsn_id) except (n_exc.NotFound, api_exc.NsxApiException): LOG.warning('Unable to delete Logical Service Node %s', lsn_id) def lsn_delete_by_network(self, context, network_id): """Delete a LSN associated to the network.""" lsn_id = self.lsn_get(context, network_id, raise_on_err=False) if lsn_id: self.lsn_delete(context, lsn_id) def lsn_port_get(self, context, network_id, subnet_id, raise_on_err=True): """Retrieve LSN and LSN port for the network and the subnet.""" lsn_id = self.lsn_get(context, network_id, raise_on_err=raise_on_err) if lsn_id: try: lsn_port_id = lsn_api.lsn_port_by_subnet_get( self.cluster, lsn_id, subnet_id) except (n_exc.NotFound, api_exc.NsxApiException): if raise_on_err: LOG.error('Unable to find Logical Service Node Port ' 'for LSN %(lsn_id)s and subnet ' '%(subnet_id)s', {'lsn_id': lsn_id, 'subnet_id': subnet_id}) raise p_exc.LsnPortNotFound(lsn_id=lsn_id, entity='subnet', entity_id=subnet_id) else: LOG.warning('Unable to find Logical Service Node Port ' 'for LSN %(lsn_id)s and subnet ' '%(subnet_id)s', {'lsn_id': lsn_id, 'subnet_id': subnet_id}) return (lsn_id, None) else: return (lsn_id, lsn_port_id) else: return (None, None) def lsn_port_get_by_mac(self, context, network_id, mac, raise_on_err=True): """Retrieve LSN and LSN port given network and mac address.""" lsn_id = self.lsn_get(context, network_id, raise_on_err=raise_on_err) if lsn_id: try: lsn_port_id = lsn_api.lsn_port_by_mac_get( self.cluster, lsn_id, mac) except (n_exc.NotFound, api_exc.NsxApiException): if raise_on_err: LOG.error('Unable to find Logical Service Node Port ' 'for LSN %(lsn_id)s and mac address ' '%(mac)s', {'lsn_id': lsn_id, 'mac': mac}) raise p_exc.LsnPortNotFound(lsn_id=lsn_id, entity='MAC', entity_id=mac) else: LOG.warning('Unable to find Logical Service 
Node ' 'Port for LSN %(lsn_id)s and mac address ' '%(mac)s', {'lsn_id': lsn_id, 'mac': mac}) return (lsn_id, None) else: return (lsn_id, lsn_port_id) else: return (None, None) def lsn_port_create(self, context, lsn_id, subnet_info): """Create and return LSN port for associated subnet.""" try: return lsn_api.lsn_port_create(self.cluster, lsn_id, subnet_info) except n_exc.NotFound: raise p_exc.LsnNotFound(entity='', entity_id=lsn_id) except api_exc.NsxApiException: err_msg = _('Unable to create port for LSN %s') % lsn_id raise p_exc.NsxPluginException(err_msg=err_msg) def lsn_port_delete(self, context, lsn_id, lsn_port_id): """Delete a LSN port from the Logical Service Node.""" try: lsn_api.lsn_port_delete(self.cluster, lsn_id, lsn_port_id) except (n_exc.NotFound, api_exc.NsxApiException): LOG.warning('Unable to delete LSN Port %s', lsn_port_id) def lsn_port_dispose(self, context, network_id, mac_address): """Delete a LSN port given the network and the mac address.""" lsn_id, lsn_port_id = self.lsn_port_get_by_mac( context, network_id, mac_address, raise_on_err=False) if lsn_port_id: self.lsn_port_delete(context, lsn_id, lsn_port_id) if mac_address == const.METADATA_MAC: try: lswitch_port_id = switch_api.get_port_by_neutron_tag( self.cluster, network_id, const.METADATA_PORT_ID)['uuid'] switch_api.delete_port( self.cluster, network_id, lswitch_port_id) except (n_exc.PortNotFoundOnNetwork, api_exc.NsxApiException): LOG.warning("Metadata port not found while attempting " "to delete it from network %s", network_id) else: LOG.warning("Unable to find Logical Services Node " "Port with MAC %s", mac_address) def lsn_port_dhcp_setup( self, context, network_id, port_id, port_data, subnet_config=None): """Connect network to LSN via specified port and port_data.""" try: lsn_id = None switch_id = nsx_utils.get_nsx_switch_ids( context.session, self.cluster, network_id)[0] lswitch_port_id = switch_api.get_port_by_neutron_tag( self.cluster, switch_id, port_id)['uuid'] lsn_id = 
self.lsn_get(context, network_id) lsn_port_id = self.lsn_port_create(context, lsn_id, port_data) except (n_exc.NotFound, p_exc.NsxPluginException): raise p_exc.PortConfigurationError( net_id=network_id, lsn_id=lsn_id, port_id=port_id) else: try: lsn_api.lsn_port_plug_network( self.cluster, lsn_id, lsn_port_id, lswitch_port_id) except p_exc.LsnConfigurationConflict: self.lsn_port_delete(context, lsn_id, lsn_port_id) raise p_exc.PortConfigurationError( net_id=network_id, lsn_id=lsn_id, port_id=port_id) if subnet_config: self.lsn_port_dhcp_configure( context, lsn_id, lsn_port_id, subnet_config) else: return (lsn_id, lsn_port_id) def lsn_port_metadata_setup(self, context, lsn_id, subnet): """Connect subnet to specified LSN.""" data = { "mac_address": const.METADATA_MAC, "ip_address": subnet['cidr'], "subnet_id": subnet['id'] } network_id = subnet['network_id'] tenant_id = subnet['tenant_id'] lswitch_port_id = None try: switch_id = nsx_utils.get_nsx_switch_ids( context.session, self.cluster, network_id)[0] lswitch_port_id = switch_api.create_lport( self.cluster, switch_id, tenant_id, const.METADATA_PORT_ID, const.METADATA_PORT_NAME, const.METADATA_DEVICE_ID, True)['uuid'] lsn_port_id = self.lsn_port_create(context, lsn_id, data) except (n_exc.NotFound, p_exc.NsxPluginException, api_exc.NsxApiException): raise p_exc.PortConfigurationError( net_id=network_id, lsn_id=lsn_id, port_id=lswitch_port_id) else: try: lsn_api.lsn_port_plug_network( self.cluster, lsn_id, lsn_port_id, lswitch_port_id) except p_exc.LsnConfigurationConflict: self.lsn_port_delete(self.cluster, lsn_id, lsn_port_id) switch_api.delete_port( self.cluster, network_id, lswitch_port_id) raise p_exc.PortConfigurationError( net_id=network_id, lsn_id=lsn_id, port_id=lsn_port_id) def lsn_port_dhcp_configure(self, context, lsn_id, lsn_port_id, subnet): """Enable/disable dhcp services with the given config options.""" is_enabled = subnet["enable_dhcp"] dhcp_options = { "domain_name": cfg.CONF.NSX_DHCP.domain_name, 
"default_lease_time": cfg.CONF.NSX_DHCP.default_lease_time, } dns_servers = cfg.CONF.NSX_DHCP.extra_domain_name_servers or [] dns_servers.extend(subnet["dns_nameservers"]) if subnet['gateway_ip']: dhcp_options["routers"] = subnet["gateway_ip"] if dns_servers: dhcp_options["domain_name_servers"] = ",".join(dns_servers) if subnet["host_routes"]: dhcp_options["classless_static_routes"] = ( ",".join(subnet["host_routes"]) ) try: lsn_api.lsn_port_dhcp_configure( self.cluster, lsn_id, lsn_port_id, is_enabled, dhcp_options) except (n_exc.NotFound, api_exc.NsxApiException): err_msg = (_('Unable to configure dhcp for Logical Service ' 'Node %(lsn_id)s and port %(lsn_port_id)s') % {'lsn_id': lsn_id, 'lsn_port_id': lsn_port_id}) LOG.error(err_msg) raise p_exc.NsxPluginException(err_msg=err_msg) def lsn_metadata_configure(self, context, subnet_id, is_enabled): """Configure metadata service for the specified subnet.""" subnet = self.plugin.get_subnet(context, subnet_id) network_id = subnet['network_id'] meta_conf = cfg.CONF.NSX_METADATA metadata_options = { 'metadata_server_ip': meta_conf.metadata_server_address, 'metadata_server_port': meta_conf.metadata_server_port, 'metadata_proxy_shared_secret': meta_conf.metadata_shared_secret } try: lsn_id = self.lsn_get(context, network_id) lsn_api.lsn_metadata_configure( self.cluster, lsn_id, is_enabled, metadata_options) except (p_exc.LsnNotFound, api_exc.NsxApiException): err_msg = (_('Unable to configure metadata ' 'for subnet %s') % subnet_id) LOG.error(err_msg) raise p_exc.NsxPluginException(err_msg=err_msg) if is_enabled: try: # test that the lsn port exists self.lsn_port_get(context, network_id, subnet_id) except p_exc.LsnPortNotFound: # this might happen if subnet had dhcp off when created # so create one, and wire it self.lsn_port_metadata_setup(context, lsn_id, subnet) else: self.lsn_port_dispose(context, network_id, const.METADATA_MAC) def _lsn_port_host_conf(self, context, network_id, subnet_id, data, hdlr): lsn_id, 
lsn_port_id = self.lsn_port_get( context, network_id, subnet_id, raise_on_err=False) try: if lsn_id and lsn_port_id: hdlr(self.cluster, lsn_id, lsn_port_id, data) except (n_exc.NotFound, api_exc.NsxApiException): LOG.error('Error while configuring LSN ' 'port %s', lsn_port_id) raise p_exc.PortConfigurationError( net_id=network_id, lsn_id=lsn_id, port_id=lsn_port_id) def lsn_port_dhcp_host_add(self, context, network_id, subnet_id, host): """Add dhcp host entry to LSN port configuration.""" self._lsn_port_host_conf(context, network_id, subnet_id, host, lsn_api.lsn_port_dhcp_host_add) def lsn_port_dhcp_host_remove(self, context, network_id, subnet_id, host): """Remove dhcp host entry from LSN port configuration.""" self._lsn_port_host_conf(context, network_id, subnet_id, host, lsn_api.lsn_port_dhcp_host_remove) def lsn_port_meta_host_add(self, context, network_id, subnet_id, host): """Add dhcp host entry to LSN port configuration.""" self._lsn_port_host_conf(context, network_id, subnet_id, host, lsn_api.lsn_port_metadata_host_add) def lsn_port_meta_host_remove(self, context, network_id, subnet_id, host): """Remove dhcp host entry from LSN port configuration.""" self._lsn_port_host_conf(context, network_id, subnet_id, host, lsn_api.lsn_port_metadata_host_remove) def lsn_port_update( self, context, network_id, subnet_id, dhcp=None, meta=None): """Update the specified configuration for the LSN port.""" if not dhcp and not meta: return try: lsn_id, lsn_port_id = self.lsn_port_get( context, network_id, subnet_id, raise_on_err=False) if dhcp and lsn_id and lsn_port_id: lsn_api.lsn_port_host_entries_update( self.cluster, lsn_id, lsn_port_id, DHCP_CONF, dhcp) if meta and lsn_id and lsn_port_id: lsn_api.lsn_port_host_entries_update( self.cluster, lsn_id, lsn_port_id, META_CONF, meta) except api_exc.NsxApiException: raise p_exc.PortConfigurationError( net_id=network_id, lsn_id=lsn_id, port_id=lsn_port_id) class PersistentLsnManager(LsnManager): """Add local persistent state to 
LSN Manager.""" def __init__(self, plugin): super(PersistentLsnManager, self).__init__(plugin) self.sync_on_missing = cfg.CONF.NSX_LSN.sync_on_missing_data def lsn_get(self, context, network_id, raise_on_err=True): try: obj = lsn_db.lsn_get_for_network( context, network_id, raise_on_err=raise_on_err) return obj.lsn_id if obj else None except p_exc.LsnNotFound: with excutils.save_and_reraise_exception() as ctxt: ctxt.reraise = False if self.sync_on_missing: lsn_id = super(PersistentLsnManager, self).lsn_get( context, network_id, raise_on_err=raise_on_err) self.lsn_save(context, network_id, lsn_id) return lsn_id if raise_on_err: ctxt.reraise = True def lsn_save(self, context, network_id, lsn_id): """Save LSN-Network mapping to the DB.""" try: lsn_db.lsn_add(context, network_id, lsn_id) except db_exc.DBError: err_msg = _('Unable to save LSN for network %s') % network_id LOG.exception(err_msg) raise p_exc.NsxPluginException(err_msg=err_msg) def lsn_create(self, context, network_id): lsn_id = super(PersistentLsnManager, self).lsn_create(context, network_id) try: self.lsn_save(context, network_id, lsn_id) except p_exc.NsxPluginException: with excutils.save_and_reraise_exception(): super(PersistentLsnManager, self).lsn_delete(context, lsn_id) return lsn_id def lsn_delete(self, context, lsn_id): lsn_db.lsn_remove(context, lsn_id) super(PersistentLsnManager, self).lsn_delete(context, lsn_id) def lsn_port_get(self, context, network_id, subnet_id, raise_on_err=True): try: obj = lsn_db.lsn_port_get_for_subnet( context, subnet_id, raise_on_err=raise_on_err) return (obj.lsn_id, obj.lsn_port_id) if obj else (None, None) except p_exc.LsnPortNotFound: with excutils.save_and_reraise_exception() as ctxt: ctxt.reraise = False if self.sync_on_missing: lsn_id, lsn_port_id = ( super(PersistentLsnManager, self).lsn_port_get( context, network_id, subnet_id, raise_on_err=raise_on_err)) mac_addr = lsn_api.lsn_port_info_get( self.cluster, lsn_id, lsn_port_id)['mac_address'] 
self.lsn_port_save( context, lsn_port_id, subnet_id, mac_addr, lsn_id) return (lsn_id, lsn_port_id) if raise_on_err: ctxt.reraise = True def lsn_port_get_by_mac(self, context, network_id, mac, raise_on_err=True): try: obj = lsn_db.lsn_port_get_for_mac( context, mac, raise_on_err=raise_on_err) return (obj.lsn_id, obj.lsn_port_id) if obj else (None, None) except p_exc.LsnPortNotFound: with excutils.save_and_reraise_exception() as ctxt: ctxt.reraise = False if self.sync_on_missing: lsn_id, lsn_port_id = ( super(PersistentLsnManager, self).lsn_port_get_by_mac( context, network_id, mac, raise_on_err=raise_on_err)) subnet_id = lsn_api.lsn_port_info_get( self.cluster, lsn_id, lsn_port_id).get('subnet_id') self.lsn_port_save( context, lsn_port_id, subnet_id, mac, lsn_id) return (lsn_id, lsn_port_id) if raise_on_err: ctxt.reraise = True def lsn_port_save(self, context, lsn_port_id, subnet_id, mac_addr, lsn_id): """Save LSN Port information to the DB.""" try: lsn_db.lsn_port_add_for_lsn( context, lsn_port_id, subnet_id, mac_addr, lsn_id) except db_exc.DBError: err_msg = _('Unable to save LSN port for subnet %s') % subnet_id LOG.exception(err_msg) raise p_exc.NsxPluginException(err_msg=err_msg) def lsn_port_create(self, context, lsn_id, subnet_info): lsn_port_id = super(PersistentLsnManager, self).lsn_port_create(context, lsn_id, subnet_info) try: self.lsn_port_save(context, lsn_port_id, subnet_info['subnet_id'], subnet_info['mac_address'], lsn_id) except p_exc.NsxPluginException: with excutils.save_and_reraise_exception(): super(PersistentLsnManager, self).lsn_port_delete( context, lsn_id, lsn_port_id) return lsn_port_id def lsn_port_delete(self, context, lsn_id, lsn_port_id): lsn_db.lsn_port_remove(context, lsn_port_id) super(PersistentLsnManager, self).lsn_port_delete( context, lsn_id, lsn_port_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 
vmware-nsx-15.0.1.dev143/vmware_nsx/dhcp_meta/migration.py0000644000175000017500000001663600000000000023673 0ustar00coreycorey00000000000000# Copyright 2014 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from neutron_lib.api.definitions import external_net as extnet_apidef from neutron_lib import constants as const from neutron_lib import exceptions as n_exc from oslo_log import log as logging from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as p_exc from vmware_nsx.dhcp_meta import nsx from vmware_nsx.dhcp_meta import rpc LOG = logging.getLogger(__name__) class DhcpMetadataBuilder(object): def __init__(self, plugin, agent_notifier): self.plugin = plugin self.notifier = agent_notifier def dhcp_agent_get_all(self, context, network_id): """Return the agents managing the network.""" return self.plugin.list_dhcp_agents_hosting_network( context, network_id)['agents'] def dhcp_port_get_all(self, context, network_id): """Return the dhcp ports allocated for the network.""" filters = { 'network_id': [network_id], 'device_owner': [const.DEVICE_OWNER_DHCP] } return self.plugin.get_ports(context, filters=filters) def router_id_get(self, context, subnet=None): """Return the router and interface used for the subnet.""" if not subnet: return network_id = subnet['network_id'] filters = { 'network_id': [network_id], 'device_owner': [const.DEVICE_OWNER_ROUTER_INTF] } ports = self.plugin.get_ports(context, filters=filters) for port in 
ports: if port['fixed_ips'][0]['subnet_id'] == subnet['id']: return port['device_id'] def metadata_deallocate(self, context, router_id, subnet_id): """Deallocate metadata services for the subnet.""" interface = {'subnet_id': subnet_id} self.plugin.remove_router_interface(context, router_id, interface) def metadata_allocate(self, context, router_id, subnet_id): """Allocate metadata resources for the subnet via the router.""" interface = {'subnet_id': subnet_id} self.plugin.add_router_interface(context, router_id, interface) def dhcp_deallocate(self, context, network_id, agents, ports): """Deallocate dhcp resources for the network.""" for agent in agents: self.plugin.remove_network_from_dhcp_agent( context, agent['id'], network_id) for port in ports: try: self.plugin.delete_port(context, port['id']) except n_exc.PortNotFound: LOG.error('Port %s is already gone', port['id']) def dhcp_allocate(self, context, network_id, subnet): """Allocate dhcp resources for the subnet.""" # Create LSN resources network_data = {'id': network_id} nsx.handle_network_dhcp_access(self.plugin, context, network_data, 'create_network') if subnet: subnet_data = {'subnet': subnet} self.notifier.notify(context, subnet_data, 'subnet.create.end') # Get DHCP host and metadata entries created for the LSN port = { 'network_id': network_id, 'fixed_ips': [{'subnet_id': subnet['id']}] } self.notifier.notify(context, {'port': port}, 'port.update.end') class MigrationManager(object): def __init__(self, plugin, lsn_manager, agent_notifier): self.plugin = plugin self.manager = lsn_manager self.builder = DhcpMetadataBuilder(plugin, agent_notifier) def validate(self, context, network_id): """Validate and return subnet's dhcp info for migration.""" network = self.plugin.get_network(context, network_id) if self.manager.lsn_exists(context, network_id): reason = _("LSN already exist") raise p_exc.LsnMigrationConflict(net_id=network_id, reason=reason) if network[extnet_apidef.EXTERNAL]: reason = _("Cannot migrate 
an external network") raise n_exc.BadRequest(resource='network', msg=reason) filters = {'network_id': [network_id]} subnets = self.plugin.get_subnets(context, filters=filters) count = len(subnets) if count == 0: return None elif count == 1 and subnets[0]['cidr'] == rpc.METADATA_SUBNET_CIDR: reason = _("Cannot migrate a 'metadata' network") raise n_exc.BadRequest(resource='network', msg=reason) elif count > 1: reason = _("Unable to support multiple subnets per network") raise p_exc.LsnMigrationConflict(net_id=network_id, reason=reason) else: return subnets[0] def migrate(self, context, network_id, subnet=None): """Migrate subnet resources to LSN.""" router_id = self.builder.router_id_get(context, subnet) if router_id and subnet: # Deallocate resources taken for the router, if any self.builder.metadata_deallocate(context, router_id, subnet['id']) if subnet: # Deallocate reources taken for the agent, if any agents = self.builder.dhcp_agent_get_all(context, network_id) ports = self.builder.dhcp_port_get_all(context, network_id) self.builder.dhcp_deallocate(context, network_id, agents, ports) # (re)create the configuration for LSN self.builder.dhcp_allocate(context, network_id, subnet) if router_id and subnet: # Allocate resources taken for the router, if any self.builder.metadata_allocate(context, router_id, subnet['id']) def report(self, context, network_id, subnet_id=None): """Return a report of the dhcp and metadata resources in use.""" if subnet_id: lsn_id, lsn_port_id = self.manager.lsn_port_get( context, network_id, subnet_id, raise_on_err=False) else: filters = {'network_id': [network_id]} subnets = self.plugin.get_subnets(context, filters=filters) if subnets: lsn_id, lsn_port_id = self.manager.lsn_port_get( context, network_id, subnets[0]['id'], raise_on_err=False) else: lsn_id = self.manager.lsn_get(context, network_id, raise_on_err=False) lsn_port_id = None if lsn_id: ports = [lsn_port_id] if lsn_port_id else [] report = { 'type': 'lsn', 'services': [lsn_id], 
'ports': ports } else: agents = self.builder.dhcp_agent_get_all(context, network_id) ports = self.builder.dhcp_port_get_all(context, network_id) report = { 'type': 'agent', 'services': [a['id'] for a in agents], 'ports': [p['id'] for p in ports] } return report ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/dhcp_meta/modes.py0000644000175000017500000002003000000000000022770 0ustar00coreycorey00000000000000# Copyright 2013 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import weakref from neutron_lib.agent import topics from neutron_lib.api.definitions import agent as agent_apidef from neutron_lib.api.definitions import dhcpagentscheduler from neutron_lib import constants as const from neutron_lib import rpc as n_rpc from oslo_concurrency import lockutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import importutils from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api from neutron.api.rpc.handlers import dhcp_rpc from neutron.api.rpc.handlers import metadata_rpc from neutron.db import agents_db from vmware_nsx._i18n import _ from vmware_nsx.common import config from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.dhcp_meta import combined from vmware_nsx.dhcp_meta import lsnmanager from vmware_nsx.dhcp_meta import migration from vmware_nsx.dhcp_meta import nsx as nsx_svc from vmware_nsx.dhcp_meta import rpc as nsx_rpc from vmware_nsx.extensions import lsn LOG = logging.getLogger(__name__) class SynchronizedDhcpRpcCallback(dhcp_rpc.DhcpRpcCallback): """DHCP RPC callbakcs synchronized with VMware plugin mutex.""" @lockutils.synchronized('vmware', 'neutron-') def create_dhcp_port(self, context, **kwargs): return super(SynchronizedDhcpRpcCallback, self).create_dhcp_port( context, **kwargs) class DhcpMetadataAccess(object): def setup_dhcpmeta_access(self): """Initialize support for DHCP and Metadata services.""" self._init_extensions() if cfg.CONF.NSX.agent_mode == config.AgentModes.AGENT: self._setup_rpc_dhcp_metadata() mod = nsx_rpc elif cfg.CONF.NSX.agent_mode == config.AgentModes.AGENTLESS: self._setup_nsx_dhcp_metadata() mod = nsx_svc elif cfg.CONF.NSX.agent_mode == config.AgentModes.COMBINED: notifier = self._setup_nsx_dhcp_metadata() self._setup_rpc_dhcp_metadata(notifier=notifier) mod = combined else: error = _("Invalid agent_mode: %s") % cfg.CONF.NSX.agent_mode LOG.error(error) raise nsx_exc.NsxPluginException(err_msg=error) 
self.handle_network_dhcp_access_delegate = ( mod.handle_network_dhcp_access ) self.handle_port_dhcp_access_delegate = ( mod.handle_port_dhcp_access ) self.handle_port_metadata_access_delegate = ( mod.handle_port_metadata_access ) self.handle_metadata_access_delegate = ( mod.handle_router_metadata_access ) def _setup_rpc_dhcp_metadata(self, notifier=None): self.topic = topics.PLUGIN self.conn = n_rpc.Connection() self.endpoints = [SynchronizedDhcpRpcCallback(), agents_db.AgentExtRpcCallback(), metadata_rpc.MetadataRpcCallback()] self.conn.create_consumer(self.topic, self.endpoints, fanout=False) self.conn.create_consumer(topics.REPORTS, [agents_db.AgentExtRpcCallback()], fanout=False) self.agent_notifiers[const.AGENT_TYPE_DHCP] = ( notifier or dhcp_rpc_agent_api.DhcpAgentNotifyAPI()) self.conn.consume_in_threads() self.network_scheduler = importutils.import_object( cfg.CONF.network_scheduler_driver ) self.supported_extension_aliases.extend( [agent_apidef.ALIAS, dhcpagentscheduler.ALIAS]) def _setup_nsx_dhcp_metadata(self): self._check_services_requirements() nsx_svc.register_dhcp_opts(cfg) nsx_svc.register_metadata_opts(cfg) lsnmanager.register_lsn_opts(cfg) lsn_manager = lsnmanager.PersistentLsnManager(weakref.proxy(self)) self.lsn_manager = lsn_manager if cfg.CONF.NSX.agent_mode == config.AgentModes.AGENTLESS: notifier = nsx_svc.DhcpAgentNotifyAPI(weakref.proxy(self), lsn_manager) self.agent_notifiers[const.AGENT_TYPE_DHCP] = notifier # In agentless mode, ports whose owner is DHCP need to # be special cased; so add it to the list of special # owners list if const.DEVICE_OWNER_DHCP not in self.port_special_owners: self.port_special_owners.append(const.DEVICE_OWNER_DHCP) elif cfg.CONF.NSX.agent_mode == config.AgentModes.COMBINED: # This becomes ineffective, as all new networks creations # are handled by Logical Services Nodes in NSX cfg.CONF.set_override('network_auto_schedule', False) LOG.warning('network_auto_schedule has been disabled') notifier = 
combined.DhcpAgentNotifyAPI(weakref.proxy(self), lsn_manager) self.supported_extension_aliases.append(lsn.ALIAS) # Add the capability to migrate dhcp and metadata services over self.migration_manager = ( migration.MigrationManager( weakref.proxy(self), lsn_manager, notifier)) return notifier def _init_extensions(self): extensions = (lsn.ALIAS, agent_apidef.ALIAS, dhcpagentscheduler.ALIAS) for ext in extensions: if ext in self.supported_extension_aliases: self.supported_extension_aliases.remove(ext) def _check_services_requirements(self): try: error = None nsx_svc.check_services_requirements(self.cluster) except nsx_exc.InvalidVersion: error = _("Unable to run Neutron with config option '%s', as NSX " "does not support it") % cfg.CONF.NSX.agent_mode except nsx_exc.ServiceClusterUnavailable: error = _("Unmet dependency for config option " "'%s'") % cfg.CONF.NSX.agent_mode if error: LOG.error(error) raise nsx_exc.NsxPluginException(err_msg=error) def get_lsn(self, context, network_id, fields=None): report = self.migration_manager.report(context, network_id) return {'network': network_id, 'report': report} def create_lsn(self, context, lsn): network_id = lsn['lsn']['network'] subnet = self.migration_manager.validate(context, network_id) subnet_id = None if not subnet else subnet['id'] self.migration_manager.migrate(context, network_id, subnet) r = self.migration_manager.report(context, network_id, subnet_id) return {'network': network_id, 'report': r} def handle_network_dhcp_access(self, context, network, action): self.handle_network_dhcp_access_delegate(weakref.proxy(self), context, network, action) def handle_port_dhcp_access(self, context, port_data, action): self.handle_port_dhcp_access_delegate(weakref.proxy(self), context, port_data, action) def handle_port_metadata_access(self, context, port, is_delete=False): self.handle_port_metadata_access_delegate(weakref.proxy(self), context, port, is_delete) def handle_router_metadata_access(self, context, router_id, 
interface=None): self.handle_metadata_access_delegate(weakref.proxy(self), context, router_id, interface) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/dhcp_meta/nsx.py0000644000175000017500000003342300000000000022503 0ustar00coreycorey00000000000000# Copyright 2013 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from neutron_lib.api.definitions import external_net as extnet_apidef from neutron_lib import constants as const from neutron_lib import exceptions as n_exc from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from neutron.db import db_base_plugin_v2 from neutron.db import l3_db from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as p_exc from vmware_nsx.dhcp_meta import constants as d_const from vmware_nsx.nsxlib.mh import lsn as lsn_api LOG = logging.getLogger(__name__) dhcp_opts = [ cfg.ListOpt('extra_domain_name_servers', deprecated_group='NVP_DHCP', default=[], help=_('Comma separated list of additional ' 'domain name servers')), cfg.StrOpt('domain_name', deprecated_group='NVP_DHCP', default='openstacklocal', help=_('Domain to use for building the hostnames')), cfg.IntOpt('default_lease_time', default=43200, deprecated_group='NVP_DHCP', help=_("Default DHCP lease time")), ] metadata_opts = [ cfg.StrOpt('metadata_server_address', deprecated_group='NVP_METADATA', 
default='127.0.0.1', help=_("IP address used by Metadata server.")), cfg.PortOpt('metadata_server_port', deprecated_group='NVP_METADATA', default=8775, help=_("TCP Port used by Metadata server.")), cfg.StrOpt('metadata_shared_secret', deprecated_group='NVP_METADATA', default='', help=_('When proxying metadata requests, Neutron signs the ' 'Instance-ID header with a shared secret to prevent ' 'spoofing. You may select any string for a secret, ' 'but it MUST match with the configuration used by the ' 'Metadata server.'), secret=True) ] def register_dhcp_opts(config): config.CONF.register_opts(dhcp_opts, group="NSX_DHCP") def register_metadata_opts(config): config.CONF.register_opts(metadata_opts, group="NSX_METADATA") class DhcpAgentNotifyAPI(object): def __init__(self, plugin, lsn_manager): self.plugin = plugin self.lsn_manager = lsn_manager self._handle_subnet_dhcp_access = {'create': self._subnet_create, 'update': self._subnet_update, 'delete': self._subnet_delete} def notify(self, context, data, methodname): [resource, action, _e] = methodname.split('.') if resource == 'subnet': self._handle_subnet_dhcp_access[action](context, data['subnet']) elif resource == 'port' and action == 'update': self._port_update(context, data['port']) def _port_update(self, context, port): # With no fixed IP's there's nothing that can be updated if not port["fixed_ips"]: return network_id = port['network_id'] subnet_id = port["fixed_ips"][0]['subnet_id'] filters = {'network_id': [network_id]} # Because NSX does not support updating a single host entry we # got to build the whole list from scratch and update in bulk ports = self.plugin.get_ports(context, filters) if not ports: return dhcp_conf = [ {'mac_address': p['mac_address'], 'ip_address': p["fixed_ips"][0]['ip_address']} for p in ports if is_user_port(p) ] meta_conf = [ {'instance_id': p['device_id'], 'ip_address': p["fixed_ips"][0]['ip_address']} for p in ports if is_user_port(p, check_dev_id=True) ] 
self.lsn_manager.lsn_port_update( context, network_id, subnet_id, dhcp=dhcp_conf, meta=meta_conf) def _subnet_create(self, context, subnet, clean_on_err=True): if subnet['enable_dhcp']: network_id = subnet['network_id'] # Create port for DHCP service dhcp_port = { "name": "", "admin_state_up": True, "device_id": "", "device_owner": const.DEVICE_OWNER_DHCP, "network_id": network_id, "tenant_id": subnet["tenant_id"], "mac_address": const.ATTR_NOT_SPECIFIED, "fixed_ips": [{"subnet_id": subnet['id']}] } try: # This will end up calling handle_port_dhcp_access # down below as well as handle_port_metadata_access self.plugin.create_port(context, {'port': dhcp_port}) except p_exc.PortConfigurationError as e: LOG.error("Error while creating subnet %(cidr)s for " "network %(network)s. Please, contact " "administrator", {"cidr": subnet["cidr"], "network": network_id}) db_base_plugin_v2.NeutronDbPluginV2.delete_port( self.plugin, context, e.port_id) if clean_on_err: self.plugin.delete_subnet(context, subnet['id']) raise n_exc.Conflict() def _subnet_update(self, context, subnet): network_id = subnet['network_id'] try: lsn_id, lsn_port_id = self.lsn_manager.lsn_port_get( context, network_id, subnet['id']) self.lsn_manager.lsn_port_dhcp_configure( context, lsn_id, lsn_port_id, subnet) except p_exc.LsnPortNotFound: # It's possible that the subnet was created with dhcp off; # check if the subnet was uplinked onto a router, and if so # remove the patch attachment between the metadata port and # the lsn port, in favor on the one we'll be creating during # _subnet_create self.lsn_manager.lsn_port_dispose( context, network_id, d_const.METADATA_MAC) # also, check that a dhcp port exists first and provision it # accordingly filters = dict(network_id=[network_id], device_owner=[const.DEVICE_OWNER_DHCP]) ports = self.plugin.get_ports(context, filters=filters) if ports: handle_port_dhcp_access( self.plugin, context, ports[0], 'create_port') else: self._subnet_create(context, subnet, 
clean_on_err=False) def _subnet_delete(self, context, subnet): # FIXME(armando-migliaccio): it looks like that a subnet filter # is ineffective; so filter by network for now. network_id = subnet['network_id'] filters = dict(network_id=[network_id], device_owner=[const.DEVICE_OWNER_DHCP]) # FIXME(armando-migliaccio): this may be race-y ports = self.plugin.get_ports(context, filters=filters) if ports: # This will end up calling handle_port_dhcp_access # down below as well as handle_port_metadata_access self.plugin.delete_port(context, ports[0]['id']) def is_user_port(p, check_dev_id=False): usable = p['fixed_ips'] and p['device_owner'] not in d_const.SPECIAL_OWNERS return usable if not check_dev_id else usable and p['device_id'] def check_services_requirements(cluster): ver = cluster.api_client.get_version() # 4.1 is the first and only release where DHCP in NSX # will have this feature, as experimental if ver.major == 4 and ver.minor == 1: cluster_id = cfg.CONF.default_service_cluster_uuid if not lsn_api.service_cluster_exists(cluster, cluster_id): raise p_exc.ServiceClusterUnavailable(cluster_id=cluster_id) else: raise p_exc.InvalidVersion(version=ver) def handle_network_dhcp_access(plugin, context, network, action): LOG.info("Performing DHCP %(action)s for resource: %(resource)s", {"action": action, "resource": network}) if action == 'create_network': network_id = network['id'] if network.get(extnet_apidef.EXTERNAL): LOG.info("Network %s is external: no LSN to create", network_id) return plugin.lsn_manager.lsn_create(context, network_id) elif action == 'delete_network': # NOTE(armando-migliaccio): on delete_network, network # is just the network id network_id = network plugin.lsn_manager.lsn_delete_by_network(context, network_id) LOG.info("Logical Services Node for network " "%s configured successfully", network_id) def handle_port_dhcp_access(plugin, context, port, action): LOG.info("Performing DHCP %(action)s for resource: %(resource)s", {"action": action, 
"resource": port}) if port["device_owner"] == const.DEVICE_OWNER_DHCP: network_id = port["network_id"] if action == "create_port": # at this point the port must have a subnet and a fixed ip subnet_id = port["fixed_ips"][0]['subnet_id'] subnet = plugin.get_subnet(context, subnet_id) subnet_data = { "mac_address": port["mac_address"], "ip_address": subnet['cidr'], "subnet_id": subnet['id'] } try: plugin.lsn_manager.lsn_port_dhcp_setup( context, network_id, port['id'], subnet_data, subnet) except p_exc.PortConfigurationError: LOG.error("Error while configuring DHCP for " "port %s", port['id']) raise n_exc.NeutronException() elif action == "delete_port": plugin.lsn_manager.lsn_port_dispose(context, network_id, port['mac_address']) elif port["device_owner"] != const.DEVICE_OWNER_DHCP: if port.get("fixed_ips"): # do something only if there are IP's and dhcp is enabled subnet_id = port["fixed_ips"][0]['subnet_id'] if not plugin.get_subnet(context, subnet_id)['enable_dhcp']: LOG.info("DHCP is disabled for subnet %s: nothing " "to do", subnet_id) return host_data = { "mac_address": port["mac_address"], "ip_address": port["fixed_ips"][0]['ip_address'] } network_id = port["network_id"] if action == "create_port": handler = plugin.lsn_manager.lsn_port_dhcp_host_add elif action == "delete_port": handler = plugin.lsn_manager.lsn_port_dhcp_host_remove try: handler(context, network_id, subnet_id, host_data) except p_exc.PortConfigurationError: with excutils.save_and_reraise_exception(): if action == 'create_port': db_base_plugin_v2.NeutronDbPluginV2.delete_port( plugin, context, port['id']) LOG.info("DHCP for port %s configured successfully", port['id']) def handle_port_metadata_access(plugin, context, port, is_delete=False): if is_user_port(port, check_dev_id=True): network_id = port["network_id"] network = plugin.get_network(context, network_id) if network[extnet_apidef.EXTERNAL]: LOG.info("Network %s is external: nothing to do", network_id) return subnet_id = 
port["fixed_ips"][0]['subnet_id'] host_data = { "instance_id": port["device_id"], "tenant_id": port["tenant_id"], "ip_address": port["fixed_ips"][0]['ip_address'] } LOG.info("Configuring metadata entry for port %s", port) if not is_delete: handler = plugin.lsn_manager.lsn_port_meta_host_add else: handler = plugin.lsn_manager.lsn_port_meta_host_remove try: handler(context, network_id, subnet_id, host_data) except p_exc.PortConfigurationError: with excutils.save_and_reraise_exception(): if not is_delete: db_base_plugin_v2.NeutronDbPluginV2.delete_port( plugin, context, port['id']) LOG.info("Metadata for port %s configured successfully", port['id']) def handle_router_metadata_access(plugin, context, router_id, interface=None): LOG.info("Handle metadata access via router: %(r)s and " "interface %(i)s", {'r': router_id, 'i': interface}) if interface: try: plugin.get_port(context, interface['port_id']) is_enabled = True except n_exc.NotFound: is_enabled = False subnet_id = interface['subnet_id'] try: plugin.lsn_manager.lsn_metadata_configure( context, subnet_id, is_enabled) except p_exc.NsxPluginException: with excutils.save_and_reraise_exception(): if is_enabled: l3_db.L3_NAT_db_mixin.remove_router_interface( plugin, context, router_id, interface) LOG.info("Metadata for router %s handled successfully", router_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/dhcp_meta/rpc.py0000644000175000017500000002533500000000000022462 0ustar00coreycorey00000000000000# Copyright 2013 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from neutron_lib import constants as const from neutron_lib.db import api as db_api from neutron_lib import exceptions as ntn_exc from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api from neutron.db import db_base_plugin_v2 from neutron.db import models_v2 from vmware_nsx.api_client import exception as api_exc from vmware_nsx.common import config from vmware_nsx.common import exceptions as nsx_exc LOG = logging.getLogger(__name__) METADATA_DEFAULT_PREFIX = 30 METADATA_SUBNET_CIDR = '169.254.169.252/%d' % METADATA_DEFAULT_PREFIX METADATA_GATEWAY_IP = '169.254.169.253' METADATA_DHCP_ROUTE = '169.254.169.254/32' def handle_network_dhcp_access(plugin, context, network, action): pass def handle_port_dhcp_access(plugin, context, port_data, action): pass def handle_port_metadata_access(plugin, context, port, is_delete=False): # For instances supporting DHCP option 121 and created in a # DHCP-enabled but isolated network. This method is useful # only when no network namespace support. plugin_cfg = getattr(cfg.CONF, plugin.cfg_group) if (plugin_cfg.metadata_mode == config.MetadataModes.INDIRECT and port.get('device_owner') == const.DEVICE_OWNER_DHCP): if not port.get('fixed_ips'): # If port does not have an IP, the associated subnet is in # deleting state. 
LOG.info('Port %s has no IP due to subnet in deleting state', port['id']) return fixed_ip = port['fixed_ips'][0] query = context.session.query(models_v2.Subnet) subnet = query.filter( models_v2.Subnet.id == fixed_ip['subnet_id']).one() # If subnet does not have a gateway, do not create metadata # route. This is done via the enable_isolated_metadata # option if desired. if not subnet.get('gateway_ip'): LOG.info('Subnet %s does not have a gateway, the ' 'metadata route will not be created', subnet['id']) return metadata_routes = [r for r in subnet.routes if r['destination'] == METADATA_DHCP_ROUTE] if metadata_routes: # We should have only a single metadata route at any time # because the route logic forbids two routes with the same # destination. Update next hop with the provided IP address if not is_delete: metadata_routes[0].nexthop = fixed_ip['ip_address'] else: context.session.delete(metadata_routes[0]) else: # add the metadata route route = models_v2.SubnetRoute( subnet_id=subnet.id, destination=METADATA_DHCP_ROUTE, nexthop=fixed_ip['ip_address']) context.session.add(route) def handle_router_metadata_access(plugin, context, router_id, interface=None): # For instances created in a DHCP-disabled network but connected to # a router. # The parameter "interface" is only used as a Boolean flag to indicate # whether to add (True) or delete (False) an internal metadata network. 
plugin_cfg = getattr(cfg.CONF, plugin.cfg_group) if plugin_cfg.metadata_mode != config.MetadataModes.DIRECT: LOG.debug("Metadata access network is disabled") return if not cfg.CONF.allow_overlapping_ips: LOG.warning("Overlapping IPs must be enabled in order to setup " "the metadata access network") return ctx_elevated = context.elevated() on_demand = getattr(plugin_cfg, 'metadata_on_demand', False) try: if interface: # Add interface case filters = {'device_id': [router_id], 'device_owner': const.ROUTER_INTERFACE_OWNERS, 'fixed_ips': {'ip_address': [METADATA_GATEWAY_IP]}} # Retrieve metadata ports by calling database plugin ports = db_base_plugin_v2.NeutronDbPluginV2.get_ports( plugin, ctx_elevated, filters=filters) if not ports and (not on_demand or _find_dhcp_disabled_subnet_by_router( plugin, ctx_elevated, router_id)): _create_metadata_access_network( plugin, ctx_elevated, router_id) else: # Remove interface case filters = {'device_id': [router_id], 'device_owner': const.ROUTER_INTERFACE_OWNERS} # Retrieve router interface ports by calling database plugin ports = db_base_plugin_v2.NeutronDbPluginV2.get_ports( plugin, ctx_elevated, filters=filters) if len(ports) == 1 or (on_demand and not _find_dhcp_disabled_subnet_by_port( plugin, ctx_elevated, ports)): # Delete the internal metadata network if the router port # is the last port left or no more DHCP-disabled subnet # attached to the router. 
_destroy_metadata_access_network( plugin, ctx_elevated, router_id, ports) # TODO(salvatore-orlando): A better exception handling in the # NSX plugin would allow us to improve error handling here except (ntn_exc.NeutronException, nsx_exc.NsxPluginException, api_exc.NsxApiException): # Any exception here should be regarded as non-fatal LOG.exception("An error occurred while operating on the " "metadata access network for router:'%s'", router_id) def _find_metadata_port(plugin, context, ports): for port in ports: for fixed_ip in port['fixed_ips']: if fixed_ip['ip_address'] == METADATA_GATEWAY_IP: return port def _find_dhcp_disabled_subnet_by_port(plugin, context, ports): for port in ports: for fixed_ip in port['fixed_ips']: # NOTE(ihrachys) explicit use of reader.using guarantees we don't # fetch an old state of subnet with incorrect value for # enable_dhcp. A more correct fix would be switching all operations # of the vmware plugin (and base db neutron plugin) to engine # facade to avoid cross transaction session cache reuse but such # change wouldn't happen overnight. 
with db_api.CONTEXT_READER.using(context): subnet = plugin.get_subnet(context, fixed_ip['subnet_id']) if not subnet['enable_dhcp']: return subnet def _find_dhcp_disabled_subnet_by_router(plugin, context, router_id): filters = {'device_id': [router_id], 'device_owner': const.ROUTER_INTERFACE_OWNERS} ports = db_base_plugin_v2.NeutronDbPluginV2.get_ports( plugin, context, filters=filters) return _find_dhcp_disabled_subnet_by_port(plugin, context, ports) def _create_metadata_access_network(plugin, context, router_id): # Add network # Network name is likely to be truncated on NSX net_data = {'name': 'meta-%s' % router_id, 'tenant_id': '', # intentionally not set 'admin_state_up': True, 'port_security_enabled': False, 'shared': False, 'status': const.NET_STATUS_ACTIVE} meta_net = plugin.create_network(context, {'network': net_data}) plugin.schedule_network(context, meta_net) # From this point on there will be resources to garbage-collect # in case of failures meta_sub = None try: # Add subnet subnet_data = {'network_id': meta_net['id'], 'tenant_id': '', # intentionally not set 'name': 'meta-%s' % router_id, 'ip_version': 4, 'shared': False, 'cidr': METADATA_SUBNET_CIDR, 'enable_dhcp': True, # Ensure default allocation pool is generated 'allocation_pools': const.ATTR_NOT_SPECIFIED, 'gateway_ip': METADATA_GATEWAY_IP, 'dns_nameservers': [], 'host_routes': []} meta_sub = plugin.create_subnet(context, {'subnet': subnet_data}) plugin.add_router_interface(context, router_id, {'subnet_id': meta_sub['id']}) # Tell to start the metadata agent proxy, only if we had success _notify_rpc_agent(context, {'subnet': meta_sub}, 'subnet.create.end') except (ntn_exc.NeutronException, nsx_exc.NsxPluginException, api_exc.NsxApiException): # It is not necessary to explicitly delete the subnet # as it will be removed with the network plugin.delete_network(context, meta_net['id']) def _destroy_metadata_access_network(plugin, context, router_id, ports): if not ports: return meta_port = 
_find_metadata_port(plugin, context, ports) if not meta_port: return meta_net_id = meta_port['network_id'] meta_sub_id = meta_port['fixed_ips'][0]['subnet_id'] plugin.remove_router_interface( context, router_id, {'port_id': meta_port['id']}) context.session.expunge_all() try: # Remove network (this will remove the subnet too) plugin.delete_network(context, meta_net_id) except (ntn_exc.NeutronException, nsx_exc.NsxPluginException, api_exc.NsxApiException): # must re-add the router interface plugin.add_router_interface(context, router_id, {'subnet_id': meta_sub_id}) except db_exc.DBReferenceError as e: LOG.debug("Unable to delete network %s. Reason: %s", meta_net_id, e) # Tell to stop the metadata agent proxy _notify_rpc_agent( context, {'network': {'id': meta_net_id}}, 'network.delete.end') def _notify_rpc_agent(context, payload, event): if cfg.CONF.dhcp_agent_notification: dhcp_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI() dhcp_notifier.notify(context, payload, event) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1942537 vmware-nsx-15.0.1.dev143/vmware_nsx/dvs/0000755000175000017500000000000000000000000020164 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/dvs/__init__.py0000644000175000017500000000000000000000000022263 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/dvs/dvs.py0000644000175000017500000012647200000000000021346 0ustar00coreycorey00000000000000# Copyright 2014 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import exceptions from oslo_log import log as logging from oslo_utils import excutils from oslo_vmware import vim_util from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.dvs import dvs_utils LOG = logging.getLogger(__name__) PORTGROUP_PREFIX = 'dvportgroup' API_FIND_ALL_BY_UUID = 'FindAllByUuid' # QoS related constants QOS_IN_DIRECTION = 'incomingPackets' QOS_AGENT_NAME = 'dvfilter-generic-vmware' DSCP_RULE_DESCRIPTION = 'Openstack Dscp Marking RUle' class SingleDvsManager(object): """Management class for dvs related tasks for the dvs plugin For the globally configured dvs. the moref of the configured DVS will be learnt. This will be used in the operations supported by the manager. 
""" def __init__(self): self.dvs = DvsManager() self._dvs_moref = self._get_dvs_moref_by_name( self.dvs.get_vc_session(), dvs_utils.dvs_name_get()) def _get_dvs_moref_by_name(self, session, dvs_name): """Get the moref of the configured DVS.""" return self.dvs.get_dvs_moref_by_name(dvs_name, session) def add_port_group(self, net_id, vlan_tag=None, trunk_mode=False): return self.dvs.add_port_group(self._dvs_moref, net_id, vlan_tag=vlan_tag, trunk_mode=trunk_mode) def delete_port_group(self, net_id): return self.dvs.delete_port_group(self._dvs_moref, net_id) def get_port_group_info(self, net_id): return self.dvs.get_port_group_info(self._dvs_moref, net_id) def net_id_to_moref(self, net_id): return self.dvs._net_id_to_moref(self._dvs_moref, net_id) class VCManagerBase(object): """Base class for all VC related classes, to initialize the session""" def __init__(self): """Initializer. A global session with the VC will be established. NOTE: the DVS port group name will be the Neutron network UUID. """ self._session = dvs_utils.dvs_create_session() def get_vc_session(self): return self._session class DvsManager(VCManagerBase): """Management class for dvs related tasks The dvs-id is not a class member, since multiple dvs-es can be supported. 
""" def get_dvs_moref_by_name(self, dvs_name, session=None): """Get the moref of DVS.""" if not session: session = self.get_vc_session() results = session.invoke_api(vim_util, 'get_objects', session.vim, 'DistributedVirtualSwitch', 100) while results: for dvs in results.objects: for prop in dvs.propSet: if dvs_name == prop.val: vim_util.cancel_retrieval(session.vim, results) return dvs.obj results = vim_util.continue_retrieval(session.vim, results) raise nsx_exc.DvsNotFound(dvs=dvs_name) def _get_dvs_moref_by_id(self, dvs_id): return vim_util.get_moref(dvs_id, 'VmwareDistributedVirtualSwitch') def _get_vlan_spec(self, vlan_tag): """Gets portgroup vlan spec.""" # Create the spec for the vlan tag client_factory = self._session.vim.client.factory spec_ns = 'ns0:VmwareDistributedVirtualSwitchVlanIdSpec' vl_spec = client_factory.create(spec_ns) vl_spec.vlanId = vlan_tag vl_spec.inherited = '0' return vl_spec def _get_trunk_vlan_spec(self, start=0, end=4094): """Gets portgroup trunk vlan spec.""" client_factory = self._session.vim.client.factory spec_ns = 'ns0:VmwareDistributedVirtualSwitchTrunkVlanSpec' range = client_factory.create('ns0:NumericRange') range.start = start range.end = end vlan_tag = range vl_spec = client_factory.create(spec_ns) vl_spec.vlanId = vlan_tag vl_spec.inherited = '0' return vl_spec def _get_port_group_spec(self, net_id, vlan_tag, trunk_mode=False, pg_spec=None): """Gets the port groups spec for net_id and vlan_tag.""" client_factory = self._session.vim.client.factory if not pg_spec: pg_spec = client_factory.create('ns0:DVPortgroupConfigSpec') pg_spec.name = net_id pg_spec.type = 'ephemeral' config = client_factory.create('ns0:VMwareDVSPortSetting') if trunk_mode: config.vlan = self._get_trunk_vlan_spec() elif vlan_tag: config.vlan = self._get_vlan_spec(vlan_tag) pg_spec.defaultPortConfig = config return pg_spec def add_port_group(self, dvs_moref, net_id, vlan_tag=None, trunk_mode=False): """Add a new port group to the configured DVS.""" 
pg_spec = self._get_port_group_spec(net_id, vlan_tag, trunk_mode=trunk_mode) task = self._session.invoke_api(self._session.vim, 'CreateDVPortgroup_Task', dvs_moref, spec=pg_spec) try: # NOTE(garyk): cache the returned moref self._session.wait_for_task(task) except Exception: # NOTE(garyk): handle more specific exceptions with excutils.save_and_reraise_exception(): LOG.exception('Failed to create port group for ' '%(net_id)s with tag %(tag)s.', {'net_id': net_id, 'tag': vlan_tag}) LOG.info("%(net_id)s with tag %(vlan_tag)s created on %(dvs)s.", {'net_id': net_id, 'vlan_tag': vlan_tag, 'dvs': dvs_moref.value}) def _get_portgroup(self, net_id): """Get the port group moref of the net_id.""" results = self._session.invoke_api(vim_util, 'get_objects', self._session.vim, 'DistributedVirtualPortgroup', 100) while results: for pg in results.objects: for prop in pg.propSet: if net_id == prop.val: vim_util.cancel_retrieval(self._session.vim, results) return pg.obj results = vim_util.continue_retrieval(self._session.vim, results) raise exceptions.NetworkNotFound(net_id=net_id) def _net_id_to_moref(self, dvs_moref, net_id): """Gets the moref for the specific neutron network.""" # NOTE(garyk): return this from a cache if not found then invoke # code below. if dvs_moref: port_groups = self._session.invoke_api(vim_util, 'get_object_properties', self._session.vim, dvs_moref, ['portgroup']) if len(port_groups) and hasattr(port_groups[0], 'propSet'): for prop in port_groups[0].propSet: for val in prop.val[0]: props = self._session.invoke_api( vim_util, 'get_object_properties', self._session.vim, val, ['name']) if len(props) and hasattr(props[0], 'propSet'): for prop in props[0].propSet: # match name or mor id if net_id == prop.val or net_id == val.value: # NOTE(garyk): update cache return val raise exceptions.NetworkNotFound(net_id=net_id) else: return self._get_portgroup(net_id) def _is_vlan_network_by_moref(self, moref): """ This can either be a VXLAN or a VLAN network. 
The type is determined by the prefix of the moref. """ return moref.startswith(PORTGROUP_PREFIX) def _copy_port_group_spec(self, orig_spec): client_factory = self._session.vim.client.factory pg_spec = client_factory.create('ns0:DVPortgroupConfigSpec') pg_spec.autoExpand = orig_spec['autoExpand'] pg_spec.configVersion = orig_spec['configVersion'] pg_spec.defaultPortConfig = orig_spec['defaultPortConfig'] pg_spec.name = orig_spec['name'] pg_spec.numPorts = orig_spec['numPorts'] pg_spec.policy = orig_spec['policy'] pg_spec.type = orig_spec['type'] return pg_spec def update_port_group_spec_qos(self, pg_spec, qos_data): port_conf = pg_spec.defaultPortConfig # Update the in bandwidth shaping policy # Note: openstack refers to the directions from the VM point of view, # while the NSX refers to the vswitch point of view. # so open stack egress is actually inShaping here. inPol = port_conf.inShapingPolicy if qos_data.egress.bandwidthEnabled: inPol.inherited = False inPol.enabled.inherited = False inPol.enabled.value = True inPol.averageBandwidth.inherited = False inPol.averageBandwidth.value = qos_data.egress.averageBandwidth inPol.peakBandwidth.inherited = False inPol.peakBandwidth.value = qos_data.egress.peakBandwidth inPol.burstSize.inherited = False inPol.burstSize.value = qos_data.egress.burstSize else: inPol.inherited = True outPol = port_conf.outShapingPolicy if qos_data.ingress.bandwidthEnabled: outPol.inherited = False outPol.enabled.inherited = False outPol.enabled.value = True outPol.averageBandwidth.inherited = False outPol.averageBandwidth.value = qos_data.ingress.averageBandwidth outPol.peakBandwidth.inherited = False outPol.peakBandwidth.value = qos_data.ingress.peakBandwidth outPol.burstSize.inherited = False outPol.burstSize.value = qos_data.ingress.burstSize else: outPol.inherited = True # Update the DSCP marking if (port_conf.filterPolicy.inherited or len(port_conf.filterPolicy.filterConfig) == 0 or len(port_conf.filterPolicy.filterConfig[ 
0].trafficRuleset.rules) == 0): if qos_data.dscpMarkEnabled: # create the entire structure client_factory = self._session.vim.client.factory filter_rule = client_factory.create('ns0:DvsTrafficRule') filter_rule.description = DSCP_RULE_DESCRIPTION filter_rule.action = client_factory.create( 'ns0:DvsUpdateTagNetworkRuleAction') filter_rule.action.dscpTag = qos_data.dscpMarkValue # mark only incoming packets (openstack egress = nsx ingress) filter_rule.direction = QOS_IN_DIRECTION # Add IP any->any qualifier qualifier = client_factory.create( 'ns0:DvsIpNetworkRuleQualifier') qualifier.protocol = 0 qualifier.sourceAddress = None qualifier.destinationAddress = None filter_rule.qualifier = [qualifier] traffic_filter_config = client_factory.create( 'ns0:DvsTrafficFilterConfig') traffic_filter_config.trafficRuleset.rules = [filter_rule] traffic_filter_config.trafficRuleset.enabled = True traffic_filter_config.agentName = QOS_AGENT_NAME traffic_filter_config.inherited = False port_conf.filterPolicy = client_factory.create( 'ns0:DvsFilterPolicy') port_conf.filterPolicy.filterConfig = [ traffic_filter_config] port_conf.filterPolicy.inherited = False else: # The structure was already initialized filter_policy = port_conf.filterPolicy if qos_data.dscpMarkEnabled: # just update the DSCP value traffic_filter_config = filter_policy.filterConfig[0] filter_rule = traffic_filter_config.trafficRuleset.rules[0] filter_rule.action.dscpTag = qos_data.dscpMarkValue else: # delete the filter policy data filter_policy.filterConfig = [] def _reconfigure_port_group(self, pg_moref, spec_update_calback, spec_update_data): # Get the current configuration of the port group pg_spec = self._session.invoke_api(vim_util, 'get_object_properties', self._session.vim, pg_moref, ['config']) if len(pg_spec) == 0 or len(pg_spec[0].propSet[0]) == 0: LOG.error('Failed to get object properties of %s', pg_moref) raise nsx_exc.DvsNotFound(dvs=pg_moref) # Convert the extracted config to DVPortgroupConfigSpec 
new_spec = self._copy_port_group_spec(pg_spec[0].propSet[0].val) # Update the configuration using the callback & data spec_update_calback(new_spec, spec_update_data) # Update the port group configuration task = self._session.invoke_api(self._session.vim, 'ReconfigureDVPortgroup_Task', pg_moref, spec=new_spec) try: self._session.wait_for_task(task) except Exception: LOG.error('Failed to reconfigure DVPortGroup %s', pg_moref) raise nsx_exc.DvsNotFound(dvs=pg_moref) # Update the dvs port groups config for a vxlan/vlan network # update the spec using a callback and user data def update_port_groups_config(self, dvs_id, net_id, net_moref, spec_update_calback, spec_update_data): is_vlan = self._is_vlan_network_by_moref(net_moref) if is_vlan: return self._update_net_port_groups_config(net_moref, spec_update_calback, spec_update_data) else: dvs_moref = self._get_dvs_moref_by_id(dvs_id) return self._update_vxlan_port_groups_config(dvs_moref, net_id, net_moref, spec_update_calback, spec_update_data) # Update the dvs port groups config for a vxlan network # Searching the port groups for a partial match to the network id & moref # update the spec using a callback and user data def _update_vxlan_port_groups_config(self, dvs_moref, net_id, net_moref, spec_update_calback, spec_update_data): port_groups = self._session.invoke_api(vim_util, 'get_object_properties', self._session.vim, dvs_moref, ['portgroup']) found = False if len(port_groups) and hasattr(port_groups[0], 'propSet'): for prop in port_groups[0].propSet: for pg_moref in prop.val[0]: props = self._session.invoke_api(vim_util, 'get_object_properties', self._session.vim, pg_moref, ['name']) if len(props) and hasattr(props[0], 'propSet'): for prop in props[0].propSet: if net_id in prop.val and net_moref in prop.val: found = True self._reconfigure_port_group( pg_moref, spec_update_calback, spec_update_data) if not found: raise exceptions.NetworkNotFound(net_id=net_id) # Update the dvs port groups config for a vlan network # 
Finding the port group using the exact moref of the network # update the spec using a callback and user data def _update_net_port_groups_config(self, net_moref, spec_update_calback, spec_update_data): pg_moref = vim_util.get_moref(net_moref, "DistributedVirtualPortgroup") self._reconfigure_port_group(pg_moref, spec_update_calback, spec_update_data) def delete_port_group(self, dvs_moref, net_id): """Delete a specific port group.""" moref = self._net_id_to_moref(dvs_moref, net_id) task = self._session.invoke_api(self._session.vim, 'Destroy_Task', moref) try: self._session.wait_for_task(task) except Exception: # NOTE(garyk): handle more specific exceptions with excutils.save_and_reraise_exception(): LOG.exception('Failed to delete port group for %s.', net_id) LOG.info("%(net_id)s delete from %(dvs)s.", {'net_id': net_id, 'dvs': dvs_moref.value}) def get_port_group_info(self, dvs_moref, net_id): """Get portgroup information.""" pg_moref = self._net_id_to_moref(dvs_moref, net_id) # Expand the properties to collect on need basis. 
properties = ['name'] pg_info = self._session.invoke_api(vim_util, 'get_object_properties_dict', self._session.vim, pg_moref, properties) return pg_info, pg_moref def _get_dvs_moref_from_teaming_data(self, teaming_data): """Get the moref dvs that belongs to the teaming data""" if 'switchObj' in teaming_data: if 'objectId' in teaming_data['switchObj']: dvs_id = teaming_data['switchObj']['objectId'] return vim_util.get_moref( dvs_id, 'VmwareDistributedVirtualSwitch') def update_port_group_spec_teaming(self, pg_spec, teaming_data): mapping = {'FAILOVER_ORDER': 'failover_explicit', 'ETHER_CHANNEL': 'loadbalance_ip', 'LACP_ACTIVE': 'loadbalance_ip', 'LACP_PASSIVE': 'loadbalance_ip', 'LACP_V2': 'loadbalance_ip', 'LOADBALANCE_SRCID': 'loadbalance_srcid', 'LOADBALANCE_SRCMAC': 'loadbalance_srcmac', 'LOADBALANCE_LOADBASED': 'loadbalance_loadbased'} dvs_moref = self._get_dvs_moref_from_teaming_data(teaming_data) port_conf = pg_spec.defaultPortConfig policy = port_conf.uplinkTeamingPolicy policy.inherited = False policy.policy.inherited = False policy.policy.value = mapping[teaming_data['teamingPolicy']] policy.uplinkPortOrder.inherited = False ports = teaming_data['failoverUplinkPortNames'] policy.uplinkPortOrder.activeUplinkPort = ports # The standby port will be those not configure as active ones uplinks = self._session.invoke_api(vim_util, "get_object_property", self._session.vim, dvs_moref, "config.uplinkPortPolicy") # VC does not support LAG and normal uplinks. 
So need to check # if we need to configure standby links if set(ports) & set(uplinks.uplinkPortName): standby = list(set(uplinks.uplinkPortName) - set(ports)) policy.uplinkPortOrder.standbyUplinkPort = standby def update_port_group_spec_name(self, pg_spec, name): pg_spec.name = name def update_port_group_spec_trunk(self, pg_spec, trunk_data): port_conf = pg_spec.defaultPortConfig port_conf.vlan = self._get_trunk_vlan_spec() def update_port_group_security_policy(self, pg_spec, status): policy = pg_spec.policy policy.securityPolicyOverrideAllowed = status def _update_port_security_policy(self, dvs_moref, port, status): client_factory = self._session.vim.client.factory ps = client_factory.create('ns0:DVPortConfigSpec') ps.key = port.portKey ps.operation = 'edit' policy = client_factory.create('ns0:DVSSecurityPolicy') bp = client_factory.create('ns0:BoolPolicy') bp.inherited = False bp.value = status policy.allowPromiscuous = bp policy.forgedTransmits = bp policy.inherited = False setting = client_factory.create('ns0:VMwareDVSPortSetting') setting.securityPolicy = policy ps.setting = setting task = self._session.invoke_api(self._session.vim, 'ReconfigureDVPort_Task', dvs_moref, port=ps) try: self._session.wait_for_task(task) LOG.info("Updated port security status") except Exception as e: LOG.error("Failed to update port %s. Reason: %s", port.key, e) class VMManager(VCManagerBase): """Management class for VMs related VC tasks.""" def get_vm_moref_obj(self, instance_uuid): """Get reference to the VM. The method will make use of FindAllByUuid to get the VM reference. This method finds all VM's on the backend that match the instance_uuid, more specifically all VM's on the backend that have 'config_spec.instanceUuid' set to 'instance_uuid'. 
""" vm_refs = self._session.invoke_api( self._session.vim, API_FIND_ALL_BY_UUID, self._session.vim.service_content.searchIndex, uuid=instance_uuid, vmSearch=True, instanceUuid=True) if vm_refs: return vm_refs[0] def get_vm_moref(self, instance_uuid): """Get reference to the VM. """ vm_ref = self.get_vm_moref_obj(instance_uuid) if vm_ref: return vm_ref.value def get_vm_spec(self, vm_moref): vm_specs = self._session.invoke_api(vim_util, 'get_object_properties', self._session.vim, vm_moref, ['network']) if vm_specs: return vm_specs[0] def _build_vm_spec_attach(self, neutron_port_id, port_mac, nsx_net_id, device_type): # Code inspired by nova: _create_vif_spec client_factory = self._session.vim.client.factory vm_spec = client_factory.create('ns0:VirtualMachineConfigSpec') device_change = client_factory.create('ns0:VirtualDeviceConfigSpec') device_change.operation = "add" net_device = client_factory.create('ns0:' + device_type) net_device.key = -47 net_device.addressType = "manual" # configure the neutron port id and mac net_device.externalId = neutron_port_id net_device.macAddress = port_mac net_device.wakeOnLanEnabled = True backing = client_factory.create( 'ns0:VirtualEthernetCardOpaqueNetworkBackingInfo') # configure the NSX network Id backing.opaqueNetworkId = nsx_net_id backing.opaqueNetworkType = "nsx.LogicalSwitch" net_device.backing = backing connectable_spec = client_factory.create( 'ns0:VirtualDeviceConnectInfo') connectable_spec.startConnected = True connectable_spec.allowGuestControl = True connectable_spec.connected = True net_device.connectable = connectable_spec device_change.device = net_device vm_spec.deviceChange = [device_change] return vm_spec def attach_vm_interface(self, vm_moref, neutron_port_id, port_mac, nsx_net_id, device_type): new_spec = self._build_vm_spec_attach( neutron_port_id, port_mac, nsx_net_id, device_type) task = self._session.invoke_api(self._session.vim, 'ReconfigVM_Task', vm_moref, spec=new_spec) try: 
self._session.wait_for_task(task) LOG.info("Updated VM moref %(moref)s spec - " "attached an interface", {'moref': vm_moref.value}) except Exception as e: LOG.error("Failed to reconfigure VM %(moref)s spec: %(e)s", {'moref': vm_moref.value, 'e': e}) def _build_vm_spec_update(self, devices): client_factory = self._session.vim.client.factory vm_spec = client_factory.create('ns0:VirtualMachineConfigSpec') vm_spec.deviceChange = [devices] return vm_spec def update_vm_interface(self, vm_moref, devices): update_spec = self._build_vm_spec_update(devices) task = self._session.invoke_api(self._session.vim, 'ReconfigVM_Task', vm_moref, spec=update_spec) try: self._session.wait_for_task(task) LOG.info("Updated VM moref %(moref)s spec - " "attached an interface", {'moref': vm_moref.value}) except Exception as e: LOG.error("Failed to reconfigure VM %(moref)s spec: %(e)s", {'moref': vm_moref.value, 'e': e}) def _build_vm_spec_detach(self, device): """Builds the vif detach config spec.""" # Code inspired by nova: get_network_detach_config_spec client_factory = self._session.vim.client.factory config_spec = client_factory.create('ns0:VirtualMachineConfigSpec') virtual_device_config = client_factory.create( 'ns0:VirtualDeviceConfigSpec') virtual_device_config.operation = "remove" virtual_device_config.device = device config_spec.deviceChange = [virtual_device_config] return config_spec def detach_vm_interface(self, vm_moref, device): new_spec = self._build_vm_spec_detach(device) task = self._session.invoke_api(self._session.vim, 'ReconfigVM_Task', vm_moref, spec=new_spec) try: self._session.wait_for_task(task) LOG.info("Updated VM %(moref)s spec - detached an interface", {'moref': vm_moref.value}) except Exception as e: LOG.error("Failed to reconfigure vm moref %(moref)s: %(e)s", {'moref': vm_moref.value, 'e': e}) def get_vm_interfaces_info(self, vm_moref): hardware_devices = self._session.invoke_api(vim_util, "get_object_property", self._session.vim, vm_moref, 
"config.hardware.device") return hardware_devices def _get_device_port(self, device_id, mac_address): vm_moref = self.get_vm_moref_obj(device_id) hardware_devices = self.get_vm_interfaces_info(vm_moref) if not hardware_devices: return if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice": hardware_devices = hardware_devices.VirtualDevice for device in hardware_devices: if hasattr(device, 'macAddress'): if device.macAddress == mac_address: return device.backing.port def update_port_security_policy(self, dvs_id, net_id, net_moref, device_id, mac_address, status): dvs_moref = self._get_dvs_moref_by_id(dvs_id) port = self._get_device_port(device_id, mac_address) if port: self._update_port_security_policy(dvs_moref, port, status) def update_vm_network(self, device, name='VM Network'): # In order to live migrate need a common network for interfaces client_factory = self._session.vim.client.factory network_spec = client_factory.create('ns0:VirtualDeviceConfigSpec') network_spec.operation = 'edit' backing = client_factory.create( 'ns0:VirtualEthernetCardNetworkBackingInfo') backing.deviceName = name device.backing = backing network_spec.device = device return network_spec def update_vm_opaque_spec(self, vif_info, device): """Updates the backing for the VIF spec.""" client_factory = self._session.vim.client.factory network_spec = client_factory.create('ns0:VirtualDeviceConfigSpec') network_spec.operation = 'edit' backing = client_factory.create( 'ns0:VirtualEthernetCardOpaqueNetworkBackingInfo') backing.opaqueNetworkId = vif_info['nsx_id'] backing.opaqueNetworkType = 'nsx.LogicalSwitch' # Configure externalId device.externalId = vif_info['iface_id'] device.backing = backing network_spec.device = device return network_spec def relocate_vm_spec(self, client_factory, respool_moref=None, datastore_moref=None, host_moref=None, disk_move_type="moveAllDiskBackingsAndAllowSharing"): rel_spec = client_factory.create('ns0:VirtualMachineRelocateSpec') if datastore_moref: 
datastore = vim_util.get_moref(datastore_moref, 'Datastore') else: datastore = None rel_spec.datastore = datastore host = vim_util.get_moref(host_moref, 'HostSystem') rel_spec.host = host res_pool = vim_util.get_moref(respool_moref, 'ResourcePool') rel_spec.pool = res_pool return rel_spec def relocate_vm(self, vm_ref, respool_moref=None, datastore_moref=None, host_moref=None, disk_move_type="moveAllDiskBackingsAndAllowSharing"): client_factory = self._session.vim.client.factory rel_spec = self.relocate_vm_spec(client_factory, respool_moref, datastore_moref, host_moref, disk_move_type) task = self._session.invoke_api(self._session.vim, "RelocateVM_Task", vm_ref, spec=rel_spec) self._session.wait_for_task(task) class ClusterManager(VCManagerBase): """Management class for Cluster related VC tasks.""" def _reconfigure_cluster(self, session, cluster, config_spec): """Reconfigure a cluster in vcenter""" try: reconfig_task = session.invoke_api( session.vim, "ReconfigureComputeResource_Task", cluster, spec=config_spec, modify=True) session.wait_for_task(reconfig_task) except Exception as excep: LOG.exception('Failed to reconfigure cluster %s', excep) def _create_vm_group_spec(self, client_factory, name, vm_refs, group=None): if group is None: group = client_factory.create('ns0:ClusterVmGroup') group.name = name operation = 'add' else: operation = 'edit' # On vCenter UI, it is not possible to create VM group without # VMs attached to it. But, using APIs, it is possible to create # VM group without VMs attached. 
Therefore, check for existence # of vm attribute in the group to avoid exceptions if hasattr(group, 'vm'): group.vm += vm_refs else: group.vm = vm_refs group_spec = client_factory.create('ns0:ClusterGroupSpec') group_spec.operation = operation group_spec.info = group return [group_spec] def _create_cluster_rules_spec(self, client_factory, name, vm_group_name, host_group_name): rules_spec = client_factory.create('ns0:ClusterRuleSpec') rules_spec.operation = 'add' policy_class = 'ns0:ClusterVmHostRuleInfo' rules_info = client_factory.create(policy_class) rules_info.enabled = True rules_info.mandatory = False rules_info.name = name rules_info.vmGroupName = vm_group_name rules_info.affineHostGroupName = host_group_name rules_spec.info = rules_info return rules_spec def _group_name(self, index, host_group_names): return 'neutron-group-%s-%s' % (index, host_group_names[index - 1]) def _rule_name(self, index, host_group_names): return 'neutron-rule-%s-%s' % (index, host_group_names[index - 1]) def get_configured_vms(self, resource_id, host_group_names): n_host_groups = len(host_group_names) session = self._session resource = vim_util.get_moref(resource_id, 'ResourcePool') # TODO(garyk): cache the cluster details cluster = session.invoke_api( vim_util, "get_object_property", self._session.vim, resource, "owner") cluster_config = session.invoke_api( vim_util, "get_object_property", self._session.vim, cluster, "configurationEx") configured_vms = [] for index in range(n_host_groups): vm_group = None entry_id = index + 1 groups = [] if hasattr(cluster_config, 'group'): groups = cluster_config.group for group in groups: if self._group_name(entry_id, host_group_names) == group.name: vm_group = group break if vm_group and hasattr(vm_group, 'vm'): for vm in vm_group.vm: configured_vms.append(vm.value) return configured_vms def update_cluster_edge_failover(self, resource_id, vm_moids, host_group_names): """Updates cluster for vm placement using DRS""" session = self._session 
resource = vim_util.get_moref(resource_id, 'ResourcePool') # TODO(garyk): cache the cluster details cluster = session.invoke_api( vim_util, "get_object_property", self._session.vim, resource, "owner") cluster_config = session.invoke_api( vim_util, "get_object_property", self._session.vim, cluster, "configurationEx") vms = [vim_util.get_moref(vm_moid, 'VirtualMachine') if vm_moid else None for vm_moid in vm_moids] client_factory = session.vim.client.factory config_spec = client_factory.create('ns0:ClusterConfigSpecEx') num_host_groups = len(host_group_names) rules = [] if hasattr(cluster_config, 'rule'): rules = cluster_config.rule for index, vm in enumerate(vms, start=1): if not vm: continue vmGroup = None groups = [] if hasattr(cluster_config, 'group'): groups = cluster_config.group for group in groups: if self._group_name(index, host_group_names) == group.name: vmGroup = group break # Create/update the VM group groupSpec = self._create_vm_group_spec( client_factory, self._group_name(index, host_group_names), [vm], vmGroup) config_spec.groupSpec.append(groupSpec) config_rule = None # Create the config rule if it does not exist for rule in rules: if self._rule_name(index, host_group_names) == rule.name: config_rule = rule break if config_rule is None and index <= num_host_groups: ruleSpec = self._create_cluster_rules_spec( client_factory, self._rule_name(index, host_group_names), self._group_name(index, host_group_names), host_group_names[index - 1]) config_spec.rulesSpec.append(ruleSpec) self._reconfigure_cluster(session, cluster, config_spec) def validate_host_groups(self, resource_id, host_group_names): session = self._session resource = vim_util.get_moref(resource_id, 'ResourcePool') cluster = session.invoke_api( vim_util, "get_object_property", self._session.vim, resource, "owner") client_factory = session.vim.client.factory config_spec = client_factory.create('ns0:ClusterConfigSpecEx') cluster_config = session.invoke_api( vim_util, "get_object_property", 
self._session.vim, cluster, "configurationEx") groups = [] if hasattr(cluster_config, 'group'): groups = cluster_config.group for host_group_name in host_group_names: found = False for group in groups: if host_group_name == group.name: found = True break if not found: LOG.error("%s does not exist", host_group_name) raise exceptions.NotFound() update_cluster = False num_host_groups = len(host_group_names) rules = [] if hasattr(cluster_config, 'rule'): rules = cluster_config.rule # Ensure that the VM groups are created for index in range(num_host_groups): entry_id = index + 1 vmGroup = None for group in groups: if self._group_name(entry_id, host_group_names) == group.name: vmGroup = group break if vmGroup is None: groupSpec = self._create_vm_group_spec( client_factory, self._group_name(entry_id, host_group_names), [], vmGroup) config_spec.groupSpec.append(groupSpec) update_cluster = True config_rule = None # Create the config rule if it does not exist for rule in rules: if self._rule_name(entry_id, host_group_names) == rule.name: config_rule = rule break if config_rule is None and index < num_host_groups: ruleSpec = self._create_cluster_rules_spec( client_factory, self._rule_name(entry_id, host_group_names), self._group_name(entry_id, host_group_names), host_group_names[index - 1]) config_spec.rulesSpec.append(ruleSpec) update_cluster = True if update_cluster: try: self._reconfigure_cluster(session, cluster, config_spec) except Exception as e: LOG.error('Unable to update cluster for host groups %s', e) def _delete_vm_group_spec(self, client_factory, name): group_spec = client_factory.create('ns0:ClusterGroupSpec') group = client_factory.create('ns0:ClusterVmGroup') group.name = name group_spec.operation = 'remove' group_spec.removeKey = name group_spec.info = group return [group_spec] def _delete_cluster_rules_spec(self, client_factory, rule): rules_spec = client_factory.create('ns0:ClusterRuleSpec') rules_spec.operation = 'remove' rules_spec.removeKey = 
int(rule.key) policy_class = 'ns0:ClusterVmHostRuleInfo' rules_info = client_factory.create(policy_class) rules_info.name = rule.name rules_info.vmGroupName = rule.vmGroupName rules_info.affineHostGroupName = rule.affineHostGroupName rules_spec.info = rules_info return rules_spec def cluster_host_group_cleanup(self, resource_id, host_group_names): n_host_groups = len(host_group_names) session = self._session resource = vim_util.get_moref(resource_id, 'ResourcePool') # TODO(garyk): cache the cluster details cluster = session.invoke_api( vim_util, "get_object_property", self._session.vim, resource, "owner") client_factory = session.vim.client.factory config_spec = client_factory.create('ns0:ClusterConfigSpecEx') cluster_config = session.invoke_api( vim_util, "get_object_property", self._session.vim, cluster, "configurationEx") groups = [] if hasattr(cluster_config, 'group'): groups = cluster_config.group rules = [] if hasattr(cluster_config, 'rule'): rules = cluster_config.rule groupSpec = [] ruleSpec = [] for index in range(n_host_groups): entry_id = index + 1 for group in groups: if self._group_name(entry_id, host_group_names) == group.name: groupSpec.append(self._delete_vm_group_spec( client_factory, group.name)) # Delete the config rule if it exists for rule in rules: if self._rule_name(entry_id, host_group_names) == rule.name: ruleSpec.append(self._delete_cluster_rules_spec( client_factory, rule)) if groupSpec: config_spec.groupSpec = groupSpec if ruleSpec: config_spec.rulesSpec = ruleSpec if groupSpec or ruleSpec: self._reconfigure_cluster(session, cluster, config_spec) class VCManager(DvsManager, VMManager, ClusterManager): """Management class for all vc related tasks.""" pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/dvs/dvs_utils.py0000644000175000017500000000667000000000000022563 0ustar00coreycorey00000000000000# Copyright 2014 VMware, Inc. 
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from oslo_vmware import api
from oslo_vmware import exceptions as oslo_vmware_exc

from vmware_nsx._i18n import _

# oslo.config options for connecting to vCenter; registered under the
# [dvs] group below.
dvs_opts = [
    cfg.StrOpt('host_ip',
               help='Hostname or IP address for connection to VMware '
                    'vCenter host.'),
    cfg.PortOpt('host_port', default=443,
                help='Port for connection to VMware vCenter host.'),
    cfg.StrOpt('host_username',
               help='Username for connection to VMware vCenter host.'),
    cfg.StrOpt('host_password',
               help='Password for connection to VMware vCenter host.',
               secret=True),
    cfg.FloatOpt('task_poll_interval', default=0.5,
                 help='The interval used for polling of remote tasks.'),
    cfg.StrOpt('ca_file',
               help='Specify a CA bundle file to use in verifying the '
                    'vCenter server certificate.'),
    cfg.BoolOpt('insecure', default=False,
                help='If true, the vCenter server certificate is not '
                     'verified. If false, then the default CA truststore '
                     'is used for verification. This option is ignored '
                     'if "ca_file" is set.'),
    cfg.IntOpt('api_retry_count', default=10,
               help='The number of times we retry on failures, e.g., '
                    'socket error, etc.'),
    cfg.StrOpt('dvs_name', help='The name of the preconfigured DVS.'),
    cfg.StrOpt('metadata_mode',
               help=_("This value should not be set. It is just required "
                      "for ensuring that the DVS plugin works with the "
                      "generic NSX metadata code")),
]

CONF = cfg.CONF
CONF.register_opts(dvs_opts, 'dvs')


# Create and register exceptions not in oslo.vmware
class DvsOperationBulkFault(oslo_vmware_exc.VimException):
    msg_fmt = _("Cannot complete a DVS operation for one or more members.")


def dvs_register_exceptions():
    # Make oslo.vmware translate the backend fault name into our class.
    oslo_vmware_exc.register_fault_class('DvsOperationBulkFault',
                                         DvsOperationBulkFault)


def dvs_is_enabled(dvs_id=None):
    """Returns the configured DVS status.

    True only when the vCenter connection options are all set and either a
    dvs_id was supplied or a DVS name is configured.
    """
    return bool(CONF.dvs.host_ip and CONF.dvs.host_username and
                CONF.dvs.host_password and (dvs_id or CONF.dvs.dvs_name))


def dvs_create_session():
    """Open a VMwareAPISession using the [dvs] configuration options."""
    return api.VMwareAPISession(CONF.dvs.host_ip,
                                CONF.dvs.host_username,
                                CONF.dvs.host_password,
                                CONF.dvs.api_retry_count,
                                CONF.dvs.task_poll_interval,
                                port=CONF.dvs.host_port,
                                cacert=CONF.dvs.ca_file,
                                insecure=CONF.dvs.insecure)


def dvs_name_get():
    # Convenience accessor for the configured DVS name.
    return CONF.dvs.dvs_name
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1942537 vmware-nsx-15.0.1.dev143/vmware_nsx/extension_drivers/0000755000175000017500000000000000000000000023142 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/extension_drivers/__init__.py0000644000175000017500000000117400000000000025256 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os NSX_EXT_PATH = os.path.join(os.path.dirname(__file__), 'extensions') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/extension_drivers/dns_integration.py0000644000175000017500000005434400000000000026715 0ustar00coreycorey00000000000000# Copyright (c) 2018 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import dns from neutron_lib.api import validators from neutron_lib.api.validators import availability_zone as az_validator from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import context as n_context from neutron_lib.exceptions import dns as dns_exc from neutron_lib.objects import registry as obj_reg from neutron_lib.plugins import directory from oslo_config import cfg from oslo_log import log as logging from neutron.db.models import dns as dns_model from neutron.db import models_v2 from neutron.services.externaldns import driver from vmware_nsx.common import driver_api from vmware_nsx.plugins.nsx_p import availability_zones as nsxp_az from vmware_nsx.plugins.nsx_v3 import availability_zones as nsx_az LOG = logging.getLogger(__name__) DNS_DOMAIN_DEFAULT = 'openstacklocal.' def _dotted_domain(dns_domain): if dns_domain.endswith('.'): return dns_domain return '%s.' 
% dns_domain # TODO(asarfaty) use dns-domain/nameserver from network az instead of global class DNSExtensionDriver(driver_api.ExtensionDriver): _supported_extension_alias = dns.ALIAS @property def extension_alias(self): return self._supported_extension_alias def process_create_network(self, plugin_context, request_data, db_data): dns_domain = request_data.get(dns.DNSDOMAIN) if not validators.is_attr_set(dns_domain): return if dns_domain: obj_reg.new_instance('NetworkDNSDomain', plugin_context, network_id=db_data['id'], dns_domain=dns_domain).create() db_data[dns.DNSDOMAIN] = dns_domain def process_update_network(self, plugin_context, request_data, db_data): new_value = request_data.get(dns.DNSDOMAIN) if not validators.is_attr_set(new_value): return current_dns_domain = db_data.get(dns.DNSDOMAIN) if current_dns_domain == new_value: return net_id = db_data['id'] if current_dns_domain: net_dns_domain = obj_reg.load_class('NetworkDNSDomain').get_object( plugin_context, network_id=net_id) if new_value: net_dns_domain['dns_domain'] = new_value db_data[dns.DNSDOMAIN] = new_value net_dns_domain.update() else: net_dns_domain.delete() db_data[dns.DNSDOMAIN] = '' elif new_value: obj_reg.new_instance('NetworkDNSDomain', plugin_context, network_id=net_id, dns_domain=new_value).create() db_data[dns.DNSDOMAIN] = new_value def process_create_port(self, plugin_context, request_data, db_data): if not (request_data.get(dns.DNSNAME) or request_data.get(dns.DNSDOMAIN)): return dns_name, is_dns_domain_default = self._get_request_dns_name( request_data, db_data['network_id'], plugin_context) if is_dns_domain_default: return network = self._get_network(plugin_context, db_data['network_id']) self._create_port_dns_record(plugin_context, request_data, db_data, network, dns_name) def _create_port_dns_record(self, plugin_context, request_data, db_data, network, dns_name): external_dns_domain = (request_data.get(dns.DNSDOMAIN) or network.get(dns.DNSDOMAIN)) current_dns_name, current_dns_domain 
= ( self._calculate_current_dns_name_and_domain( dns_name, external_dns_domain, self.external_dns_not_needed(plugin_context, network))) dns_data_obj = obj_reg.new_instance( 'PortDNS', plugin_context, port_id=db_data['id'], current_dns_name=current_dns_name, current_dns_domain=current_dns_domain, previous_dns_name='', previous_dns_domain='', dns_name=dns_name, dns_domain=request_data.get(dns.DNSDOMAIN, '')) dns_data_obj.create() return dns_data_obj def _calculate_current_dns_name_and_domain(self, dns_name, external_dns_domain, no_external_dns_service): # When creating a new PortDNS object, the current_dns_name and # current_dns_domain fields hold the data that the integration driver # will send to the external DNS service. They are set to non-blank # values only if all the following conditions are met: # 1) There is an external DNS integration driver configured # 2) The user request contains a valid non-blank value for the port's # dns_name # 3) The user request contains a valid non-blank value for the port's # dns_domain or the port's network has a non-blank value in its # dns_domain attribute are_both_dns_attributes_set = dns_name and external_dns_domain if no_external_dns_service or not are_both_dns_attributes_set: return '', '' return dns_name, external_dns_domain def _update_dns_db(self, dns_name, dns_domain, db_data, plugin_context, has_fixed_ips): dns_data_db = obj_reg.load_class('PortDNS').get_object( plugin_context, port_id=db_data['id']) if dns_data_db: is_dns_name_changed = (dns_name is not None and dns_data_db['current_dns_name'] != dns_name) if is_dns_name_changed or (has_fixed_ips and dns_data_db['current_dns_name']): dns_data_db['previous_dns_name'] = ( dns_data_db['current_dns_name']) dns_data_db['previous_dns_domain'] = ( dns_data_db['current_dns_domain']) if is_dns_name_changed: dns_data_db[dns.DNSNAME] = dns_name dns_data_db['current_dns_name'] = dns_name if dns_name: dns_data_db['current_dns_domain'] = dns_domain else: 
dns_data_db['current_dns_domain'] = '' dns_data_db.update() return dns_data_db if dns_name: dns_data_db = obj_reg.new_instance( 'PortDNS', plugin_context, port_id=db_data['id'], current_dns_name=dns_name, current_dns_domain=dns_domain, previous_dns_name='', previous_dns_domain='', dns_name=dns_name) dns_data_db.create() return dns_data_db def process_update_port(self, plugin_context, request_data, db_data): dns_name = request_data.get(dns.DNSNAME) has_fixed_ips = 'fixed_ips' in request_data if dns_name is None and not has_fixed_ips: return if dns_name is not None: dns_name, is_dns_domain_default = self._get_request_dns_name( request_data, db_data['network_id'], plugin_context) if is_dns_domain_default: self._extend_port_dict(db_data, db_data, None, plugin_context) return network = self._get_network(plugin_context, db_data['network_id']) dns_domain = network[dns.DNSDOMAIN] dns_data_db = None if not dns_domain or self.external_dns_not_needed(plugin_context, network): # No need to update external DNS service. Only process the port's # dns_name attribute if necessary if dns_name is not None: dns_data_db = self._process_only_dns_name_update( plugin_context, db_data, dns_name) else: dns_data_db = self._update_dns_db(dns_name, dns_domain, db_data, plugin_context, has_fixed_ips) self._extend_port_dict(db_data, db_data, dns_data_db, plugin_context) def _process_only_dns_name_update(self, plugin_context, db_data, dns_name): dns_data_db = obj_reg.load_class('PortDNS').get_object( plugin_context, port_id=db_data['id']) if dns_data_db: dns_data_db['dns_name'] = dns_name dns_data_db.update() return dns_data_db if dns_name: dns_data_db = obj_reg.new_instance( 'PortDNS', plugin_context, port_id=db_data['id'], current_dns_name='', current_dns_domain='', previous_dns_name='', previous_dns_domain='', dns_name=dns_name) dns_data_db.create() return dns_data_db def external_dns_not_needed(self, context, network): """Decide if ports in network need to be sent to the DNS service. 
:param context: plugin request context :param network: network dictionary :return: True or False """ pass def extend_network_dict(self, session, db_data, response_data): response_data[dns.DNSDOMAIN] = '' if db_data.dns_domain: response_data[dns.DNSDOMAIN] = db_data.dns_domain[dns.DNSDOMAIN] return response_data def _get_dns_domain(self, network_id, context=None): if not cfg.CONF.dns_domain: return '' return _dotted_domain(cfg.CONF.dns_domain) def _get_request_dns_name(self, port, network_id, context): dns_domain = self._get_dns_domain(network_id, context) if ((dns_domain and dns_domain != DNS_DOMAIN_DEFAULT)): return (port.get(dns.DNSNAME, ''), False) return ('', True) def _get_request_dns_name_and_domain_name(self, dns_data_db, network_id, context): dns_domain = self._get_dns_domain(network_id, context) dns_name = '' if ((dns_domain and dns_domain != DNS_DOMAIN_DEFAULT)): if dns_data_db: dns_name = dns_data_db.dns_name return dns_name, dns_domain def _get_dns_names_for_port(self, ips, dns_data_db, network_id, context): dns_assignment = [] dns_name, dns_domain = self._get_request_dns_name_and_domain_name( dns_data_db, network_id, context) for ip in ips: if dns_name: hostname = dns_name fqdn = dns_name if not dns_name.endswith('.'): fqdn = '%s.%s' % (dns_name, dns_domain) else: hostname = 'host-%s' % ip['ip_address'].replace( '.', '-').replace(':', '-') fqdn = hostname if dns_domain: fqdn = '%s.%s' % (hostname, dns_domain) dns_assignment.append({'ip_address': ip['ip_address'], 'hostname': hostname, 'fqdn': fqdn}) return dns_assignment def _get_dns_name_for_port_get(self, port, dns_data_db, context): if port['fixed_ips']: return self._get_dns_names_for_port( port['fixed_ips'], dns_data_db, port['network_id'], context) return [] def _extend_port_dict(self, db_data, response_data, dns_data_db, context=None): if not dns_data_db: response_data[dns.DNSNAME] = '' else: response_data[dns.DNSNAME] = dns_data_db[dns.DNSNAME] response_data['dns_assignment'] = 
self._get_dns_name_for_port_get( db_data, dns_data_db, context) return response_data def extend_port_dict(self, session, db_data, response_data): dns_data_db = db_data.dns return self._extend_port_dict(db_data, response_data, dns_data_db) def _get_network(self, context, network_id): plugin = directory.get_plugin() return plugin.get_network(context, network_id) class DNSExtensionDriverNSXv(DNSExtensionDriver): def initialize(self): LOG.info("DNSExtensionDriverNSXv initialization complete") def external_dns_not_needed(self, context, network): dns_driver = _get_dns_driver() if not dns_driver: return True provider_type = network.get('provider:network_type') if not provider_type: return True if network['router:external']: return True return False class DNSExtensionDriverNSXv3(DNSExtensionDriver): def initialize(self): self._availability_zones = nsx_az.NsxV3AvailabilityZones() LOG.info("DNSExtensionDriverNSXv3 initialization complete") self.config_dns_domain = cfg.CONF.nsx_v3.dns_domain def _get_db_net_dns(self, session, network_id): db_entry = session.query(dns_model.NetworkDNSDomain).filter_by( network_id=network_id).first() if db_entry: return db_entry.dns_domain def _get_db_net_az_hints(self, session, network_id): # TODO(asarfaty): Consider caching networks azs in get_ports # and use it here db_entry = session.query(models_v2.Network).filter_by( id=network_id).first() if db_entry: return db_entry.availability_zone_hints def _get_network_and_az_dns_domain(self, network_id, context): if not context: context = n_context.get_admin_context() # Getting only the relevant network attributes directly from the DB net_domain = self._get_db_net_dns(context.session, network_id) # Getting the az of the network is relevant only if any of the azs # have dns_domain and if there is no net_domain az_domain = None if not net_domain and self._availability_zones.non_default_dns_domain: az = None net_hints = self._get_db_net_az_hints(context.session, network_id) if net_hints: hints = 
az_validator.convert_az_string_to_list(net_hints) if hints: az_name = hints[0] az = self._availability_zones.get_availability_zone( az_name) if not az: # Get the default availability zone az = self._availability_zones.get_default_availability_zone() az_domain = az.dns_domain return net_domain, az_domain def _get_dns_domain(self, network_id, context=None): net_domain, az_domain = self._get_network_and_az_dns_domain( network_id, context) # first try to use the dns_domain configured on the network if net_domain: return _dotted_domain(net_domain) # try to use the dns-domain from the specific availability zone # of this network if (az_domain and _dotted_domain(az_domain) != _dotted_domain(DNS_DOMAIN_DEFAULT)): dns_domain = az_domain # Global nsx_v3 dns domain elif (self.config_dns_domain and (_dotted_domain(self.config_dns_domain) != _dotted_domain(DNS_DOMAIN_DEFAULT))): dns_domain = self.config_dns_domain # Global neutron dns domain elif cfg.CONF.dns_domain: dns_domain = cfg.CONF.dns_domain else: return '' return _dotted_domain(dns_domain) def external_dns_not_needed(self, context, network): dns_driver = _get_dns_driver() if not dns_driver: return True provider_type = network.get('provider:network_type') if not provider_type: return True if network['router:external']: return True return False class DNSExtensionDriverNSXp(DNSExtensionDriverNSXv3): def initialize(self): self._availability_zones = nsxp_az.NsxPAvailabilityZones() LOG.info("DNSExtensionDriverNSXp initialization complete") self.config_dns_domain = cfg.CONF.nsx_p.dns_domain class DNSExtensionDriverDVS(DNSExtensionDriver): def initialize(self): LOG.info("DNSExtensionDriverDVS initialization complete") def external_dns_not_needed(self, context, network): dns_driver = _get_dns_driver() if not dns_driver: return True if network['router:external']: return True return False DNS_DRIVER = None def _get_dns_driver(): global DNS_DRIVER if DNS_DRIVER: return DNS_DRIVER if not cfg.CONF.external_dns_driver: return try: 
DNS_DRIVER = driver.ExternalDNSService.get_instance() LOG.debug("External DNS driver loaded: %s", cfg.CONF.external_dns_driver) return DNS_DRIVER except ImportError: LOG.exception("ImportError exception occurred while loading " "the external DNS service driver") raise dns_exc.ExternalDNSDriverNotFound( driver=cfg.CONF.external_dns_driver) def _send_data_to_external_dns_service(context, dns_driver, dns_domain, dns_name, records): try: dns_driver.create_record_set(context, dns_domain, dns_name, records) except (dns_exc.DNSDomainNotFound, dns_exc.DuplicateRecordSet) as e: LOG.exception("Error publishing port data in external DNS " "service. Name: '%(name)s'. Domain: '%(domain)s'. " "DNS service driver message '%(message)s'", {"name": dns_name, "domain": dns_domain, "message": e.msg}) def _remove_data_from_external_dns_service(context, dns_driver, dns_domain, dns_name, records): try: dns_driver.delete_record_set(context, dns_domain, dns_name, records) except (dns_exc.DNSDomainNotFound, dns_exc.DuplicateRecordSet) as e: LOG.exception("Error deleting port data from external DNS " "service. Name: '%(name)s'. Domain: '%(domain)s'. " "IP addresses '%(ips)s'. 
DNS service driver message " "'%(message)s'", {"name": dns_name, "domain": dns_domain, "message": e.msg, "ips": ', '.join(records)}) def _create_port_in_external_dns_service(resource, event, trigger, **kwargs): dns_driver = _get_dns_driver() if not dns_driver: return context = kwargs['context'] port = kwargs['port'] dns_data_db = obj_reg.load_class('PortDNS').get_object( context, port_id=port['id']) if not (dns_data_db and dns_data_db['current_dns_name']): return records = [ip['ip_address'] for ip in port['fixed_ips']] _send_data_to_external_dns_service(context, dns_driver, dns_data_db['current_dns_domain'], dns_data_db['current_dns_name'], records) def _update_port_in_external_dns_service(resource, event, trigger, **kwargs): dns_driver = _get_dns_driver() if not dns_driver: return context = kwargs['context'] updated_port = kwargs['port'] original_port = kwargs.get('original_port') if not original_port: return original_ips = [ip['ip_address'] for ip in original_port['fixed_ips']] updated_ips = [ip['ip_address'] for ip in updated_port['fixed_ips']] is_dns_name_changed = (updated_port[dns.DNSNAME] != original_port[dns.DNSNAME]) is_dns_domain_changed = (dns.DNSDOMAIN in updated_port and updated_port[dns.DNSDOMAIN] != original_port[dns.DNSDOMAIN]) ips_changed = set(original_ips) != set(updated_ips) if not any((is_dns_name_changed, is_dns_domain_changed, ips_changed)): return dns_data_db = obj_reg.load_class('PortDNS').get_object( context, port_id=updated_port['id']) if not (dns_data_db and (dns_data_db['previous_dns_name'] or dns_data_db['current_dns_name'])): return if dns_data_db['previous_dns_name']: _remove_data_from_external_dns_service( context, dns_driver, dns_data_db['previous_dns_domain'], dns_data_db['previous_dns_name'], original_ips) if dns_data_db['current_dns_name']: _send_data_to_external_dns_service(context, dns_driver, dns_data_db['current_dns_domain'], dns_data_db['current_dns_name'], updated_ips) def _delete_port_in_external_dns_service(resource, 
event, trigger, payload=None): dns_driver = _get_dns_driver() if not dns_driver: return context = payload.context port_id = payload.resource_id dns_data_db = obj_reg.load_class('PortDNS').get_object( context, port_id=port_id) if not dns_data_db: return if dns_data_db['current_dns_name']: ip_allocations = obj_reg.load_class('IPAllocation').get_objects( context, port_id=port_id) records = [str(alloc['ip_address']) for alloc in ip_allocations] _remove_data_from_external_dns_service( context, dns_driver, dns_data_db['current_dns_domain'], dns_data_db['current_dns_name'], records) registry.subscribe( _create_port_in_external_dns_service, resources.PORT, events.AFTER_CREATE) registry.subscribe( _update_port_in_external_dns_service, resources.PORT, events.AFTER_UPDATE) registry.subscribe( _delete_port_in_external_dns_service, resources.PORT, events.BEFORE_DELETE) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1982536 vmware-nsx-15.0.1.dev143/vmware_nsx/extensions/0000755000175000017500000000000000000000000021567 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/extensions/__init__.py0000644000175000017500000000113700000000000023702 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron.db.models import securitygroup # noqa ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/extensions/advancedserviceproviders.py0000644000175000017500000000301300000000000027222 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api import extensions # Attribute Map ADV_SERVICE_PROVIDERS = 'advanced_service_providers' ALIAS = 'advanced-service-providers' EXTENDED_ATTRIBUTES_2_0 = { 'subnets': { ADV_SERVICE_PROVIDERS: {'allow_post': False, 'allow_put': False, 'is_visible': True, 'default': None}}} class Advancedserviceproviders(extensions.ExtensionDescriptor): @classmethod def get_name(cls): return "Advanced Service Providers" @classmethod def get_alias(cls): return ALIAS @classmethod def get_description(cls): return "Id of the advanced service providers attached to the subnet" @classmethod def get_updated(cls): return "2014-12-11T12:00:00-00:00" def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 else: return {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/extensions/api_replay.py0000644000175000017500000000546100000000000024274 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. 
# # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from neutron_lib.api import extensions from neutron_lib.db import constants as db_const ALIAS = 'api-replay' # The attributes map is here for 2 reasons: # 1) allow posting id for the different objects we are importing # 2) make sure security-group named 'default' is also copied ID_WITH_POST = {'allow_post': True, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True} RESOURCE_ATTRIBUTE_MAP = { 'ports': { 'id': ID_WITH_POST, }, 'networks': { 'id': ID_WITH_POST, }, 'subnets': { 'id': ID_WITH_POST, }, 'security_groups': { 'id': ID_WITH_POST, 'name': {'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': {'type:string': db_const.NAME_FIELD_SIZE}}, }, 'security_group_rules': { 'id': ID_WITH_POST, }, 'routers': { 'id': ID_WITH_POST, }, 'policies': { # QoS policies 'id': ID_WITH_POST, }, 'firewall_rules': { # FWaaS V2 rules 'id': ID_WITH_POST, }, 'firewall_policies': { # FWaaS V2 policies 'id': ID_WITH_POST, }, 'firewall_groups': { # FWaaS V2 groups 'id': ID_WITH_POST, }, } class Api_replay(extensions.ExtensionDescriptor): """Extension for api replay which allows us to specify ids of resources.""" @classmethod def get_name(cls): return "Api Replay" @classmethod def get_alias(cls): return ALIAS @classmethod def get_description(cls): return "Enables mode to allow api to be replayed" @classmethod def get_updated(cls): return 
"2016-05-05T10:00:00-00:00" def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP else: return {} def get_required_extensions(self): # make sure this extension is called after those, so our change # will not be overridden return ["security-group", "router"] def get_optional_extensions(self): # QoS is optional since it is not always enabled return ["qos"] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/extensions/dhcp_mtu.py0000644000175000017500000000314600000000000023750 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.api import extensions from neutron_lib import constants DHCP_MTU = 'dhcp_mtu' ALIAS = 'dhcp-mtu' EXTENDED_ATTRIBUTES_2_0 = { 'subnets': { DHCP_MTU: { 'allow_post': True, 'allow_put': True, 'default': constants.ATTR_NOT_SPECIFIED, # This is the legal range for the backend MTU 'validate': {'type:range': (68, 65535)}, 'is_visible': True}, } } class Dhcp_mtu(extensions.ExtensionDescriptor): """Extension class supporting DHCP MTU for subnets.""" @classmethod def get_name(cls): return "DHCP MTU" @classmethod def get_alias(cls): return ALIAS @classmethod def get_description(cls): return "Enable the ability to add DHCP MTU for Subnets" @classmethod def get_updated(cls): return "2016-7-21T10:00:00-00:00" def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 return {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/extensions/dns_search_domain.py0000644000175000017500000000650200000000000025604 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import re from neutron_lib.api import extensions from neutron_lib.api import validators from neutron_lib import constants from neutron_lib.db import constants as db_const from vmware_nsx._i18n import _ DNS_LABEL_MAX_LEN = 63 DNS_LABEL_REGEX = "[a-zA-Z0-9-]{1,%d}$" % DNS_LABEL_MAX_LEN def _validate_dns_format(data): if not data: return try: # Allow values ending in period '.' trimmed = data if not data.endswith('.') else data[:-1] names = trimmed.split('.') for name in names: if not name: raise TypeError(_("Encountered an empty component")) if name.endswith('-') or name[0] == '-': raise TypeError( _("Name '%s' must not start or end with a hyphen") % name) if not re.match(DNS_LABEL_REGEX, name): raise TypeError( _("Name '%s' must be 1-63 characters long, each of " "which can only be alphanumeric or a hyphen") % name) # RFC 1123 hints that a TLD can't be all numeric. last is a TLD if # it's an FQDN. if len(names) > 1 and re.match("^[0-9]+$", names[-1]): raise TypeError(_("TLD '%s' must not be all numeric") % names[-1]) except TypeError as e: msg = _("'%(data)s' not a valid DNS search domain. 
Reason: " "%(reason)s") % {'data': data, 'reason': str(e)} return msg def _validate_dns_search_domain(data, max_len=db_const.NAME_FIELD_SIZE): msg = validators.validate_string(data, max_len) if msg: return msg if not data: return msg = _validate_dns_format(data) if msg: return msg validators.add_validator('dns_search_domain', _validate_dns_search_domain) ALIAS = 'dns-search-domain' DNS_SEARCH_DOMAIN = 'dns_search_domain' EXTENDED_ATTRIBUTES_2_0 = { 'subnets': { DNS_SEARCH_DOMAIN: { 'allow_post': True, 'allow_put': True, 'default': constants.ATTR_NOT_SPECIFIED, 'validate': {'type:dns_search_domain': db_const.NAME_FIELD_SIZE}, 'is_visible': True}, } } class Dns_search_domain(extensions.ExtensionDescriptor): """Extension class supporting dns search domains for subnets.""" @classmethod def get_name(cls): return "DNS search Domains" @classmethod def get_alias(cls): return ALIAS @classmethod def get_description(cls): return "Enable the ability to add DNS search domain name for Subnets" @classmethod def get_updated(cls): return "2016-1-22T10:00:00-00:00" def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 return {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/extensions/edge_service_gateway_bgp_peer.py0000644000175000017500000000645100000000000030157 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import re from neutron_lib.api import extensions from neutron_lib.api import validators from neutron_lib import exceptions as nexception from vmware_nsx._i18n import _ EDGE_SERVICE_GW = 'esg_id' EDGE_ID_MAX_LEN = 15 ALIAS = 'edge-service-gateway-bgp-peer' def _validate_edge_service_gw_id(esg_id, valid_values=None): if esg_id is None: return msg = validators.validate_string(esg_id, max_len=EDGE_ID_MAX_LEN) if msg: return msg if re.match(r'^edge-[1-9]+[0-9]*$', esg_id) is None: msg = _("'%s' is not a valid edge service gateway id.") % esg_id return msg validators.add_validator('validate_edge_service_gw_id', _validate_edge_service_gw_id) RESOURCE_ATTRIBUTE_MAP = { 'bgp-peers': { EDGE_SERVICE_GW: { 'allow_post': True, 'allow_put': False, 'default': None, 'validate': {'type:validate_edge_service_gw_id': None}, 'enforce_policy': True, 'is_visible': True, 'required_by_policy': False } } } class BgpDisabledOnEsgPeer(nexception.InvalidInput): message = _("To add this peer to BGP speaker you must first enable BGP on " "the associated ESG - '%(esg_id)s'.") class EsgRemoteASDoNotMatch(nexception.InvalidInput): message = _("Specified remote AS is '%(remote_as)s', but ESG '%(esg_id)s' " "is configured on AS %(esg_as)s.") class ExternalSubnetHasGW(nexception.InvalidInput): message = _("Subnet '%(subnet_id)s' on external network '%(network_id)s' " "is configured with gateway IP, set to None before enabling " "BGP on the network.") class EsgInternalIfaceDoesNotMatch(nexception.InvalidInput): message = _("Given BGP peer IP address doesn't match " "any interface on ESG '%(esg_id)s'") class Edge_service_gateway_bgp_peer(extensions.ExtensionDescriptor): """Extension class to allow identifying of-peer with specificN SXv edge service gateway. 
""" @classmethod def get_name(cls): return "Edge service gateway bgp peer" @classmethod def get_alias(cls): return ALIAS @classmethod def get_description(cls): return ("Adding a new (optional) attribute 'esg_id' to bgp-peer " "resource, where esg_id is a valid NSXv Edge service gateway " "id.") @classmethod def get_updated(cls): return "2017-04-01T10:00:00-00:00" def get_required_extensions(self): return ["bgp"] def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP else: return {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/extensions/housekeeper.py0000644000175000017500000000713700000000000024470 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc from neutron.api.v2 import resource_helper from neutron_lib.api import extensions from neutron_lib import exceptions as nexception from vmware_nsx._i18n import _ HOUSEKEEPER_RESOURCE_NAME = "housekeeper" HOUSEKEEPERS = "housekeepers" ALIAS = 'housekeeper' # The housekeeper tasks table is read only RESOURCE_ATTRIBUTE_MAP = { HOUSEKEEPERS: { 'name': { 'allow_post': False, 'allow_put': False, 'is_visible': True}, 'description': { 'allow_post': False, 'allow_put': False, 'is_visible': True}, 'enabled': { 'allow_post': False, 'allow_put': False, 'is_visible': True}, 'error_count': { 'allow_post': False, 'allow_put': False, 'is_visible': True}, 'fixed_count': { 'allow_post': False, 'allow_put': False, 'is_visible': True}, 'error_info': { 'allow_post': False, 'allow_put': False, 'is_visible': True}, } } class Housekeeper(extensions.ExtensionDescriptor): """API extension for NSX housekeeper jobs.""" @classmethod def get_name(cls): return "Housekeeper" @classmethod def get_alias(cls): return ALIAS @classmethod def get_description(cls): return "NSX plugin housekeeping services." 
@classmethod def get_updated(cls): return "2016-11-20T00:00:00-00:00" @classmethod def get_resources(cls): """Returns Ext Resources.""" plural_mappings = resource_helper.build_plural_mappings( {}, RESOURCE_ATTRIBUTE_MAP) member_actions = {} return resource_helper.build_resource_info(plural_mappings, RESOURCE_ATTRIBUTE_MAP, None, action_map=member_actions, register_quota=True, translate_name=True) def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP else: return {} class HousekeeperReadOnly(nexception.NotAuthorized): message = _("NSX housekeeper tasks are read-only.") class HousekeeperPluginBase(object): @abc.abstractmethod def create_housekeeper(self, context, housekeeper): raise HousekeeperReadOnly() @abc.abstractmethod def update_housekeeper(self, context, name, housekeeper): pass @abc.abstractmethod def get_housekeeper(self, context, name, fields=None): pass @abc.abstractmethod def delete_housekeeper(self, context, name): raise HousekeeperReadOnly() @abc.abstractmethod def get_housekeepers(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pass @abc.abstractmethod def get_housekeeper_count(self, context, filters=None): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/extensions/lsn.py0000644000175000017500000000503300000000000022736 0ustar00coreycorey00000000000000# Copyright 2014 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # from neutron.api import extensions from neutron.api.v2 import base from neutron_lib.api import extensions as api_extensions from neutron_lib.plugins import directory ALIAS = 'lsn' COLLECTION_NAME = "%ss" % ALIAS RESOURCE_ATTRIBUTE_MAP = { COLLECTION_NAME: { 'network': {'allow_post': True, 'allow_put': False, 'validate': {'type:string': None}, 'is_visible': True}, 'report': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': None}, 'is_visible': True}, }, } class Lsn(api_extensions.ExtensionDescriptor): """Enable LSN configuration for Neutron NSX networks.""" @classmethod def get_name(cls): return "Logical Service Node configuration" @classmethod def get_alias(cls): return ALIAS @classmethod def get_description(cls): return "Enables configuration of NSX Logical Services Node." @classmethod def get_updated(cls): return "2013-10-05T10:00:00-00:00" @classmethod def get_resources(cls): """Returns Ext Resources.""" exts = [] plugin = directory.get_plugin() resource_name = ALIAS collection_name = resource_name.replace('_', '-') + "s" params = RESOURCE_ATTRIBUTE_MAP.get(COLLECTION_NAME, dict()) controller = base.create_resource(collection_name, resource_name, plugin, params, allow_bulk=False) ex = extensions.ResourceExtension(collection_name, controller) exts.append(ex) return exts def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP else: return {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/extensions/maclearning.py0000644000175000017500000000336200000000000024425 0ustar00coreycorey00000000000000# Copyright 2013 VMware, Inc. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api import converters from neutron_lib.api import extensions from neutron_lib import constants ALIAS = 'mac-learning' MAC_LEARNING = 'mac_learning_enabled' EXTENDED_ATTRIBUTES_2_0 = { 'ports': { MAC_LEARNING: {'allow_post': True, 'allow_put': True, 'convert_to': converters.convert_to_boolean, 'default': constants.ATTR_NOT_SPECIFIED, 'is_visible': True}, } } class Maclearning(extensions.ExtensionDescriptor): """Extension class supporting port mac learning.""" @classmethod def get_name(cls): return "MAC Learning" @classmethod def get_alias(cls): return ALIAS @classmethod def get_description(cls): return "Provides MAC learning capabilities." @classmethod def get_updated(cls): return "2013-05-1T10:00:00-00:00" @classmethod def get_resources(cls): """Returns Ext Resources.""" return [] def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 else: return {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/extensions/nsxpolicy.py0000644000175000017500000000634500000000000024201 0ustar00coreycorey00000000000000# Copyright 2016 VMware. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import abc from neutron.api.v2 import resource_helper from neutron_lib.api import extensions from neutron_lib import exceptions as nexception from vmware_nsx._i18n import _ POLICY_RESOURCE_NAME = "nsx_policy" # Use dash for alias and collection name ALIAS = POLICY_RESOURCE_NAME.replace('_', '-') NSX_POLICIES = "nsx_policies" # The nsx-policies table is read only RESOURCE_ATTRIBUTE_MAP = { NSX_POLICIES: { 'id': { 'allow_post': False, 'allow_put': False, 'is_visible': True}, 'name': { 'allow_post': False, 'allow_put': False, 'is_visible': True}, 'description': { 'allow_post': False, 'allow_put': False, 'is_visible': True}, } } class Nsxpolicy(extensions.ExtensionDescriptor): """API extension for NSX policies.""" @classmethod def get_name(cls): return "NSX Policy" @classmethod def get_alias(cls): return ALIAS @classmethod def get_description(cls): return "NSX security policies." 
@classmethod def get_updated(cls): return "2016-11-20T00:00:00-00:00" @classmethod def get_resources(cls): """Returns Ext Resources.""" plural_mappings = resource_helper.build_plural_mappings( {}, RESOURCE_ATTRIBUTE_MAP) member_actions = {} return resource_helper.build_resource_info(plural_mappings, RESOURCE_ATTRIBUTE_MAP, None, action_map=member_actions, register_quota=True, translate_name=True) def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP else: return {} class NsxPolicyReadOnly(nexception.NotAuthorized): message = _("NSX policies are read-only.") class NsxPolicyPluginBase(object): @abc.abstractmethod def create_nsx_policy(self, context, nsx_policy): raise NsxPolicyReadOnly() @abc.abstractmethod def update_nsx_policy(self, context, id, nsx_policy): raise NsxPolicyReadOnly() @abc.abstractmethod def get_nsx_policy(self, context, id, fields=None): pass @abc.abstractmethod def delete_nsx_policy(self, context, id): raise NsxPolicyReadOnly() @abc.abstractmethod def get_nsx_policies(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/extensions/projectpluginmap.py0000644000175000017500000001122200000000000025522 0ustar00coreycorey00000000000000# Copyright 2017 VMware. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# import abc from neutron.api.v2 import resource_helper from neutron_lib.api import extensions from neutron_lib.db import constants as db_const from neutron_lib import exceptions as nexception from vmware_nsx._i18n import _ PROJECT_PLUGIN_RESOURCE_NAME = "project_plugin_map" # Use dash for alias and collection name ALIAS = PROJECT_PLUGIN_RESOURCE_NAME.replace('_', '-') PROJECT_PLUGINS = "project_plugin_maps" class NsxPlugins(object): NSX_V = 'nsx-v' NSX_T = 'nsx-t' DVS = 'dvs' NSX_P = 'nsx-p' # Note(asarfaty) this option is missing from the DB enum VALID_TYPES = [NsxPlugins.NSX_V, NsxPlugins.NSX_T, NsxPlugins.DVS] RESOURCE_ATTRIBUTE_MAP = { PROJECT_PLUGINS: { 'id': { 'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True}, # project is the id of the project mapped by this entry 'project': { 'allow_post': True, 'allow_put': False, 'is_visible': True}, 'plugin': { 'allow_post': True, 'allow_put': False, 'is_visible': True, 'validate': {'type:values': VALID_TYPES}}, # tenant id is the id of tenant/project owning this entry 'tenant_id': {'allow_post': True, 'allow_put': False, 'validate': { 'type:string': db_const.PROJECT_ID_FIELD_SIZE}, 'required_by_policy': True, 'is_visible': True}, } } class Projectpluginmap(extensions.ExtensionDescriptor): @classmethod def get_name(cls): return "Project Plugin Mapping" @classmethod def get_alias(cls): return ALIAS @classmethod def get_description(cls): return "Per Project Core Plugin." 
@classmethod def get_updated(cls): return "2017-12-05T00:00:00-00:00" @classmethod def get_resources(cls): """Returns Ext Resources.""" plural_mappings = resource_helper.build_plural_mappings( {}, RESOURCE_ATTRIBUTE_MAP) member_actions = {} return resource_helper.build_resource_info(plural_mappings, RESOURCE_ATTRIBUTE_MAP, None, action_map=member_actions, register_quota=True, translate_name=True) def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP else: return {} class ProjectPluginReadOnly(nexception.NotAuthorized): message = _("Project Plugin map entries cannot be modified.") class ProjectPluginAlreadyExists(nexception.Conflict): message = _("Project Plugin map already exists for project " "%(project_id)s.") class ProjectPluginAdminOnly(nexception.NotAuthorized): message = _("Project Plugin map can be added only by an admin user.") class ProjectPluginIllegalId(nexception.Conflict): message = _("Project ID %(project_id)s is illegal.") class ProjectPluginNotAvailable(nexception.NotAuthorized): message = _("Plugin %(plugin)s is not available.") class ProjectPluginMapPluginBase(object): @abc.abstractmethod def create_project_plugin_map(self, context, project_plugin_map): pass @abc.abstractmethod def update_project_plugin_map(self, context, id, project_plugin_map): raise ProjectPluginReadOnly() @abc.abstractmethod def get_project_plugin_map(self, context, id, fields=None): pass @abc.abstractmethod def delete_project_plugin_map(self, context, id): # TODO(asarfaty): delete when the project is deleted? 
raise ProjectPluginReadOnly() @abc.abstractmethod def get_project_plugin_maps(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/extensions/providersecuritygroup.py0000644000175000017500000000574100000000000026647 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.api import converters from neutron_lib.api import extensions from neutron_lib import constants from neutron_lib import exceptions as nexception from vmware_nsx._i18n import _ ALIAS = 'provider-security-group' PROVIDER = 'provider' PROVIDER_SECURITYGROUPS = 'provider_security_groups' EXTENDED_ATTRIBUTES_2_0 = { 'security_groups': { PROVIDER: { 'allow_post': True, 'allow_put': False, 'convert_to': converters.convert_to_boolean, 'default': False, 'enforce_policy': True, 'is_visible': True} }, 'ports': {PROVIDER_SECURITYGROUPS: { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'convert_to': converters.convert_none_to_empty_list, 'validate': {'type:uuid_list': None}, 'default': constants.ATTR_NOT_SPECIFIED} } } NUM_PROVIDER_SGS_ON_PORT = 1 class SecurityGroupNotProvider(nexception.InvalidInput): message = _("Security group %(id)s is not a provider security group.") class SecurityGroupIsProvider(nexception.InvalidInput): message = _("Security group %(id)s is a provider security group and " "cannot be specified via the security group field.") class DefaultSecurityGroupIsNotProvider(nexception.InvalidInput): message = _("Can't create default security-group as a provider " "security-group.") class ProviderSecurityGroupEditNotAdmin(nexception.NotAuthorized): message = _("Security group %(id)s is a provider security group and " "requires an admin to modify it.") class Providersecuritygroup(extensions.ExtensionDescriptor): """Provider security-group extension.""" @classmethod def get_name(cls): return "Provider security group" @classmethod def get_alias(cls): return ALIAS @classmethod def get_description(cls): return "Admin controlled security groups with blocking rules." 
@classmethod def get_updated(cls): return "2016-07-13T10:00:00-00:00" def get_required_extensions(self): return ["security-group"] @classmethod def get_resources(cls): """Returns Ext Resources.""" return [] def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 else: return {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/extensions/routersize.py0000644000175000017500000000331300000000000024354 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.api import extensions from neutron_lib import constants ALIAS = 'nsxv-router-size' ROUTER_SIZE = 'router_size' VALID_EDGE_SIZES = ['compact', 'large', 'xlarge', 'quadlarge'] EXTENDED_ATTRIBUTES_2_0 = { 'routers': { ROUTER_SIZE: {'allow_post': True, 'allow_put': True, 'validate': {'type:values': VALID_EDGE_SIZES}, 'default': constants.ATTR_NOT_SPECIFIED, 'is_visible': True}, } } class Routersize(extensions.ExtensionDescriptor): """Extension class supporting router size.""" @classmethod def get_name(cls): return "Router Size" @classmethod def get_alias(cls): return ALIAS @classmethod def get_description(cls): return "Enables configuration of NSXv Edge Size" @classmethod def get_updated(cls): return "2015-9-22T10:00:00-00:00" def get_required_extensions(self): return ["router"] def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 return {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/extensions/routertype.py0000644000175000017500000000344700000000000024373 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.api import extensions from neutron_lib import constants ALIAS = 'nsxv-router-type' ROUTER_TYPE = 'router_type' VALID_TYPES = ['shared', 'exclusive'] EXTENDED_ATTRIBUTES_2_0 = { 'routers': { ROUTER_TYPE: {'allow_post': True, 'allow_put': True, 'validate': {'type:values': VALID_TYPES}, 'default': constants.ATTR_NOT_SPECIFIED, 'is_visible': True}, } } class Routertype(extensions.ExtensionDescriptor): """Extension class supporting router type.""" @classmethod def get_name(cls): return "Router Type" @classmethod def get_alias(cls): return ALIAS @classmethod def get_description(cls): return "Enables configuration of NSXv router type." @classmethod def get_updated(cls): return "2015-1-12T10:00:00-00:00" def get_required_extensions(self): return ["router"] @classmethod def get_resources(cls): """Returns Ext Resources.""" return [] def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 else: return {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/extensions/secgroup_rule_local_ip_prefix.py0000644000175000017500000000367700000000000030253 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron.extensions import securitygroup from neutron_lib.api import extensions from neutron_lib import constants ALIAS = 'secgroup-rule-local-ip-prefix' LOCAL_IP_PREFIX = 'local_ip_prefix' RESOURCE_ATTRIBUTE_MAP = { 'security_group_rules': { LOCAL_IP_PREFIX: { 'allow_post': True, 'allow_put': False, 'convert_to': securitygroup.convert_ip_prefix_to_cidr, 'default': constants.ATTR_NOT_SPECIFIED, 'enforce_policy': True, 'is_visible': True} } } class Secgroup_rule_local_ip_prefix(extensions.ExtensionDescriptor): """Extension class to add support for specifying local-ip-prefix in a security-group rule. """ @classmethod def get_name(cls): return "Security Group rule local ip prefix" @classmethod def get_alias(cls): return ALIAS @classmethod def get_description(cls): return ("Enable to specify the 'local-ip-prefix' when creating a " "security-group rule.") @classmethod def get_updated(cls): return "2016-03-01T10:00:00-00:00" def get_required_extensions(self): return ["security-group"] def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP else: return {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/extensions/securitygrouplogging.py0000644000175000017500000000365400000000000026444 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.api import converters from neutron_lib.api import extensions ALIAS = 'security-group-logging' LOGGING = 'logging' RESOURCE_ATTRIBUTE_MAP = { 'security_groups': { LOGGING: { 'allow_post': True, 'allow_put': True, 'convert_to': converters.convert_to_boolean, 'default': False, 'enforce_policy': True, 'is_visible': True} } } class Securitygrouplogging(extensions.ExtensionDescriptor): """Security group logging extension.""" @classmethod def get_name(cls): return "Security group logging" @classmethod def get_alias(cls): return ALIAS @classmethod def get_description(cls): return "Security group logging extension." @classmethod def get_namespace(cls): # todo return "https://docs.openstack.org/ext/security_group_logging/api/v2.0" @classmethod def get_updated(cls): return "2015-04-13T10:00:00-00:00" def get_required_extensions(self): return ["security-group"] @classmethod def get_resources(cls): """Returns Ext Resources.""" return [] def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP else: return {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/extensions/securitygrouppolicy.py0000644000175000017500000000371000000000000026306 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.api import extensions from neutron_lib import exceptions as nexception from vmware_nsx._i18n import _ ALIAS = 'security-group-policy' POLICY = 'policy' RESOURCE_ATTRIBUTE_MAP = { 'security_groups': { POLICY: { 'allow_post': True, 'allow_put': True, 'enforce_policy': True, 'is_visible': True, 'default': None} } } class PolicySecurityGroupDeleteNotAdmin(nexception.NotAuthorized): message = _("Security group %(id)s is a policy security group and " "requires an admin to delete it.") class Securitygrouppolicy(extensions.ExtensionDescriptor): """Security group policy extension.""" @classmethod def get_name(cls): return "Security group policy" @classmethod def get_alias(cls): return ALIAS @classmethod def get_description(cls): return "Security group policy extension." @classmethod def get_updated(cls): return "2016-10-06T10:00:00-00:00" def get_required_extensions(self): return ["security-group"] @classmethod def get_resources(cls): """Returns Ext Resources.""" return [] def get_extended_resources(self, version): if version == "2.0": return RESOURCE_ATTRIBUTE_MAP else: return {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/extensions/vnicindex.py0000644000175000017500000000301600000000000024130 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.api import converters from neutron_lib.api import extensions # Attribute Map VNIC_INDEX = 'vnic_index' ALIAS = 'vnic-index' EXTENDED_ATTRIBUTES_2_0 = { 'ports': { VNIC_INDEX: {'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': None, 'convert_to': converters.convert_to_int_if_not_none}}} class Vnicindex(extensions.ExtensionDescriptor): @classmethod def get_name(cls): return "VNIC Index" @classmethod def get_alias(cls): return ALIAS @classmethod def get_description(cls): return ("Enable a port to be associated with a VNIC index") @classmethod def get_updated(cls): return "2014-09-15T12:00:00-00:00" def get_extended_resources(self, version): if version == "2.0": return EXTENDED_ATTRIBUTES_2_0 else: return {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/nsx_cluster.py0000644000175000017500000001013200000000000022310 0ustar00coreycorey00000000000000# Copyright 2012 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from oslo_log import log as logging import six from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions LOG = logging.getLogger(__name__) DEFAULT_PORT = 443 # Raise if one of those attributes is not specified REQUIRED_ATTRIBUTES = ['default_tz_uuid', 'nsx_user', 'nsx_password', 'nsx_controllers'] # Emit an INFO log if one of those attributes is not specified IMPORTANT_ATTRIBUTES = ['default_l3_gw_service_uuid'] # Deprecated attributes DEPRECATED_ATTRIBUTES = ['metadata_dhcp_host_route', 'nvp_user', 'nvp_password', 'nvp_controllers'] class NSXCluster(object): """NSX cluster class. Encapsulates controller connections and the API client for a NSX cluster. Controller-specific parameters, such as timeouts are stored in the elements of the controllers attribute, which are dicts. """ def __init__(self, **kwargs): self._required_attributes = REQUIRED_ATTRIBUTES[:] self._important_attributes = IMPORTANT_ATTRIBUTES[:] self._deprecated_attributes = {} self._sanity_check(kwargs) for opt, val in six.iteritems(self._deprecated_attributes): LOG.deprecated(_("Attribute '%s' has been deprecated or moved " "to a new section. 
See new configuration file " "for details."), opt) depr_func = getattr(self, '_process_%s' % opt, None) if depr_func: depr_func(val) # If everything went according to plan these two lists should be empty if self._required_attributes: raise exceptions.InvalidClusterConfiguration( invalid_attrs=self._required_attributes) if self._important_attributes: LOG.info("The following cluster attributes were " "not specified: %s'", self._important_attributes) # The API client will be explicitly created by users of this class self.api_client = None def _sanity_check(self, options): # Iterating this way ensures the conf parameters also # define the structure of this class for arg in cfg.CONF: if arg not in DEPRECATED_ATTRIBUTES: setattr(self, arg, options.get(arg, cfg.CONF.get(arg))) self._process_attribute(arg) elif options.get(arg) is not None: # Process deprecated attributes only if specified self._deprecated_attributes[arg] = options.get(arg) def _process_attribute(self, attribute): # Process the attribute only if it's not empty! 
if getattr(self, attribute, None): if attribute in self._required_attributes: self._required_attributes.remove(attribute) if attribute in self._important_attributes: self._important_attributes.remove(attribute) handler_func = getattr(self, '_process_%s' % attribute, None) if handler_func: handler_func() def _process_nsx_controllers(self): # If this raises something is not right, so let it bubble up # TODO(salvatore-orlando): Also validate attribute here for i, ctrl in enumerate(self.nsx_controllers or []): if len(ctrl.split(':')) == 1: self.nsx_controllers[i] = '%s:%s' % (ctrl, DEFAULT_PORT) def _process_nvp_controllers(self): self.nsx_controllers = self.nvp_controllers self._process_nsx_controllers() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1982536 vmware-nsx-15.0.1.dev143/vmware_nsx/nsxlib/0000755000175000017500000000000000000000000020667 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/nsxlib/__init__.py0000644000175000017500000000000000000000000022766 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1982536 vmware-nsx-15.0.1.dev143/vmware_nsx/nsxlib/mh/0000755000175000017500000000000000000000000021273 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/nsxlib/mh/__init__.py0000644000175000017500000001207200000000000023406 0ustar00coreycorey00000000000000# Copyright 2014 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron import version from neutron_lib import exceptions as exception from oslo_log import log from oslo_serialization import jsonutils import six from vmware_nsx._i18n import _ from vmware_nsx.api_client import exception as api_exc from vmware_nsx.common import exceptions as nsx_exc HTTP_GET = "GET" HTTP_POST = "POST" HTTP_DELETE = "DELETE" HTTP_PUT = "PUT" # Prefix to be used for all NSX API calls URI_PREFIX = "/ws.v1" NEUTRON_VERSION = version.version_info.release_string() LOG = log.getLogger(__name__) def _build_uri_path(resource, resource_id=None, parent_resource_id=None, fields=None, relations=None, filters=None, types=None, is_attachment=False, extra_action=None): resources = resource.split('/') res_path = resources[0] if resource_id: res_path += "/%s" % resource_id if len(resources) > 1: # There is also a parent resource to account for in the uri res_path = "%s/%s/%s" % (resources[1], parent_resource_id, res_path) if is_attachment: res_path = "%s/attachment" % res_path elif extra_action: res_path = "%s/%s" % (res_path, extra_action) params = [] params.append(fields and "fields=%s" % fields) params.append(relations and "relations=%s" % relations) params.append(types and "types=%s" % types) if filters: sorted_filters = [ '%s=%s' % (k, filters[k]) for k in sorted(filters.keys()) ] params.extend(sorted_filters) uri_path = "%s/%s" % (URI_PREFIX, res_path) non_empty_params = [x for x in params if x is not None] if non_empty_params: query_string = '&'.join(non_empty_params) if query_string: uri_path += "?%s" % query_string return uri_path def 
format_exception(etype, e, exception_locals): """Consistent formatting for exceptions. :param etype: a string describing the exception type. :param e: the exception. :param exception_locals: calling context local variable dict. :returns: a formatted string. """ msg = [_("Error. %(type)s exception: %(exc)s.") % {'type': etype, 'exc': e}] lcls = dict((k, v) for k, v in six.iteritems(exception_locals) if k != 'request') msg.append(_("locals=[%s]") % str(lcls)) return ' '.join(msg) def do_request(*args, **kwargs): """Issue a request to the cluster specified in kwargs. :param args: a list of positional arguments. :param kwargs: a list of keyworkds arguments. :returns: the result of the operation loaded into a python object or None. """ cluster = kwargs["cluster"] try: res = cluster.api_client.request(*args) if res: return jsonutils.loads(res) except api_exc.ResourceNotFound: raise exception.NotFound() except api_exc.ReadOnlyMode: raise nsx_exc.MaintenanceInProgress() def get_single_query_page(path, cluster, page_cursor=None, page_length=1000, neutron_only=True): params = [] if page_cursor: params.append("_page_cursor=%s" % page_cursor) params.append("_page_length=%s" % page_length) # NOTE(salv-orlando): On the NSX backend the 'Quantum' tag is still # used for marking Neutron entities in order to preserve compatibility if neutron_only: params.append("tag_scope=quantum") query_params = "&".join(params) path = "%s%s%s" % (path, "&" if (path.find("?") != -1) else "?", query_params) body = do_request(HTTP_GET, path, cluster=cluster) # Result_count won't be returned if _page_cursor is supplied return body['results'], body.get('page_cursor'), body.get('result_count') def get_all_query_pages(path, cluster): need_more_results = True result_list = [] page_cursor = None while need_more_results: results, page_cursor = get_single_query_page( path, cluster, page_cursor)[:2] if not page_cursor: need_more_results = False result_list.extend(results) return result_list def 
mk_body(**kwargs): """Convenience function creates and dumps dictionary to string. :param kwargs: the key/value pirs to be dumped into a json string. :returns: a json string. """ return jsonutils.dumps(kwargs, ensure_ascii=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/nsxlib/mh/lsn.py0000644000175000017500000002506400000000000022450 0ustar00coreycorey00000000000000# Copyright 2013 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib import exceptions as exception from oslo_log import log from oslo_serialization import jsonutils import six from vmware_nsx._i18n import _ from vmware_nsx.api_client import exception as api_exc from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import utils from vmware_nsx.nsxlib import mh as nsxlib HTTP_GET = "GET" HTTP_POST = "POST" HTTP_DELETE = "DELETE" HTTP_PUT = "PUT" SERVICECLUSTER_RESOURCE = "edge-cluster" LSERVICESNODE_RESOURCE = "lservices-node" LSERVICESNODEPORT_RESOURCE = "lport/%s" % LSERVICESNODE_RESOURCE SUPPORTED_METADATA_OPTIONS = ['metadata_proxy_shared_secret'] LOG = log.getLogger(__name__) def service_cluster_exists(cluster, svc_cluster_id): exists = False try: exists = ( svc_cluster_id and nsxlib.do_request(HTTP_GET, nsxlib._build_uri_path( SERVICECLUSTER_RESOURCE, resource_id=svc_cluster_id), cluster=cluster) is not None) except exception.NotFound: pass return exists def lsn_for_network_create(cluster, network_id): lsn_obj = { "edge_cluster_uuid": cluster.default_service_cluster_uuid, "tags": utils.get_tags(n_network_id=network_id) } return nsxlib.do_request(HTTP_POST, nsxlib._build_uri_path(LSERVICESNODE_RESOURCE), jsonutils.dumps(lsn_obj, sort_keys=True), cluster=cluster)["uuid"] def lsn_for_network_get(cluster, network_id): filters = {"tag": network_id, "tag_scope": "n_network_id"} results = nsxlib.do_request(HTTP_GET, nsxlib._build_uri_path(LSERVICESNODE_RESOURCE, fields="uuid", filters=filters), cluster=cluster)['results'] if not results: raise exception.NotFound() elif len(results) == 1: return results[0]['uuid'] def lsn_delete(cluster, lsn_id): nsxlib.do_request(HTTP_DELETE, nsxlib._build_uri_path(LSERVICESNODE_RESOURCE, resource_id=lsn_id), cluster=cluster) def lsn_port_host_entries_update( cluster, lsn_id, lsn_port_id, conf, hosts_data): hosts_obj = {'hosts': hosts_data} nsxlib.do_request(HTTP_PUT, nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE, parent_resource_id=lsn_id, 
resource_id=lsn_port_id, extra_action=conf), jsonutils.dumps(hosts_obj, sort_keys=True), cluster=cluster) def lsn_port_create(cluster, lsn_id, port_data): port_obj = { "ip_address": port_data["ip_address"], "mac_address": port_data["mac_address"], "tags": utils.get_tags(n_mac_address=port_data["mac_address"], n_subnet_id=port_data["subnet_id"]), "type": "LogicalServicesNodePortConfig", } return nsxlib.do_request(HTTP_POST, nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE, parent_resource_id=lsn_id), jsonutils.dumps(port_obj, sort_keys=True), cluster=cluster)["uuid"] def lsn_port_delete(cluster, lsn_id, lsn_port_id): return nsxlib.do_request(HTTP_DELETE, nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE, parent_resource_id=lsn_id, resource_id=lsn_port_id), cluster=cluster) def _lsn_port_get(cluster, lsn_id, filters): results = nsxlib.do_request(HTTP_GET, nsxlib._build_uri_path( LSERVICESNODEPORT_RESOURCE, parent_resource_id=lsn_id, fields="uuid", filters=filters), cluster=cluster)['results'] if not results: raise exception.NotFound() elif len(results) == 1: return results[0]['uuid'] def lsn_port_by_mac_get(cluster, lsn_id, mac_address): filters = {"tag": mac_address, "tag_scope": "n_mac_address"} return _lsn_port_get(cluster, lsn_id, filters) def lsn_port_by_subnet_get(cluster, lsn_id, subnet_id): filters = {"tag": subnet_id, "tag_scope": "n_subnet_id"} return _lsn_port_get(cluster, lsn_id, filters) def lsn_port_info_get(cluster, lsn_id, lsn_port_id): result = nsxlib.do_request(HTTP_GET, nsxlib._build_uri_path( LSERVICESNODEPORT_RESOURCE, parent_resource_id=lsn_id, resource_id=lsn_port_id), cluster=cluster) for tag in result['tags']: if tag['scope'] == 'n_subnet_id': result['subnet_id'] = tag['tag'] break return result def lsn_port_plug_network(cluster, lsn_id, lsn_port_id, lswitch_port_id): patch_obj = { "type": "PatchAttachment", "peer_port_uuid": lswitch_port_id } try: nsxlib.do_request(HTTP_PUT, nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE, 
parent_resource_id=lsn_id, resource_id=lsn_port_id, is_attachment=True), jsonutils.dumps(patch_obj, sort_keys=True), cluster=cluster) except api_exc.Conflict: # This restriction might be lifted at some point msg = (_("Attempt to plug Logical Services Node %(lsn)s into " "network with port %(port)s failed. PatchAttachment " "already exists with another port") % {'lsn': lsn_id, 'port': lswitch_port_id}) LOG.exception(msg) raise nsx_exc.LsnConfigurationConflict(lsn_id=lsn_id) def _lsn_configure_action( cluster, lsn_id, action, is_enabled, obj): lsn_obj = {"enabled": is_enabled} lsn_obj.update(obj) nsxlib.do_request(HTTP_PUT, nsxlib._build_uri_path(LSERVICESNODE_RESOURCE, resource_id=lsn_id, extra_action=action), jsonutils.dumps(lsn_obj, sort_keys=True), cluster=cluster) def _lsn_port_configure_action( cluster, lsn_id, lsn_port_id, action, is_enabled, obj): nsxlib.do_request(HTTP_PUT, nsxlib._build_uri_path(LSERVICESNODE_RESOURCE, resource_id=lsn_id, extra_action=action), jsonutils.dumps({"enabled": is_enabled}, sort_keys=True), cluster=cluster) nsxlib.do_request(HTTP_PUT, nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE, parent_resource_id=lsn_id, resource_id=lsn_port_id, extra_action=action), jsonutils.dumps(obj, sort_keys=True), cluster=cluster) def _get_opts(name, value): return {"name": name, "value": str(value)} def lsn_port_dhcp_configure( cluster, lsn_id, lsn_port_id, is_enabled=True, dhcp_options=None): dhcp_options = dhcp_options or {} opts = [_get_opts(key, val) for key, val in six.iteritems(dhcp_options)] dhcp_obj = {'options': opts} _lsn_port_configure_action( cluster, lsn_id, lsn_port_id, 'dhcp', is_enabled, dhcp_obj) def lsn_metadata_configure( cluster, lsn_id, is_enabled=True, metadata_info=None): meta_obj = { 'metadata_server_ip': metadata_info['metadata_server_ip'], 'metadata_server_port': metadata_info['metadata_server_port'], } if metadata_info: opts = [ _get_opts(opt, metadata_info[opt]) for opt in SUPPORTED_METADATA_OPTIONS if 
metadata_info.get(opt) ] if opts: meta_obj["options"] = opts _lsn_configure_action( cluster, lsn_id, 'metadata-proxy', is_enabled, meta_obj) def _lsn_port_host_action( cluster, lsn_id, lsn_port_id, host_obj, extra_action, action): nsxlib.do_request(HTTP_POST, nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE, parent_resource_id=lsn_id, resource_id=lsn_port_id, extra_action=extra_action, filters={"action": action}), jsonutils.dumps(host_obj, sort_keys=True), cluster=cluster) def lsn_port_dhcp_host_add(cluster, lsn_id, lsn_port_id, host_data): _lsn_port_host_action( cluster, lsn_id, lsn_port_id, host_data, 'dhcp', 'add_host') def lsn_port_dhcp_host_remove(cluster, lsn_id, lsn_port_id, host_data): _lsn_port_host_action( cluster, lsn_id, lsn_port_id, host_data, 'dhcp', 'remove_host') def lsn_port_metadata_host_add(cluster, lsn_id, lsn_port_id, host_data): _lsn_port_host_action( cluster, lsn_id, lsn_port_id, host_data, 'metadata-proxy', 'add_host') def lsn_port_metadata_host_remove(cluster, lsn_id, lsn_port_id, host_data): _lsn_port_host_action(cluster, lsn_id, lsn_port_id, host_data, 'metadata-proxy', 'remove_host') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/nsxlib/mh/switch.py0000644000175000017500000004021400000000000023147 0ustar00coreycorey00000000000000# Copyright 2014 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from neutron_lib import constants from neutron_lib import exceptions as exception from oslo_config import cfg from oslo_log import log from oslo_serialization import jsonutils from vmware_nsx._i18n import _ from vmware_nsx.api_client import exception as api_exc from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import utils from vmware_nsx.nsxlib import mh as nsxlib HTTP_GET = "GET" HTTP_POST = "POST" HTTP_DELETE = "DELETE" HTTP_PUT = "PUT" LSWITCH_RESOURCE = "lswitch" LSWITCHPORT_RESOURCE = "lport/%s" % LSWITCH_RESOURCE LOG = log.getLogger(__name__) def _configure_extensions(lport_obj, mac_address, fixed_ips, port_security_enabled, security_profiles, queue_id, mac_learning_enabled, allowed_address_pairs): lport_obj['allowed_address_pairs'] = [] if port_security_enabled: for fixed_ip in fixed_ips: ip_address = fixed_ip.get('ip_address') if ip_address: lport_obj['allowed_address_pairs'].append( {'mac_address': mac_address, 'ip_address': ip_address}) # add address pair allowing src_ip 0.0.0.0 to leave # this is required for outgoing dhcp request lport_obj["allowed_address_pairs"].append( {"mac_address": mac_address, "ip_address": "0.0.0.0"}) lport_obj['security_profiles'] = list(security_profiles or []) lport_obj['queue_uuid'] = queue_id if mac_learning_enabled is not None: lport_obj["mac_learning"] = mac_learning_enabled lport_obj["type"] = "LogicalSwitchPortConfig" for address_pair in list(allowed_address_pairs or []): lport_obj['allowed_address_pairs'].append( {'mac_address': address_pair['mac_address'], 'ip_address': address_pair['ip_address']}) def get_lswitch_by_id(cluster, lswitch_id): try: lswitch_uri_path = nsxlib._build_uri_path( LSWITCH_RESOURCE, lswitch_id, relations="LogicalSwitchStatus") return nsxlib.do_request(HTTP_GET, lswitch_uri_path, cluster=cluster) except exception.NotFound: # FIXME(salv-orlando): this should not raise a neutron exception raise exception.NetworkNotFound(net_id=lswitch_id) def get_lswitches(cluster, 
neutron_net_id): def lookup_switches_by_tag(): # Fetch extra logical switches lswitch_query_path = nsxlib._build_uri_path( LSWITCH_RESOURCE, fields="uuid,display_name,tags,lport_count", relations="LogicalSwitchStatus", filters={'tag': neutron_net_id, 'tag_scope': 'quantum_net_id'}) return nsxlib.get_all_query_pages(lswitch_query_path, cluster) lswitch_uri_path = nsxlib._build_uri_path(LSWITCH_RESOURCE, neutron_net_id, relations="LogicalSwitchStatus") results = [] try: ls = nsxlib.do_request(HTTP_GET, lswitch_uri_path, cluster=cluster) results.append(ls) for tag in ls['tags']: if (tag['scope'] == "multi_lswitch" and tag['tag'] == "True"): results.extend(lookup_switches_by_tag()) except exception.NotFound: # This is legit if the neutron network was created using # a post-Havana version of the plugin results.extend(lookup_switches_by_tag()) if results: return results else: raise exception.NetworkNotFound(net_id=neutron_net_id) def create_lswitch(cluster, neutron_net_id, tenant_id, display_name, transport_zones_config, shared=None, **kwargs): # The tag scope adopts a slightly different naming convention for # historical reasons lswitch_obj = {"display_name": utils.check_and_truncate(display_name), "transport_zones": transport_zones_config, "replication_mode": cfg.CONF.NSX.replication_mode, "tags": utils.get_tags(os_tid=tenant_id, quantum_net_id=neutron_net_id)} # TODO(salv-orlando): Now that we have async status synchronization # this tag is perhaps not needed anymore if shared: lswitch_obj["tags"].append({"tag": "true", "scope": "shared"}) if "tags" in kwargs: lswitch_obj["tags"].extend(kwargs["tags"]) uri = nsxlib._build_uri_path(LSWITCH_RESOURCE) lswitch = nsxlib.do_request(HTTP_POST, uri, jsonutils.dumps(lswitch_obj), cluster=cluster) LOG.debug("Created logical switch: %s", lswitch['uuid']) return lswitch def update_lswitch(cluster, lswitch_id, display_name, tenant_id=None, **kwargs): uri = nsxlib._build_uri_path(LSWITCH_RESOURCE, resource_id=lswitch_id) 
lswitch_obj = {"display_name": utils.check_and_truncate(display_name)} # NOTE: tag update will not 'merge' existing tags with new ones. tags = [] if tenant_id: tags = utils.get_tags(os_tid=tenant_id) # The 'tags' kwarg might existing and be None tags.extend(kwargs.get('tags') or []) if tags: lswitch_obj['tags'] = tags try: return nsxlib.do_request(HTTP_PUT, uri, jsonutils.dumps(lswitch_obj), cluster=cluster) except exception.NotFound as e: LOG.error("Network not found, Error: %s", str(e)) raise exception.NetworkNotFound(net_id=lswitch_id) def delete_network(cluster, net_id, lswitch_id): delete_networks(cluster, net_id, [lswitch_id]) #TODO(salvatore-orlando): Simplify and harmonize def delete_networks(cluster, net_id, lswitch_ids): for ls_id in lswitch_ids: path = "/ws.v1/lswitch/%s" % ls_id try: nsxlib.do_request(HTTP_DELETE, path, cluster=cluster) except exception.NotFound as e: LOG.error("Network not found, Error: %s", str(e)) raise exception.NetworkNotFound(net_id=ls_id) def query_lswitch_lports(cluster, ls_uuid, fields="*", filters=None, relations=None): # Fix filter for attachments if filters and "attachment" in filters: filters['attachment_vif_uuid'] = filters["attachment"] del filters['attachment'] uri = nsxlib._build_uri_path(LSWITCHPORT_RESOURCE, parent_resource_id=ls_uuid, fields=fields, filters=filters, relations=relations) return nsxlib.do_request(HTTP_GET, uri, cluster=cluster)['results'] def delete_port(cluster, switch, port): uri = "/ws.v1/lswitch/" + switch + "/lport/" + port try: nsxlib.do_request(HTTP_DELETE, uri, cluster=cluster) except exception.NotFound as e: LOG.error("Port or Network not found, Error: %s", str(e)) raise exception.PortNotFoundOnNetwork( net_id=switch, port_id=port) except api_exc.NsxApiException: raise exception.NeutronException() def get_ports(cluster, networks=None, devices=None, tenants=None): vm_filter_obsolete = "" vm_filter = "" tenant_filter = "" # This is used when calling delete_network. 
Neutron checks to see if # the network has any ports. if networks: # FIXME (Aaron) If we get more than one network_id this won't work lswitch = networks[0] else: lswitch = "*" if devices: for device_id in devices: vm_filter_obsolete = '&'.join( ["tag_scope=vm_id", "tag=%s" % utils.device_id_to_vm_id(device_id, obfuscate=True), vm_filter_obsolete]) vm_filter = '&'.join( ["tag_scope=vm_id", "tag=%s" % utils.device_id_to_vm_id(device_id), vm_filter]) if tenants: for tenant in tenants: tenant_filter = '&'.join( ["tag_scope=os_tid", "tag=%s" % tenant, tenant_filter]) nsx_lports = {} lport_fields_str = ("tags,admin_status_enabled,display_name," "fabric_status_up") try: lport_query_path_obsolete = ( "/ws.v1/lswitch/%s/lport?fields=%s&%s%stag_scope=q_port_id" "&relations=LogicalPortStatus" % (lswitch, lport_fields_str, vm_filter_obsolete, tenant_filter)) lport_query_path = ( "/ws.v1/lswitch/%s/lport?fields=%s&%s%stag_scope=q_port_id" "&relations=LogicalPortStatus" % (lswitch, lport_fields_str, vm_filter, tenant_filter)) try: # NOTE(armando-migliaccio): by querying with obsolete tag first # current deployments won't take the performance hit of a double # call. In release L-** or M-**, we might want to swap the calls # as it's likely that ports with the new tag would outnumber the # ones with the old tag ports = nsxlib.get_all_query_pages(lport_query_path_obsolete, cluster) if not ports: ports = nsxlib.get_all_query_pages(lport_query_path, cluster) except exception.NotFound: LOG.warning("Lswitch %s not found in NSX", lswitch) ports = None if ports: for port in ports: for tag in port["tags"]: if tag["scope"] == "q_port_id": nsx_lports[tag["tag"]] = port except Exception: err_msg = _("Unable to get ports") LOG.exception(err_msg) raise nsx_exc.NsxPluginException(err_msg=err_msg) return nsx_lports def get_port_by_neutron_tag(cluster, lswitch_uuid, neutron_port_id): """Get port by neutron tag. 
Returns the NSX UUID of the logical port with tag q_port_id equal to neutron_port_id or None if the port is not Found. """ uri = nsxlib._build_uri_path(LSWITCHPORT_RESOURCE, parent_resource_id=lswitch_uuid, fields='uuid', filters={'tag': neutron_port_id, 'tag_scope': 'q_port_id'}) LOG.debug("Looking for port with q_port_id tag '%(neutron_port_id)s' " "on: '%(lswitch_uuid)s'", {'neutron_port_id': neutron_port_id, 'lswitch_uuid': lswitch_uuid}) res = nsxlib.do_request(HTTP_GET, uri, cluster=cluster) num_results = len(res["results"]) if num_results >= 1: if num_results > 1: LOG.warning("Found '%(num_ports)d' ports with " "q_port_id tag: '%(neutron_port_id)s'. " "Only 1 was expected.", {'num_ports': num_results, 'neutron_port_id': neutron_port_id}) return res["results"][0] def get_port(cluster, network, port, relations=None): LOG.info("get_port() %(network)s %(port)s", {'network': network, 'port': port}) uri = "/ws.v1/lswitch/" + network + "/lport/" + port + "?" if relations: uri += "relations=%s" % relations try: return nsxlib.do_request(HTTP_GET, uri, cluster=cluster) except exception.NotFound as e: LOG.error("Port or Network not found, Error: %s", str(e)) raise exception.PortNotFoundOnNetwork( port_id=port, net_id=network) def update_port(cluster, lswitch_uuid, lport_uuid, neutron_port_id, tenant_id, display_name, device_id, admin_status_enabled, mac_address=None, fixed_ips=None, port_security_enabled=None, security_profiles=None, queue_id=None, mac_learning_enabled=None, allowed_address_pairs=None): lport_obj = dict( admin_status_enabled=admin_status_enabled, display_name=utils.check_and_truncate(display_name), tags=utils.get_tags(os_tid=tenant_id, q_port_id=neutron_port_id, vm_id=utils.device_id_to_vm_id(device_id))) _configure_extensions(lport_obj, mac_address, fixed_ips, port_security_enabled, security_profiles, queue_id, mac_learning_enabled, allowed_address_pairs) path = "/ws.v1/lswitch/" + lswitch_uuid + "/lport/" + lport_uuid try: result = 
nsxlib.do_request(HTTP_PUT, path, jsonutils.dumps(lport_obj), cluster=cluster) LOG.debug("Updated logical port %(result)s " "on logical switch %(uuid)s", {'result': result['uuid'], 'uuid': lswitch_uuid}) return result except exception.NotFound as e: LOG.error("Port or Network not found, Error: %s", str(e)) raise exception.PortNotFoundOnNetwork( port_id=lport_uuid, net_id=lswitch_uuid) def create_lport(cluster, lswitch_uuid, tenant_id, neutron_port_id, display_name, device_id, admin_status_enabled, mac_address=None, fixed_ips=None, port_security_enabled=None, security_profiles=None, queue_id=None, mac_learning_enabled=None, allowed_address_pairs=None): """Creates a logical port on the assigned logical switch.""" display_name = utils.check_and_truncate(display_name) lport_obj = dict( admin_status_enabled=admin_status_enabled, display_name=display_name, tags=utils.get_tags(os_tid=tenant_id, q_port_id=neutron_port_id, vm_id=utils.device_id_to_vm_id(device_id)) ) _configure_extensions(lport_obj, mac_address, fixed_ips, port_security_enabled, security_profiles, queue_id, mac_learning_enabled, allowed_address_pairs) path = nsxlib._build_uri_path(LSWITCHPORT_RESOURCE, parent_resource_id=lswitch_uuid) result = nsxlib.do_request(HTTP_POST, path, jsonutils.dumps(lport_obj), cluster=cluster) LOG.debug("Created logical port %(result)s on logical switch %(uuid)s", {'result': result['uuid'], 'uuid': lswitch_uuid}) return result def get_port_status(cluster, lswitch_id, port_id): """Retrieve the operational status of the port.""" try: r = nsxlib.do_request(HTTP_GET, "/ws.v1/lswitch/%s/lport/%s/status" % (lswitch_id, port_id), cluster=cluster) except exception.NotFound as e: LOG.error("Port not found, Error: %s", str(e)) raise exception.PortNotFoundOnNetwork( port_id=port_id, net_id=lswitch_id) if r['link_status_up'] is True: return constants.PORT_STATUS_ACTIVE else: return constants.PORT_STATUS_DOWN def plug_interface(cluster, lswitch_id, lport_id, att_obj): return 
nsxlib.do_request(HTTP_PUT, nsxlib._build_uri_path(LSWITCHPORT_RESOURCE, lport_id, lswitch_id, is_attachment=True), jsonutils.dumps(att_obj), cluster=cluster) def plug_vif_interface( cluster, lswitch_id, port_id, port_type, attachment=None): """Plug a VIF Attachment object in a logical port.""" lport_obj = {} if attachment: lport_obj["vif_uuid"] = attachment lport_obj["type"] = port_type return plug_interface(cluster, lswitch_id, port_id, lport_obj) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/opts.py0000644000175000017500000000304700000000000020733 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import itertools import vmware_nsx.common.config import vmware_nsx.dhcp_meta.lsnmanager import vmware_nsx.dhcp_meta.nsx import vmware_nsx.dvs.dvs_utils def list_opts(): return [('DEFAULT', itertools.chain( vmware_nsx.common.config.cluster_opts, vmware_nsx.common.config.connection_opts, vmware_nsx.common.config.nsx_common_opts)), ('NSX', vmware_nsx.common.config.base_opts), ('NSX_SYNC', vmware_nsx.common.config.sync_opts), ('nsxv', vmware_nsx.common.config.nsxv_opts), ('nsx_v3', vmware_nsx.common.config.nsx_v3_opts), ('dvs', vmware_nsx.dvs.dvs_utils.dvs_opts), ('nsx_tvd', vmware_nsx.common.config.nsx_tvd_opts), ('nsx_p', vmware_nsx.common.config.nsx_p_opts), ('NSX_DHCP', vmware_nsx.dhcp_meta.nsx.dhcp_opts), ('NSX_METADATA', vmware_nsx.dhcp_meta.nsx.metadata_opts), ('NSX_LSN', vmware_nsx.dhcp_meta.lsnmanager.lsn_opts)] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1982536 vmware-nsx-15.0.1.dev143/vmware_nsx/osc/0000755000175000017500000000000000000000000020154 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/osc/__init__.py0000644000175000017500000000000000000000000022253 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/osc/plugin.py0000644000175000017500000000301700000000000022025 0ustar00coreycorey00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from osc_lib import utils from oslo_log import log as logging LOG = logging.getLogger(__name__) DEFAULT_API_VERSION = '2' API_VERSION_OPTION = 'vmware_nsx_api_version' API_NAME = 'nsxclient' API_VERSIONS = { '2.0': 'nsxclient.v2_0.client.Client', '2': 'nsxclient.v2_0.client.Client', } def make_client(instance): """Returns a client.""" nsxclient = utils.get_client_class( API_NAME, instance._api_version[API_NAME], API_VERSIONS) LOG.debug('Instantiating vmware nsx client: %s', nsxclient) client = nsxclient(session=instance.session, region_name=instance._region_name, endpoint_type=instance._interface, insecure=instance._insecure, ca_cert=instance._cacert) return client def build_option_parser(parser): """Hook to add global options""" return parser ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1982536 vmware-nsx-15.0.1.dev143/vmware_nsx/osc/v2/0000755000175000017500000000000000000000000020503 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/osc/v2/__init__.py0000644000175000017500000000000000000000000022602 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/osc/v2/port.py0000644000175000017500000001047000000000000022043 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Port action implementations with nsx extensions""" from openstackclient.network.v2 import port from osc_lib import utils as osc_utils from vmware_nsx._i18n import _ from vmware_nsx.osc.v2 import utils def add_nsx_extensions_to_parser(parser, client_manager, is_create=True): allowed_extensions = utils.get_extensions(client_manager) # Provider security group (only for create action) if (is_create and 'provider-security-group' in allowed_extensions): parser.add_argument( '--provider-security-group', metavar='', action='append', dest='provider_security_groups', help=_("Provider Security group to associate with this port " "(name or ID) " "(repeat option to set multiple security groups)") ) if 'vnic-index' in allowed_extensions: # vnic index parser.add_argument( '--vnic-index', type=int, metavar='', help=_("Vnic index") ) if 'mac-learning' in allowed_extensions: # mac-learning-enabled mac_learning_group = parser.add_mutually_exclusive_group() mac_learning_group.add_argument( '--enable-mac-learning', action='store_true', help=_("Enable MAC learning") ) mac_learning_group.add_argument( '--disable-mac-learning', action='store_true', help=_("Disable MAC learning (Default") ) # overriding the port module global method, to add the nsx extensions super_get_attrs = port._get_attrs def _get_plugin_attrs(client_manager, parsed_args): allowed_extensions = utils.get_extensions(client_manager) attrs = super_get_attrs(client_manager, parsed_args) # 
Provider security groups if 'provider-security-group' in allowed_extensions: if (hasattr(parsed_args, 'provider_security_groups') and parsed_args.provider_security_groups is not None): attrs['provider_security_groups'] = [ client_manager.network.find_security_group( sg, ignore_missing=False).id for sg in parsed_args.provider_security_groups] if 'vnic-index' in allowed_extensions: # Vnic index if parsed_args.vnic_index is not None: attrs['vnic_index'] = parsed_args.vnic_index parsed_args.vnic_index = None if 'mac-learning' in allowed_extensions: # mac-learning-enabled if parsed_args.enable_mac_learning: attrs['mac_learning_enabled'] = True if parsed_args.disable_mac_learning: attrs['mac_learning_enabled'] = False return attrs port._get_attrs = _get_plugin_attrs # Update the port module global _formatters, to format provider security # groups too port._formatters['provider_security_groups'] = osc_utils.format_list class NsxCreatePort(port.CreatePort): """Create a new port with vmware nsx extensions """ def get_parser(self, prog_name): # Add the nsx attributes to the neutron port attributes parser = super(NsxCreatePort, self).get_parser(prog_name) add_nsx_extensions_to_parser(parser, self.app.client_manager, is_create=True) return parser class NsxSetPort(port.SetPort): """Set port properties with vmware nsx extensions """ def get_parser(self, prog_name): # Add the nsx attributes to the neutron port attributes parser = super(NsxSetPort, self).get_parser(prog_name) add_nsx_extensions_to_parser(parser, self.app.client_manager, is_create=False) return parser ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/osc/v2/project_plugin_map.py0000644000175000017500000001007500000000000024741 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """Project Plugin mapping action implementations""" import six from openstack import exceptions as os_exceptions from openstack import resource from openstackclient.i18n import _ from osc_lib.command import command from osc_lib import exceptions as osc_exceptions from osc_lib import utils project_plugin_maps_path = "/project-plugin-maps" class ProjectPluginMap(resource.Resource): resource_key = 'project_plugin_map' resources_key = 'project_plugin_maps' base_path = '/project-plugin-maps' # capabilities allow_create = True allow_get = True allow_update = False allow_delete = False allow_list = True _query_mapping = resource.QueryParameters( 'plugin', 'project', 'tenant_id') # Properties id = resource.Body('id') project = resource.Body('project') plugin = resource.Body('plugin') tenant_id = resource.Body('tenant_id') def _get_columns(item): columns = ['project', 'plugin'] return columns, columns def _get_attrs(parsed_args): attrs = {} if parsed_args.project is not None: attrs['project'] = parsed_args.project if parsed_args.plugin is not None: attrs['plugin'] = parsed_args.plugin return attrs class CreateProjectPluginMap(command.ShowOne): _description = _("Create project plugin map") def get_parser(self, prog_name): parser = super(CreateProjectPluginMap, self).get_parser(prog_name) parser.add_argument( 'project', metavar="", help=_("project") ) parser.add_argument( '--plugin', metavar="", required=True, help=_('Plugin.)') ) return parser def take_action(self, parsed_args): client = self.app.client_manager.network attrs = _get_attrs(parsed_args) try: obj = 
client._create(ProjectPluginMap, **attrs) except os_exceptions.HttpException as exc: msg = _("Error while executing command: %s") % exc.message if exc.details: msg += ", " + six.text_type(exc.details) raise osc_exceptions.CommandError(msg) display_columns, columns = _get_columns(obj) data = utils.get_item_properties(obj, columns, formatters={}) return (display_columns, data) class ListProjectPluginMap(command.Lister): _description = _("List project plugin mappings") def take_action(self, parsed_args): client = self.app.client_manager.network columns = ( 'project', 'plugin' ) column_headers = ( 'Project ID', 'Plugin', ) client = self.app.client_manager.network data = client._list(ProjectPluginMap) return (column_headers, (utils.get_item_properties( s, columns, ) for s in data)) class ShowProjectPluginMap(command.ShowOne): _description = _("Display project plugins mapping") def get_parser(self, prog_name): parser = super(ShowProjectPluginMap, self).get_parser(prog_name) parser.add_argument( 'id', metavar='', help=_('id') ) return parser def take_action(self, parsed_args): client = self.app.client_manager.network obj = client._get(ProjectPluginMap, parsed_args.id) display_columns, columns = _get_columns(obj) data = utils.get_item_properties(obj, columns) return display_columns, data ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/osc/v2/router.py0000644000175000017500000000565100000000000022404 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Router action implementations with nsx extensions""" from openstackclient.network.v2 import router from vmware_nsx._i18n import _ from vmware_nsx.extensions import routersize from vmware_nsx.extensions import routertype from vmware_nsx.osc.v2 import utils def add_nsx_extensions_to_parser(parser, client_manager): if 'nsxv-router-size' in utils.get_extensions(client_manager): # router-size parser.add_argument( '--router-size', metavar='', choices=routersize.VALID_EDGE_SIZES, help=_("Router Size") ) if 'nsxv-router-type' in utils.get_extensions(client_manager): # router-type parser.add_argument( '--router-type', metavar='', choices=routertype.VALID_TYPES, help=_("Router Type") ) # overriding the router module global method, to add the nsx extensions super_get_attrs = router._get_attrs def _get_plugin_attrs(client_manager, parsed_args): attrs = super_get_attrs(client_manager, parsed_args) if 'nsxv-router-type' in utils.get_extensions(client_manager): # Router type if parsed_args.router_type is not None: attrs['router_type'] = parsed_args.router_type parsed_args.router_type = None if 'nsxv-router-size' in utils.get_extensions(client_manager): # Router size if parsed_args.router_size is not None: attrs['router_size'] = parsed_args.router_size parsed_args.router_size = None return attrs router._get_attrs = _get_plugin_attrs class NsxCreateRouter(router.CreateRouter): """Create a new router with vmware nsx extensions """ def get_parser(self, prog_name): # Add the nsx attributes to the neutron router attributes parser = super(NsxCreateRouter, self).get_parser(prog_name) 
add_nsx_extensions_to_parser(parser, self.app.client_manager) return parser class NsxSetRouter(router.SetRouter): """Set router properties with vmware nsx extensions """ def get_parser(self, prog_name): # Add the nsx attributes to the neutron router attributes parser = super(NsxSetRouter, self).get_parser(prog_name) add_nsx_extensions_to_parser(parser, self.app.client_manager) return parser ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/osc/v2/security_group.py0000644000175000017500000001335100000000000024143 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Security group action implementations with nsx extensions""" from osc_lib import utils as osc_utils from osc_lib.utils import tags as _tag from openstackclient.identity import common as identity_common from openstackclient.network.v2 import security_group from vmware_nsx._i18n import _ from vmware_nsx.osc.v2 import utils def add_nsx_extensions_to_parser(parser, client_manager, for_create=True): if 'security-group-logging' in utils.get_extensions(client_manager): # logging logging_enable_group = parser.add_mutually_exclusive_group() logging_enable_group.add_argument( '--logging', action='store_true', help=_("Enable logging") ) logging_enable_group.add_argument( '--no-logging', action='store_true', help=_("Disable logging (default)") ) if ('provider-security-group' in utils.get_extensions(client_manager) and for_create): # provider parser.add_argument( '--provider', action='store_true', help=_("Provider security group") ) if 'security-group-policy' in utils.get_extensions(client_manager): # policy parser.add_argument( '--policy', metavar='', help=_("NSX Policy Id") ) def _get_plugin_attrs(attrs, parsed_args, client_manager): if 'security-group-logging' in utils.get_extensions(client_manager): # logging if parsed_args.logging: attrs['logging'] = True if parsed_args.no_logging: attrs['logging'] = False if 'provider-security-group' in utils.get_extensions(client_manager): # provider if hasattr(parsed_args, 'provider') and parsed_args.provider: attrs['provider'] = True if 'security-group-policy' in utils.get_extensions(client_manager): # policy if parsed_args.policy is not None: attrs['policy'] = parsed_args.policy return attrs class NsxCreateSecurityGroup(security_group.CreateSecurityGroup): """Create a new security_group with vmware nsx extensions """ def take_action_network(self, client, parsed_args): #TODO(asarfaty): Better to change the neutron client code of # CreateSecurityGroup:take_action_network to use an internal # get_attributes, and override only this # 
Build the create attributes. attrs = {} attrs['name'] = parsed_args.name attrs['description'] = self._get_description(parsed_args) if parsed_args.project is not None: identity_client = self.app.client_manager.identity project_id = identity_common.find_project( identity_client, parsed_args.project, parsed_args.project_domain, ).id attrs['tenant_id'] = project_id # add the plugin attributes attrs = _get_plugin_attrs(attrs, parsed_args, self.app.client_manager) # Create the security group and display the results. obj = client.create_security_group(**attrs) # tags cannot be set when created, so tags need to be set later. _tag.update_tags_for_set(client, obj, parsed_args) display_columns, property_columns = security_group._get_columns(obj) data = osc_utils.get_item_properties( obj, property_columns, formatters=security_group._formatters_network ) return (display_columns, data) def update_parser_common(self, parser): parser = super(NsxCreateSecurityGroup, self).update_parser_common( parser) # Add the nsx attributes to the neutron security group attributes add_nsx_extensions_to_parser( parser, self.app.client_manager, for_create=True) return parser class NsxSetSecurityGroup(security_group.SetSecurityGroup): """Set security group properties with vmware nsx extensions """ def take_action_network(self, client, parsed_args): #TODO(asarfaty): Better to change the neutron client code of # CreateSecurityGroup:take_action_network to use an internal # get_attributes, and override only this obj = client.find_security_group(parsed_args.group, ignore_missing=False) attrs = {} if parsed_args.name is not None: attrs['name'] = parsed_args.name if parsed_args.description is not None: attrs['description'] = parsed_args.description # add the plugin attributes attrs = _get_plugin_attrs(attrs, parsed_args, self.app.client_manager) client.update_security_group(obj, **attrs) # tags is a subresource and it needs to be updated separately. 
_tag.update_tags_for_set(client, obj, parsed_args) def update_parser_common(self, parser): parser = super(NsxSetSecurityGroup, self).update_parser_common(parser) # Add the nsx attributes to the neutron security group attributes add_nsx_extensions_to_parser( parser, self.app.client_manager, for_create=False) return parser ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/osc/v2/subnet.py0000644000175000017500000000544600000000000022366 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Subnet extensions action implementations""" from openstackclient.network.v2 import subnet from vmware_nsx._i18n import _ from vmware_nsx.osc.v2 import utils def add_nsx_extensions_to_parser(parser, client_manager): if 'dhcp-mtu' in utils.get_extensions(client_manager): # DHCP MTU parser.add_argument( '--dhcp-mtu', type=int, metavar='', help=_("DHCP MTU") ) if 'dns-search-domain' in utils.get_extensions(client_manager): # DNS search domain parser.add_argument( '--dns-search-domain', metavar='', help=_("DNS search Domain") ) # overriding the subnet module global method, to add the nsx extensions super_get_attrs = subnet._get_attrs def _get_plugin_attrs(client_manager, parsed_args, is_create=True): attrs = super_get_attrs(client_manager, parsed_args, is_create) if 'dhcp-mtu' in utils.get_extensions(client_manager): # DHCP MTU if parsed_args.dhcp_mtu is not None: attrs['dhcp_mtu'] = int(parsed_args.dhcp_mtu) parsed_args.dhcp_mtu = None if 'dns-search-domain' in utils.get_extensions(client_manager): # DNS search domain if parsed_args.dns_search_domain is not None: attrs['dns_search_domain'] = parsed_args.dns_search_domain parsed_args.dns_search_domain = None return attrs subnet._get_attrs = _get_plugin_attrs class NsxCreateSubnet(subnet.CreateSubnet): """Create a new subnet with vmware nsx extensions """ def get_parser(self, prog_name): # Add the nsx attributes to the neutron subnet attributes parser = super(NsxCreateSubnet, self).get_parser(prog_name) add_nsx_extensions_to_parser(parser, self.app.client_manager) return parser class NsxSetSubnet(subnet.SetSubnet): """Set subnet properties with vmware nsx extensions """ def get_parser(self, prog_name): # Add the nsx attributes to the neutron subnet attributes parser = super(NsxSetSubnet, self).get_parser(prog_name) add_nsx_extensions_to_parser(parser, self.app.client_manager) return parser ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 
vmware-nsx-15.0.1.dev143/vmware_nsx/osc/v2/utils.py0000644000175000017500000000262100000000000022216 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from osc_lib import utils as osc_utils cached_extensions = None def get_extensions(client_manager): """Return a list of all current extensions aliases""" # Return previously calculated results global cached_extensions if cached_extensions is not None: return cached_extensions extensions = [] if not client_manager._auth_setup_completed: # cannot get the extensions from the neutron client return extensions # Get supported extensions from the manager data = client_manager.network.extensions() for s in data: prop = osc_utils.get_item_properties( s, ('Alias',), formatters={}) extensions.append(prop[0]) # Save the results in the global cache cached_extensions = extensions return extensions ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugin.py0000644000175000017500000000241200000000000021237 0ustar00coreycorey00000000000000# Copyright 2014 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # Note: this import should be here in order to appear before NeutronDbPluginV2 # in each of the plugins. If not: security-group/-rule will not have all the # relevant extend dict registries. from neutron.db import l3_dvr_db # noqa from vmware_nsx.plugins.dvs import plugin as dvs from vmware_nsx.plugins.nsx import plugin as nsx from vmware_nsx.plugins.nsx_p import plugin as nsx_p from vmware_nsx.plugins.nsx_v import plugin as nsx_v from vmware_nsx.plugins.nsx_v3 import plugin as nsx_v3 NsxDvsPlugin = dvs.NsxDvsV2 NsxVPlugin = nsx_v.NsxVPluginV2 NsxV3Plugin = nsx_v3.NsxV3Plugin NsxPolicyPlugin = nsx_p.NsxPolicyPlugin NsxTVDPlugin = nsx.NsxTVDPlugin ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1982536 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/0000755000175000017500000000000000000000000021051 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/__init__.py0000644000175000017500000000000000000000000023150 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1982536 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/common/0000755000175000017500000000000000000000000022341 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 
vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/common/__init__.py0000644000175000017500000000000000000000000024440 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1982536 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/common/housekeeper/0000755000175000017500000000000000000000000024660 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/common/housekeeper/__init__.py0000644000175000017500000000000000000000000026757 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/common/housekeeper/base_job.py0000644000175000017500000000414000000000000026775 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc from neutron_lib.plugins import directory from oslo_log import log import six LOG = log.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class BaseJob(object): _core_plugin = None def __init__(self, global_readonly, readonly_jobs): job_readonly = global_readonly or (self.get_name() in readonly_jobs) LOG.info('Housekeeping: %s job initialized in %s mode', self.get_name(), 'RO' if job_readonly else 'RW') @property def plugin(self): if not self._core_plugin: self._core_plugin = directory.get_plugin() if self._core_plugin.is_tvd_plugin() is True: # get the plugin that match this driver self._core_plugin = self.get_project_plugin( self._core_plugin) return self._core_plugin @abc.abstractmethod def get_name(self): pass @abc.abstractmethod def get_description(self): pass @abc.abstractmethod def run(self, context): pass @abc.abstractmethod def get_project_plugin(self, plugin): pass def housekeeper_info(info, fmt, *args): msg = fmt % args if info: info = "%s\n%s" % (info, msg) else: info = msg LOG.info("Housekeeping: %s", msg) return info def housekeeper_warning(info, fmt, *args): msg = fmt % args if info: info = "%s\n%s" % (info, msg) else: info = msg LOG.warning("Housekeeping: %s", msg) return info ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/common/housekeeper/housekeeper.py0000644000175000017500000002163700000000000027562 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import smtplib from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText from oslo_config import cfg from oslo_log import log import stevedore from neutron_lib import exceptions as n_exc from vmware_nsx.common import locking LOG = log.getLogger(__name__) ALL_DUMMY_JOB_NAME = 'all' ALL_DUMMY_JOB = { 'name': ALL_DUMMY_JOB_NAME, 'description': 'Execute all housekeepers', 'enabled': True, 'error_count': 0, 'fixed_count': 0, 'error_info': None} class NsxHousekeeper(stevedore.named.NamedExtensionManager): def __init__(self, hk_ns, hk_jobs, hk_readonly, hk_readonly_jobs): self.global_readonly = hk_readonly self.readonly_jobs = hk_readonly_jobs self.email_notifier = None if (cfg.CONF.smtp_gateway and cfg.CONF.smtp_from_addr and cfg.CONF.snmp_to_list): self.email_notifier = HousekeeperEmailNotifier() self.results = {} if self.global_readonly: LOG.info('Housekeeper initialized in readonly mode') else: LOG.info('Housekeeper initialized') self.results = {} self.jobs = {} super(NsxHousekeeper, self).__init__( hk_ns, hk_jobs, invoke_on_load=True, invoke_args=(self.global_readonly, self.readonly_jobs)) LOG.info("Loaded housekeeping job names: %s", self.names()) for job in self: if job.obj.get_name() in hk_jobs: self.jobs[job.obj.get_name()] = job.obj def get(self, job_name): if job_name == ALL_DUMMY_JOB_NAME: return {'name': job_name, 'description': ALL_DUMMY_JOB['description'], 'enabled': job_name in self.jobs, 'error_count': self.results.get( job_name, {}).get('error_count', 0), 'fixed_count': self.results.get( job_name, {}).get('fixed_count', 0), 'error_info': self.results.get( job_name, {}).get('error_info', '')} for job in self: name = job.obj.get_name() if job_name == name: return {'name': job_name, 'description': job.obj.get_description(), 'enabled': job_name in self.jobs, 'error_count': self.results.get( job_name, {}).get('error_count', 0), 
'fixed_count': self.results.get( job_name, {}).get('fixed_count', 0), 'error_info': self.results.get( job_name, {}).get('error_info', '')} raise n_exc.ObjectNotFound(id=job_name) def list(self): results = [{'name': ALL_DUMMY_JOB_NAME, 'description': ALL_DUMMY_JOB['description'], 'enabled': ALL_DUMMY_JOB_NAME in self.jobs, 'error_count': self.results.get( ALL_DUMMY_JOB_NAME, {}).get('error_count', 0), 'fixed_count': self.results.get( ALL_DUMMY_JOB_NAME, {}).get('fixed_count', 0), 'error_info': self.results.get( ALL_DUMMY_JOB_NAME, {}).get('error_info', '')}] for job in self: job_name = job.obj.get_name() results.append({'name': job_name, 'description': job.obj.get_description(), 'enabled': job_name in self.jobs, 'error_count': self.results.get( job_name, {}).get('error_count', 0), 'fixed_count': self.results.get( job_name, {}).get('fixed_count', 0), 'error_info': self.results.get( job_name, {}).get('error_info', '')}) return results def readwrite_allowed(self, job_name): # Check if a job can run in readwrite mode if self.global_readonly: return False non_readonly_jobs = set(self.jobs.keys()) - set(self.readonly_jobs) if job_name == ALL_DUMMY_JOB_NAME: # 'all' readwrite is allowed if it has non readonly jobs if non_readonly_jobs: return True return False else: # specific job is allowed if it is not in the readonly list if job_name in self.readonly_jobs: return False return True def run(self, context, job_name, readonly=False): self.results = {} if context.is_admin: if self.email_notifier: self.email_notifier.start('Cloud Housekeeper Execution Report') with locking.LockManager.get_lock('nsx-housekeeper'): error_count = 0 fixed_count = 0 error_info = '' if job_name == ALL_DUMMY_JOB_NAME: if (not readonly and not self.readwrite_allowed(ALL_DUMMY_JOB_NAME)): raise n_exc.ObjectNotFound(id=ALL_DUMMY_JOB_NAME) for job in self.jobs.values(): if (not readonly and not self.readwrite_allowed(job.get_name())): # skip this job as it is readonly continue result = job.run(context, 
readonly=readonly) if result: if self.email_notifier and result['error_count']: self._add_job_text_to_notifier(job, result) error_count += result['error_count'] fixed_count += result['fixed_count'] error_info += result['error_info'] + "\n" self.results[job_name] = { 'error_count': error_count, 'fixed_count': fixed_count, 'error_info': error_info } else: job = self.jobs.get(job_name) if job: if (not readonly and not self.readwrite_allowed(job_name)): raise n_exc.ObjectNotFound(id=job_name) result = job.run(context, readonly=readonly) if result: error_count = result['error_count'] if self.email_notifier: self._add_job_text_to_notifier(job, result) self.results[job.get_name()] = result else: raise n_exc.ObjectNotFound(id=job_name) if self.email_notifier and error_count: self.email_notifier.send() else: raise n_exc.AdminRequired() def _add_job_text_to_notifier(self, job, result): self.email_notifier.add_text("%s:", job.get_name()) self.email_notifier.add_text( '%d errors found, %d fixed\n%s\n\n', result['error_count'], result['fixed_count'], result['error_info']) class HousekeeperEmailNotifier(object): def __init__(self): self.msg = None self.html = None self.has_text = False def start(self, subject): self.msg = MIMEMultipart('alternative') self.msg['Subject'] = subject self.msg['From'] = cfg.CONF.smtp_from_addr self.msg['To'] = ', '.join(cfg.CONF.snmp_to_list) self.html = '
' self.has_text = False def add_text(self, fmt, *args): self.has_text = True text = fmt % args LOG.debug("Housekeeper emailer adding text %s", text) self.html += text.replace("\n", "
") + "
\n" def send(self): if self.has_text: self.html += "
" part1 = MIMEText(self.html, 'html') self.msg.attach(part1) s = smtplib.SMTP(cfg.CONF.smtp_gateway) s.sendmail(cfg.CONF.smtp_from_addr, cfg.CONF.snmp_to_list, self.msg.as_string()) s.quit() self.msg = None self.html = None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/common/plugin.py0000644000175000017500000005573700000000000024232 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from sqlalchemy.orm import exc from neutron.db import address_scope_db from neutron.db import db_base_plugin_v2 from neutron.db import l3_attrs_db from neutron.db import l3_db from neutron.db import models_v2 from neutron_lib.api.definitions import address_scope as ext_address_scope from neutron_lib.api.definitions import availability_zone as az_def from neutron_lib.api.definitions import network as net_def from neutron_lib.api.definitions import port as port_def from neutron_lib.api.definitions import subnet as subnet_def from neutron_lib.api import validators from neutron_lib.api.validators import availability_zone as az_validator from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants from neutron_lib import context as n_context from neutron_lib.db import api as db_api from neutron_lib.db import resource_extend from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from neutron_lib.utils import net as nl_net_utils from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.extensions import secgroup_rule_local_ip_prefix as sg_prefix from vmware_nsx.services.qos.common import utils as qos_com_utils LOG = logging.getLogger(__name__) @resource_extend.has_resource_extenders class NsxPluginBase(db_base_plugin_v2.NeutronDbPluginV2, address_scope_db.AddressScopeDbMixin): """Common methods for NSX-V, NSX-V3 and NSX-P plugins""" @property def plugin_type(self): return "Unknown" @staticmethod @resource_extend.extends([net_def.COLLECTION_NAME]) def _ext_extend_network_dict(result, netdb): ctx = n_context.get_admin_context() # get the core plugin as this is a static method with no 'self' plugin = directory.get_plugin() with db_api.CONTEXT_WRITER.using(ctx): plugin._extension_manager.extend_network_dict( ctx.session, netdb, result) @staticmethod 
@resource_extend.extends([port_def.COLLECTION_NAME]) def _ext_extend_port_dict(result, portdb): ctx = n_context.get_admin_context() # get the core plugin as this is a static method with no 'self' plugin = directory.get_plugin() with db_api.CONTEXT_WRITER.using(ctx): plugin._extension_manager.extend_port_dict( ctx.session, portdb, result) @staticmethod @resource_extend.extends([subnet_def.COLLECTION_NAME]) def _ext_extend_subnet_dict(result, subnetdb): ctx = n_context.get_admin_context() # get the core plugin as this is a static method with no 'self' plugin = directory.get_plugin() with db_api.CONTEXT_WRITER.using(ctx): plugin._extension_manager.extend_subnet_dict( ctx.session, subnetdb, result) def get_network_az_by_net_id(self, context, network_id): try: network = self.get_network(context, network_id) except Exception: return self.get_default_az() return self.get_network_az(network) def _get_router_interface_ports_by_network( self, context, router_id, network_id): port_filters = {'device_id': [router_id], 'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF], 'network_id': [network_id]} return self.get_ports(context, filters=port_filters) def _get_network_interface_ports(self, context, net_id): port_filters = {'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF], 'network_id': [net_id]} return self.get_ports(context, filters=port_filters) def _get_network_router_ids(self, context, net_id): intf_ports = self._get_network_interface_ports(context, net_id) return [port['device_id'] for port in intf_ports if port['device_id']] def get_router_for_floatingip(self, context, internal_port, internal_subnet, external_network_id): router_id = super(NsxPluginBase, self).get_router_for_floatingip( context, internal_port, internal_subnet, external_network_id) if router_id: router = self._get_router(context.elevated(), router_id) if not router.enable_snat: msg = _("Unable to assign a floating IP to a router that " "has SNAT disabled") raise n_exc.InvalidInput(error_message=msg) return 
router_id def _get_network_address_scope(self, context, net_id): network = self.get_network(context, net_id) return network.get(ext_address_scope.IPV4_ADDRESS_SCOPE) def _get_subnet_address_scope(self, context, subnet_id, subnet=None): if not subnet: subnet = self.get_subnet(context, subnet_id) if not subnet['subnetpool_id']: return subnetpool = self.get_subnetpool(context, subnet['subnetpool_id']) return subnetpool.get('address_scope_id', '') def _get_subnetpool_address_scope(self, context, subnetpool_id): if not subnetpool_id: return subnetpool = self.get_subnetpool(context, subnetpool_id) return subnetpool.get('address_scope_id', '') def _validate_address_scope_for_router_interface(self, context, router_id, gw_network_id, subnet_id, subnet=None): """Validate that the GW address scope is the same as the interface""" gw_address_scope = self._get_network_address_scope(context, gw_network_id) if not gw_address_scope: return subnet_address_scope = self._get_subnet_address_scope( context, subnet_id, subnet=subnet) if (not subnet_address_scope or subnet_address_scope != gw_address_scope): raise nsx_exc.NsxRouterInterfaceDoesNotMatchAddressScope( router_id=router_id, address_scope_id=gw_address_scope) def _get_router_interfaces(self, context, router_id): port_filters = {'device_id': [router_id], 'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF]} return self.get_ports(context, filters=port_filters) def _find_router_subnets_cidrs(self, context, router_id, subnets=None): """Retrieve cidrs of subnets attached to the specified router.""" if not subnets: subnets = self._load_router_subnet_cidrs_from_db(context, router_id) return [subnet['cidr'] for subnet in subnets] def _find_router_subnets_cidrs_per_addr_scope(self, context, router_id, subnets=None): """Generate a list of cidrs per address pool. Go over all the router interface subnets. return a list of lists of subnets cidrs belonging to same address pool. 
""" if not subnets: subnets = self._load_router_subnet_cidrs_from_db(context, router_id) cidrs_map = {} for subnet in subnets: ads = self._get_subnetpool_address_scope( context, subnet['subnetpool_id']) or '' if ads not in cidrs_map: cidrs_map[ads] = [] cidrs_map[ads].append(subnet['cidr']) return list(cidrs_map.values()) def _get_port_by_device_id(self, context, device_id, device_owner): """Retrieve ports associated with a specific device id. Used for retrieving all neutron ports attached to a given router. """ port_qry = context.session.query(models_v2.Port) return port_qry.filter_by( device_id=device_id, device_owner=device_owner,).all() def _update_filters_with_sec_group(self, context, filters=None): if filters is not None: security_groups = filters.pop("security_groups", None) if security_groups: bindings = ( super(NsxPluginBase, self) ._get_port_security_group_bindings(context, filters={'security_group_id': security_groups})) if 'id' in filters: filters['id'] = [entry['port_id'] for entry in bindings if entry['port_id'] in filters['id']] else: filters['id'] = [entry['port_id'] for entry in bindings] def _load_router_subnet_cidrs_from_db(self, context, router_id): """Retrieve subnets attached to the specified router.""" ports = self._get_port_by_device_id(context, router_id, l3_db.DEVICE_OWNER_ROUTER_INTF) # No need to check for overlapping CIDRs subnet_ids = [] for port in ports: for ip in port.get('fixed_ips', []): subnet_ids.append(ip.subnet_id) subnet_qry = context.session.query(models_v2.Subnet) db_subnets = subnet_qry.filter( models_v2.Subnet.id.in_(subnet_ids)).all() subnets = [{'id': subnet.id, 'cidr': subnet.cidr, 'subnetpool_id': subnet.subnetpool_id, 'ip_version': subnet.ip_version, 'network_id': subnet.network_id, 'gateway_ip': subnet.gateway_ip, 'enable_dhcp': subnet.enable_dhcp, 'ipv6_address_mode': subnet.ipv6_address_mode} for subnet in db_subnets] return subnets def _find_router_gw_subnets(self, context, router): """Retrieve external subnets 
attached to router GW""" if not router['external_gateway_info']: return [] subnets = [] for fip in router['external_gateway_info']['external_fixed_ips']: subnet = self.get_subnet(context, fip['subnet_id']) subnets.append(subnet) return subnets def recalculate_snat_rules_for_router(self, context, router, subnets): """Method to recalculate router snat rules for specific subnets. Invoked when subnetpool address scope changes. Implemented in child plugin classes """ pass def recalculate_fw_rules_for_router(self, context, router, subnets): """Method to recalculate router FW rules for specific subnets. Invoked when subnetpool address scope changes. Implemented in child plugin classes """ pass def _filter_subnets_by_subnetpool(self, subnets, subnetpool_id): return [subnet for subnet in subnets if subnet['subnetpool_id'] == subnetpool_id] def on_subnetpool_address_scope_updated(self, resource, event, trigger, payload=None): context = payload.context routers = self.get_routers(context) subnetpool_id = payload.resource_id elevated_context = context.elevated() LOG.info("Inspecting routers for potential configuration changes " "due to address scope change on subnetpool %s", subnetpool_id) for rtr in routers: subnets = self._load_router_subnet_cidrs_from_db(elevated_context, rtr['id']) gw_subnets = self._find_router_gw_subnets(elevated_context, rtr) affected_subnets = self._filter_subnets_by_subnetpool( subnets, subnetpool_id) affected_gw_subnets = self._filter_subnets_by_subnetpool( gw_subnets, subnetpool_id) if not affected_subnets and not affected_gw_subnets: # No subnets were affected by address scope change continue if (affected_subnets == subnets and affected_gw_subnets == gw_subnets): # All subnets remain under the same address scope # (all router subnets were allocated from subnetpool_id) continue # Update east-west FW rules self.recalculate_fw_rules_for_router(context, rtr, affected_subnets) if not rtr['external_gateway_info']: continue if not 
rtr['external_gateway_info']['enable_snat']: LOG.warning("Due to address scope change on subnetpool " "%(subnetpool)s, uniqueness on interface " "addresses on no-snat router %(router) is no " "longer guaranteed, which may result in faulty " "operation.", {'subnetpool': subnetpool_id, 'router': rtr['id']}) continue if affected_gw_subnets: # GW address scope have changed - we need to revisit snat # rules for all router interfaces affected_subnets = subnets self.recalculate_snat_rules_for_router(context, rtr, affected_subnets) def _validate_max_ips_per_port(self, fixed_ip_list, device_owner): """Validate the number of fixed ips on a port Do not allow multiple ip addresses on a port since the nsx backend cannot add multiple static dhcp bindings with the same port """ if (device_owner and nl_net_utils.is_port_trusted({'device_owner': device_owner})): return if validators.is_attr_set(fixed_ip_list) and len(fixed_ip_list) > 1: msg = _('Exceeded maximum amount of fixed ips per port') raise n_exc.InvalidInput(error_message=msg) def _extract_external_gw(self, context, router, is_extract=True): r = router['router'] gw_info = constants.ATTR_NOT_SPECIFIED # First extract the gateway info in case of updating # gateway before edge is deployed. 
if 'external_gateway_info' in r: gw_info = r.get('external_gateway_info', {}) if is_extract: del r['external_gateway_info'] network_id = (gw_info.get('network_id') if gw_info else None) if network_id: ext_net = self._get_network(context, network_id) if not ext_net.external: msg = (_("Network '%s' is not a valid external network") % network_id) raise n_exc.BadRequest(resource='router', msg=msg) subnets = self._get_subnets_by_network(context.elevated(), network_id) if not subnets: msg = _("Cannot update gateway on Network '%s' " "with no subnet") % network_id raise n_exc.BadRequest(resource='router', msg=msg) return gw_info def get_subnets_by_network(self, context, network_id): return [self._make_subnet_dict(subnet_obj) for subnet_obj in self._get_subnets_by_network(context.elevated(), network_id)] def _validate_routes(self, context, router_id, routes): super(NsxPluginBase, self)._validate_routes( context, router_id, routes) # do not allow adding a default route. NSX-v/v3 don't support it for route in routes: if (route.get('destination', '').startswith('0.0.0.0/') or route.get('destination', '').startswith('::/')): msg = _("Cannot set a default route using static routes") raise n_exc.BadRequest(resource='router', msg=msg) @staticmethod @resource_extend.extends([net_def.COLLECTION_NAME]) def _extend_availability_zone_hints(net_res, net_db): net_res[az_def.AZ_HINTS] = az_validator.convert_az_string_to_list( net_db[az_def.AZ_HINTS]) def _validate_external_subnet(self, context, network_id): if self._network_is_external(context, network_id): err_msg = _("Can not enable DHCP on external network") raise n_exc.InvalidInput(error_message=err_msg) def _validate_host_routes_input(self, subnet_input, orig_enable_dhcp=None, orig_host_routes=None): s = subnet_input['subnet'] request_host_routes = (validators.is_attr_set(s.get('host_routes')) and s['host_routes']) clear_host_routes = (validators.is_attr_set(s.get('host_routes')) and not s['host_routes']) request_enable_dhcp = 
s.get('enable_dhcp') if request_enable_dhcp is False: if (request_host_routes or not clear_host_routes and orig_host_routes): err_msg = _("Can't disable DHCP while using host routes") raise n_exc.InvalidInput(error_message=err_msg) if request_host_routes: if not request_enable_dhcp and orig_enable_dhcp is False: err_msg = _("Host routes can only be supported when DHCP " "is enabled") raise n_exc.InvalidInput(error_message=err_msg) def _validate_qos_policy_id(self, context, qos_policy_id): if qos_policy_id: qos_com_utils.validate_policy_accessable(context, qos_policy_id) def _process_extra_attr_router_create(self, context, router_db, r): for extra_attr in l3_attrs_db.get_attr_info().keys(): if (extra_attr in r and validators.is_attr_set(r.get(extra_attr))): self.set_extra_attr_value(context, router_db, extra_attr, r[extra_attr]) def _ensure_default_security_group(self, context, tenant_id): try: return super(NsxPluginBase, self)._ensure_default_security_group( context, tenant_id) except exc.FlushError: # This means that another worker already created this default SG LOG.info("_ensure_default_security_group fail for project %s. 
" "Default security group already created", tenant_id) return self._get_default_sg_id(context, tenant_id) def _assert_on_assoc_floatingip_to_special_ports(self, fip, internal_port): """Do not allow attaching fip to dedicated ports""" port_id = fip.get('port_id') dev_owner = internal_port.get('device_owner', '') if (port_id and dev_owner and (dev_owner in constants.ROUTER_INTERFACE_OWNERS or dev_owner == constants.DEVICE_OWNER_DHCP)): msg = _('Associating floatingip to %s port is ' 'restricted') % dev_owner raise n_exc.BadRequest(resource='floatingip', msg=msg) def _fix_sg_rule_dict_ips(self, sg_rule): # 0.0.0.0/# and ::/ are not valid entries for local and remote so we # need to change this to None if (sg_rule.get('remote_ip_prefix') and (sg_rule['remote_ip_prefix'].startswith('0.0.0.0/') or sg_rule['remote_ip_prefix'].startswith('::/'))): sg_rule['remote_ip_prefix'] = None if (sg_rule.get(sg_prefix.LOCAL_IP_PREFIX) and validators.is_attr_set(sg_rule[sg_prefix.LOCAL_IP_PREFIX]) and (sg_rule[sg_prefix.LOCAL_IP_PREFIX].startswith('0.0.0.0/') or sg_rule[sg_prefix.LOCAL_IP_PREFIX].startswith('::/'))): sg_rule[sg_prefix.LOCAL_IP_PREFIX] = None def _log_get_ports(self, ports, filters): if len(ports) > 0: LOG.debug("Getting %(num)s port%(plural)s with %(filters)s", {'num': len(ports), 'plural': 's' if len(ports) > 1 else '', 'filters': ('filters ' + str(filters) if filters else 'no filters')}) def get_housekeeper(self, context, name, fields=None): # run the job in readonly mode and get the results self.housekeeper.run(context, name, readonly=True) return self.housekeeper.get(name) if self.housekeeper else None def get_housekeepers(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): return self.housekeeper.list() if self.housekeeper else [] def update_housekeeper(self, context, name, housekeeper): # run the job in non-readonly mode and get the results if not self.housekeeper.readwrite_allowed(name): err_msg = (_("Can not run 
housekeeper job %s in readwrite " "mode") % name) raise n_exc.InvalidInput(error_message=err_msg) self.housekeeper.run(context, name, readonly=False) return self.housekeeper.get(name) def get_housekeepers_count(self, context, filters=None): return len(self.housekeeper.list()) if self.housekeeper else 0 # Register the callback def _validate_network_has_subnet(resource, event, trigger, payload=None): network_id = payload.metadata.get('network_id') subnets = payload.metadata.get('subnets') if not subnets: msg = _('No subnet defined on network %s') % network_id raise n_exc.InvalidInput(error_message=msg) def _delete_sg_group_related_rules(resource, event, trigger, **kwargs): """Upon SG deletion, call the explicit delete method for rules with that SG as the remote one. Otherwise those will be deleted with on_delete cascade, leaving the NSX backend unaware. """ sg_id = kwargs["security_group"]["id"] context = kwargs["context"] core_plugin = directory.get_plugin() filters = {'remote_group_id': [sg_id]} rules = core_plugin.get_security_group_rules(context, filters=filters) for rule in rules: if rule['security_group_id'] == sg_id: continue LOG.info("Deleting SG rule %s because of remote group %s deletion", rule['id'], sg_id) core_plugin.delete_security_group_rule(context, rule["id"]) def subscribe(): registry.subscribe(_validate_network_has_subnet, resources.ROUTER_GATEWAY, events.BEFORE_CREATE) registry.subscribe(_delete_sg_group_related_rules, resources.SECURITY_GROUP, events.PRECOMMIT_DELETE) subscribe() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1982536 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/common_v3/0000755000175000017500000000000000000000000022751 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/common_v3/__init__.py0000644000175000017500000000000000000000000025050 
0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/common_v3/availability_zones.py0000644000175000017500000001235400000000000027220 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from vmware_nsx._i18n import _ from vmware_nsx.common import availability_zones as common_az from vmware_nsx.common import exceptions as nsx_exc class NsxV3AvailabilityZone(common_az.ConfiguredAvailabilityZone): def init_from_config_line(self, config_line): # Not supported for nsx_v3 (old configuration) raise nsx_exc.NsxInvalidConfiguration( opt_name="availability_zones", opt_value=config_line, reason=_("Expected a list of names")) def _has_native_dhcp_metadata(self): # May be overridden by children return True def get_az_opts(self): # Should be implemented by children pass def init_from_config_section(self, az_name, mandatory_dhcp=True): az_info = self.get_az_opts() if self._has_native_dhcp_metadata(): # The optional parameters will get the global values if not # defined for this AZ self.metadata_proxy = az_info.get('metadata_proxy') if not self.metadata_proxy: raise nsx_exc.NsxInvalidConfiguration( opt_name="metadata_proxy", opt_value='None', reason=(_("metadata_proxy for availability zone %s " "must be defined") % az_name)) # This is mandatory only if using MP dhcp self.dhcp_profile = 
az_info.get('dhcp_profile') if not self.dhcp_profile and mandatory_dhcp: raise nsx_exc.NsxInvalidConfiguration( opt_name="dhcp_profile", opt_value='None', reason=(_("dhcp_profile for availability zone %s " "must be defined") % az_name)) native_metadata_route = az_info.get('native_metadata_route') if native_metadata_route: self.native_metadata_route = native_metadata_route else: self.metadata_proxy = None self.dhcp_profile = None self.native_metadata_route = None default_overlay_tz = az_info.get('default_overlay_tz') if default_overlay_tz: self.default_overlay_tz = default_overlay_tz default_vlan_tz = az_info.get('default_vlan_tz') if default_vlan_tz: self.default_vlan_tz = default_vlan_tz default_tier0_router = az_info.get('default_tier0_router') if default_tier0_router: self.default_tier0_router = default_tier0_router dns_domain = az_info.get('dns_domain') if dns_domain: self.dns_domain = dns_domain nameservers = az_info.get('nameservers') if nameservers: self.nameservers = nameservers edge_cluster = az_info.get('edge_cluster') if edge_cluster: self.edge_cluster = edge_cluster def init_defaults(self): # Should be implemented by children pass def _translate_dhcp_profile(self, nsxlib, search_scope=None): if self.dhcp_profile: dhcp_id = None if search_scope: # Find the TZ by its tag dhcp_id = nsxlib.get_id_by_resource_and_tag( nsxlib.native_dhcp_profile.resource_type, search_scope, self.dhcp_profile) if not dhcp_id: dhcp_id = nsxlib.native_dhcp_profile.get_id_by_name_or_id( self.dhcp_profile) self._native_dhcp_profile_uuid = dhcp_id else: self._native_dhcp_profile_uuid = None def _translate_metadata_proxy(self, nsxlib, search_scope=None): if self.metadata_proxy: proxy_id = None if search_scope: # Find the TZ by its tag proxy_id = nsxlib.get_id_by_resource_and_tag( nsxlib.native_md_proxy.resource_type, search_scope, self.metadata_proxy) if not proxy_id: proxy_id = nsxlib.native_md_proxy.get_id_by_name_or_id( self.metadata_proxy) self._native_md_proxy_uuid = proxy_id 
else: self._native_md_proxy_uuid = None def translate_configured_names_to_uuids(self, nsxlib): # May be overridden by children # Default implementation assumes UUID is provided in config self._default_overlay_tz_uuid = self.default_overlay_tz self._default_vlan_tz_uuid = self.default_vlan_tz self._default_tier0_router = self.default_tier0_router self._native_dhcp_profile_uuid = self.dhcp_profile self._native_md_proxy_uuid = self.metadata_proxy ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/common_v3/plugin.py0000644000175000017500000042377000000000000024636 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import decorator import mock import netaddr from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging import oslo_messaging from oslo_utils import excutils from sqlalchemy import exc as sql_exc import webob.exc from six import moves from six import string_types from neutron.db import agents_db from neutron.db import agentschedulers_db from neutron.db import allowedaddresspairs_db as addr_pair_db from neutron.db.availability_zone import router as router_az_db from neutron.db import db_base_plugin_v2 from neutron.db import dns_db from neutron.db import external_net_db from neutron.db import extradhcpopt_db from neutron.db import extraroute_db from neutron.db import l3_attrs_db from neutron.db import l3_db from neutron.db import l3_gwmode_db from neutron.db.models import securitygroup as securitygroup_model from neutron.db import models_v2 from neutron.db import portbindings_db from neutron.db import portsecurity_db from neutron.db import securitygroups_db from neutron.db import vlantransparent_db from neutron.extensions import securitygroup as ext_sg from neutron.extensions import tagging from neutron_lib.agent import topics from neutron_lib.api.definitions import allowedaddresspairs as addr_apidef from neutron_lib.api.definitions import availability_zone as az_def from neutron_lib.api.definitions import external_net as extnet_apidef from neutron_lib.api.definitions import extra_dhcp_opt as ext_edo from neutron_lib.api.definitions import l3 as l3_apidef from neutron_lib.api.definitions import port_security as psec from neutron_lib.api.definitions import portbindings as pbin from neutron_lib.api.definitions import provider_net as pnet from neutron_lib.api import faults from neutron_lib.api import validators from neutron_lib.api.validators import availability_zone as az_validator from neutron_lib import constants from neutron_lib.db import api as db_api from neutron_lib.db import utils as db_utils from neutron_lib import 
exceptions as n_exc from neutron_lib.exceptions import allowedaddresspairs as addr_exc from neutron_lib.exceptions import l3 as l3_exc from neutron_lib.exceptions import port_security as psec_exc from neutron_lib.plugins import directory from neutron_lib.plugins import utils as plugin_utils from neutron_lib import rpc as n_rpc from neutron_lib.services.qos import constants as qos_consts from neutron_lib.utils import helpers from neutron_lib.utils import net as nl_net_utils from vmware_nsx.api_replay import utils as api_replay_utils from vmware_nsx.common import availability_zones as nsx_com_az from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import locking from vmware_nsx.common import nsx_constants from vmware_nsx.common import utils from vmware_nsx.db import db as nsx_db from vmware_nsx.db import extended_security_group as extended_sec from vmware_nsx.db import extended_security_group_rule as extend_sg_rule from vmware_nsx.db import maclearning as mac_db from vmware_nsx.db import nsx_portbindings_db as pbin_db from vmware_nsx.extensions import advancedserviceproviders as as_providers from vmware_nsx.extensions import maclearning as mac_ext from vmware_nsx.extensions import providersecuritygroup as provider_sg from vmware_nsx.plugins.common import plugin from vmware_nsx.services.qos.common import utils as qos_com_utils from vmware_nsx.services.vpnaas.common_v3 import ipsec_utils from vmware_nsxlib.v3 import exceptions as nsx_lib_exc from vmware_nsxlib.v3 import nsx_constants as nsxlib_consts from vmware_nsxlib.v3 import utils as nsxlib_utils LOG = logging.getLogger(__name__) @decorator.decorator def api_replay_mode_wrapper(f, *args, **kwargs): if cfg.CONF.api_replay_mode: # NOTE(arosen): the mock.patch here is needed for api_replay_mode with mock.patch("neutron_lib.plugins.utils._fixup_res_dict", side_effect=api_replay_utils._fixup_res_dict): return f(*args, **kwargs) else: return f(*args, **kwargs) # NOTE(asarfaty): the order of 
inheritance here is important. in order for the # QoS notification to work, the AgentScheduler init must be called first # NOTE(arosen): same is true with the ExtendedSecurityGroupPropertiesMixin # this needs to be above securitygroups_db.SecurityGroupDbMixin. # FIXME(arosen): we can solve this inheritance order issue by just mixining in # the classes into a new class to handle the order correctly. class NsxPluginV3Base(agentschedulers_db.AZDhcpAgentSchedulerDbMixin, addr_pair_db.AllowedAddressPairsMixin, plugin.NsxPluginBase, extended_sec.ExtendedSecurityGroupPropertiesMixin, pbin_db.NsxPortBindingMixin, extend_sg_rule.ExtendedSecurityGroupRuleMixin, securitygroups_db.SecurityGroupDbMixin, external_net_db.External_net_db_mixin, extraroute_db.ExtraRoute_db_mixin, router_az_db.RouterAvailabilityZoneMixin, l3_gwmode_db.L3_NAT_db_mixin, portbindings_db.PortBindingMixin, portsecurity_db.PortSecurityDbMixin, extradhcpopt_db.ExtraDhcpOptMixin, dns_db.DNSDbMixin, vlantransparent_db.Vlantransparent_db_mixin, mac_db.MacLearningDbMixin, l3_attrs_db.ExtraAttributesMixin, nsx_com_az.NSXAvailabilityZonesPluginCommon): """Common methods for NSX-V3 plugins (NSX-V3 & Policy)""" def __init__(self): super(NsxPluginV3Base, self).__init__() self._network_vlans = plugin_utils.parse_network_vlan_ranges( self._get_conf_attr('network_vlan_ranges')) self._native_dhcp_enabled = False self.start_rpc_listeners_called = False def _init_native_dhcp(self): if not self.nsxlib: self._native_dhcp_enabled = False return self._native_dhcp_enabled = True for az in self.get_azs_list(): if not az._native_dhcp_profile_uuid: LOG.error("Unable to retrieve DHCP Profile %s for " "availability zone %s, " "native DHCP service is not supported", az.name, az.dhcp_profile) self._native_dhcp_enabled = False def _init_native_metadata(self): for az in self.get_azs_list(): if not az._native_md_proxy_uuid: LOG.error("Unable to retrieve Metadata Proxy %s for " "availability zone %s, " "native metadata service is not 
supported", az.name, az.metadata_proxy) def _extend_fault_map(self): """Extends the Neutron Fault Map. Exceptions specific to the NSX Plugin are mapped to standard HTTP Exceptions. """ faults.FAULT_MAP.update({nsx_lib_exc.InvalidInput: webob.exc.HTTPBadRequest, nsx_lib_exc.ServiceClusterUnavailable: webob.exc.HTTPServiceUnavailable, nsx_lib_exc.ClientCertificateNotTrusted: webob.exc.HTTPBadRequest, nsx_exc.SecurityGroupMaximumCapacityReached: webob.exc.HTTPBadRequest, nsx_lib_exc.NsxLibInvalidInput: webob.exc.HTTPBadRequest, nsx_exc.NsxENSPortSecurity: webob.exc.HTTPBadRequest, nsx_exc.NsxPluginTemporaryError: webob.exc.HTTPServiceUnavailable, nsx_lib_exc.TooManyRequests: webob.exc.HTTPServiceUnavailable }) def _get_conf_attr(self, attr): plugin_cfg = getattr(cfg.CONF, self.cfg_group) return getattr(plugin_cfg, attr) def _setup_rpc(self): """Should be implemented by each plugin""" pass @property def support_external_port_tagging(self): # oslo_messaging_notifications must be defined for this to work if (cfg.CONF.oslo_messaging_notifications.driver and self._get_conf_attr('support_nsx_port_tagging')): return True return False def update_port_nsx_tags(self, context, port_id, tags, is_delete=False): """Can be implemented by each plugin to update the backend port tags""" pass def start_rpc_listeners(self): if self.start_rpc_listeners_called: # If called more than once - we should not create it again return self.conn.consume_in_threads() self._setup_rpc() self.topic = topics.PLUGIN self.conn = n_rpc.Connection() self.conn.create_consumer(self.topic, self.endpoints, fanout=False) self.conn.create_consumer(topics.REPORTS, [agents_db.AgentExtRpcCallback()], fanout=False) self.start_rpc_listeners_called = True if self.support_external_port_tagging: self.start_tagging_rpc_listener() return self.conn.consume_in_threads() def start_tagging_rpc_listener(self): # Add listener for tags plugin notifications transport = oslo_messaging.get_notification_transport(cfg.CONF) targets = 
[oslo_messaging.Target( topic=cfg.CONF.oslo_messaging_notifications.topics[0])] endpoints = [TagsCallbacks()] pool = "tags-listeners" server = oslo_messaging.get_notification_listener(transport, targets, endpoints, pool=pool) server.start() server.wait() def _translate_external_tag(self, external_tag, port_id): tag_parts = external_tag.split(':') if len(tag_parts) != 2: LOG.warning("Skipping tag %s for port %s: wrong format", external_tag, port_id) else: return {'scope': tag_parts[0][:nsxlib_utils.MAX_RESOURCE_TYPE_LEN], 'tag': tag_parts[1][:nsxlib_utils.MAX_TAG_LEN]} def _translate_external_tags(self, external_tags, port_id): new_tags = [] for tag in external_tags: new_tag = self._translate_external_tag(tag, port_id) if new_tag: new_tags.append(new_tag) return new_tags def get_external_tags_for_port(self, context, port_id): tags_plugin = directory.get_plugin(tagging.TAG_PLUGIN_TYPE) if tags_plugin: extra_tags = tags_plugin.get_tags(context, 'ports', port_id) return self._translate_external_tags(extra_tags['tags'], port_id) def _get_interface_subnet(self, context, interface_info): is_port, is_sub = self._validate_interface_info(interface_info) subnet_id = None if is_sub: subnet_id = interface_info.get('subnet_id') if not subnet_id: port_id = interface_info['port_id'] port = self.get_port(context, port_id) if 'fixed_ips' in port and port['fixed_ips']: if len(port['fixed_ips']) > 1: # This should never happen since router interface is per # IP version, and we allow single fixed ip per ip version return subnet_id = port['fixed_ips'][0]['subnet_id'] if subnet_id: return self.get_subnet(context, subnet_id) def _get_interface_network_id(self, context, interface_info, subnet=None): if subnet: return subnet['network_id'] is_port, is_sub = self._validate_interface_info(interface_info) if is_port: net_id = self.get_port(context, interface_info['port_id'])['network_id'] elif is_sub: net_id = self.get_subnet(context, interface_info['subnet_id'])['network_id'] return net_id def 
_validate_interface_address_scope(self, context, router_db, interface_subnet): gw_network_id = (router_db.gw_port.network_id if router_db.gw_port else None) if not router_db.enable_snat and gw_network_id: self._validate_address_scope_for_router_interface( context.elevated(), router_db.id, gw_network_id, interface_subnet['id'], subnet=interface_subnet) def _validate_address_pairs(self, address_pairs): for pair in address_pairs: ip = pair.get('ip_address') # Validate ipv4 cidrs (No limitation on ipv6): if ':' not in ip: if len(ip.split('/')) > 1 and ip.split('/')[1] != '32': LOG.error("cidr %s is not supported in allowed address " "pairs", ip) raise nsx_exc.InvalidIPAddress(ip_address=ip) def _validate_number_of_address_pairs(self, port): address_pairs = port.get(addr_apidef.ADDRESS_PAIRS) num_allowed_on_backend = nsxlib_consts.NUM_ALLOWED_IP_ADDRESSES # Counting existing ports to take into account. If no fixed ips # are defined - we set it to 3 in order to reserve 2 fixed and another # for DHCP. 
existing_fixed_ips = len(port.get('fixed_ips', [])) if existing_fixed_ips == 0: existing_fixed_ips = 3 else: existing_fixed_ips += 1 if address_pairs: max_addr_pairs = num_allowed_on_backend - existing_fixed_ips if len(address_pairs) > max_addr_pairs: err_msg = (_("Maximum of %(max)s address pairs can be defined " "for this port on the NSX backend") % {'max': max_addr_pairs}) raise n_exc.InvalidInput(error_message=err_msg) def _create_port_address_pairs(self, context, port_data): (port_security, has_ip) = self._determine_port_security_and_has_ip( context, port_data) address_pairs = port_data.get(addr_apidef.ADDRESS_PAIRS) if validators.is_attr_set(address_pairs): if not port_security: raise addr_exc.AddressPairAndPortSecurityRequired() else: self._validate_address_pairs(address_pairs) self._validate_number_of_address_pairs(port_data) self._process_create_allowed_address_pairs(context, port_data, address_pairs) else: port_data[addr_apidef.ADDRESS_PAIRS] = [] def _provider_sgs_specified(self, port_data): # checks if security groups were updated adding/modifying # security groups, port security is set and port has ip provider_sgs_specified = (validators.is_attr_set( port_data.get(provider_sg.PROVIDER_SECURITYGROUPS)) and port_data.get(provider_sg.PROVIDER_SECURITYGROUPS) != []) return provider_sgs_specified def _create_port_preprocess_security( self, context, port, port_data, neutron_db, is_ens_tz_port): (port_security, has_ip) = self._determine_port_security_and_has_ip( context, port_data) port_data[psec.PORTSECURITY] = port_security # No port security is allowed if the port belongs to an ENS TZ if (port_security and is_ens_tz_port and not self._ens_psec_supported()): raise nsx_exc.NsxENSPortSecurity() self._process_port_port_security_create( context, port_data, neutron_db) # allowed address pair checks self._create_port_address_pairs(context, port_data) if port_security and has_ip: self._ensure_default_security_group_on_port(context, port) (sgids, psgids) = 
self._get_port_security_groups_lists( context, port) elif (self._check_update_has_security_groups({'port': port_data}) or self._provider_sgs_specified(port_data) or self._get_provider_security_groups_on_port(context, port)): LOG.error("Port has conflicting port security status and " "security groups") raise psec_exc.PortSecurityAndIPRequiredForSecurityGroups() else: sgids = psgids = [] port_data[ext_sg.SECURITYGROUPS] = ( self._get_security_groups_on_port(context, port)) return port_security, has_ip, sgids, psgids def _should_validate_port_sec_on_update_port(self, port_data): # Need to determine if we skip validations for port security. # This is the edge case when the subnet is deleted. # This should be called prior to deleting the fixed ip from the # port data for fixed_ip in port_data.get('fixed_ips', []): if 'delete_subnet' in fixed_ip: return False return True def _update_port_preprocess_security( self, context, port, id, updated_port, is_ens_tz_port, validate_port_sec=True, direct_vnic_type=False): delete_addr_pairs = self._check_update_deletes_allowed_address_pairs( port) has_addr_pairs = self._check_update_has_allowed_address_pairs(port) has_security_groups = self._check_update_has_security_groups(port) delete_security_groups = self._check_update_deletes_security_groups( port) # populate port_security setting port_data = port['port'] if psec.PORTSECURITY not in port_data: updated_port[psec.PORTSECURITY] = \ self._get_port_security_binding(context, id) has_ip = self._ip_on_port(updated_port) # validate port security and allowed address pairs if not updated_port[psec.PORTSECURITY]: # has address pairs in request if has_addr_pairs: raise addr_exc.AddressPairAndPortSecurityRequired() elif not delete_addr_pairs: # check if address pairs are in db updated_port[addr_apidef.ADDRESS_PAIRS] = ( self.get_allowed_address_pairs(context, id)) if updated_port[addr_apidef.ADDRESS_PAIRS]: raise addr_exc.AddressPairAndPortSecurityRequired() if delete_addr_pairs or 
has_addr_pairs: self._validate_address_pairs( updated_port[addr_apidef.ADDRESS_PAIRS]) # delete address pairs and read them in self._delete_allowed_address_pairs(context, id) self._process_create_allowed_address_pairs( context, updated_port, updated_port[addr_apidef.ADDRESS_PAIRS]) if updated_port[psec.PORTSECURITY] and psec.PORTSECURITY in port_data: # No port security is allowed if the port belongs to an ENS TZ if is_ens_tz_port and not self._ens_psec_supported(): raise nsx_exc.NsxENSPortSecurity() # No port security is allowed if the port has a direct vnic type if direct_vnic_type: err_msg = _("Security features are not supported for " "ports with direct/direct-physical VNIC type") raise n_exc.InvalidInput(error_message=err_msg) # checks if security groups were updated adding/modifying # security groups, port security is set and port has ip provider_sgs_specified = self._provider_sgs_specified(updated_port) if (validate_port_sec and not (has_ip and updated_port[psec.PORTSECURITY])): if has_security_groups or provider_sgs_specified: LOG.error("Port has conflicting port security status and " "security groups") raise psec_exc.PortSecurityAndIPRequiredForSecurityGroups() # Update did not have security groups passed in. Check # that port does not have any security groups already on it. filters = {'port_id': [id]} security_groups = ( super(NsxPluginV3Base, self)._get_port_security_group_bindings( context, filters) ) if security_groups and not delete_security_groups: raise psec_exc.PortSecurityPortHasSecurityGroup() if delete_security_groups or has_security_groups: # delete the port binding and read it with the new rules. 
self._delete_port_security_group_bindings(context, id) sgids = self._get_security_groups_on_port(context, port) self._process_port_create_security_group(context, updated_port, sgids) if psec.PORTSECURITY in port['port']: self._process_port_port_security_update( context, port['port'], updated_port) return updated_port def _validate_create_network(self, context, net_data): """Validate the parameters of the new network to be created This method includes general validations that does not depend on provider attributes, or plugin specific configurations """ external = net_data.get(extnet_apidef.EXTERNAL) is_external_net = validators.is_attr_set(external) and external with_qos = validators.is_attr_set( net_data.get(qos_consts.QOS_POLICY_ID)) if with_qos: self._validate_qos_policy_id( context, net_data.get(qos_consts.QOS_POLICY_ID)) if is_external_net: raise nsx_exc.QoSOnExternalNet() def _validate_update_network(self, context, net_id, original_net, net_data): """Validate the updated parameters of a network This method includes general validations that does not depend on provider attributes, or plugin specific configurations """ extern_net = self._network_is_external(context, net_id) with_qos = validators.is_attr_set( net_data.get(qos_consts.QOS_POLICY_ID)) # Do not allow QoS on external networks if with_qos: if extern_net: raise nsx_exc.QoSOnExternalNet() self._validate_qos_policy_id( context, net_data.get(qos_consts.QOS_POLICY_ID)) # Do not support changing external/non-external networks if (extnet_apidef.EXTERNAL in net_data and net_data[extnet_apidef.EXTERNAL] != extern_net): err_msg = _("Cannot change the router:external flag of a network") raise n_exc.InvalidInput(error_message=err_msg) is_ens_net = self._is_ens_tz_net(context, net_id) if is_ens_net: self._assert_on_ens_with_qos(net_data) def _assert_on_illegal_port_with_qos(self, device_owner): # Prevent creating/update port with QoS policy # on router-interface/network-dhcp ports. 
if ((device_owner == l3_db.DEVICE_OWNER_ROUTER_INTF or device_owner == constants.DEVICE_OWNER_DHCP)): err_msg = _("Unable to create or update %s port with a QoS " "policy") % device_owner LOG.warning(err_msg) raise n_exc.InvalidInput(error_message=err_msg) def _assert_on_external_net_with_compute(self, port_data): # Prevent creating port with device owner prefix 'compute' # on external networks. device_owner = port_data.get('device_owner') if (device_owner is not None and device_owner.startswith(constants.DEVICE_OWNER_COMPUTE_PREFIX)): err_msg = _("Unable to update/create a port with an external " "network") LOG.warning(err_msg) raise n_exc.InvalidInput(error_message=err_msg) def _validate_ens_create_port(self, context, port_data): if self._ens_qos_supported(): return qos_selected = validators.is_attr_set(port_data.get( qos_consts.QOS_POLICY_ID)) if qos_selected: err_msg = _("Cannot configure QOS on ENS networks") raise n_exc.InvalidInput(error_message=err_msg) def _assert_on_port_admin_state(self, port_data, device_owner): """Do not allow changing the admin state of some ports""" if (device_owner == l3_db.DEVICE_OWNER_ROUTER_INTF or device_owner == l3_db.DEVICE_OWNER_ROUTER_GW): if port_data.get("admin_state_up") is False: err_msg = _("admin_state_up=False router ports are not " "supported") LOG.warning(err_msg) raise n_exc.InvalidInput(error_message=err_msg) def _validate_max_ips_per_port(self, context, fixed_ip_list, device_owner): """Validate the number of fixed ips on a port Do not allow multiple ip addresses on a port since the nsx backend cannot add multiple static dhcp bindings with the same port """ if (device_owner and nl_net_utils.is_port_trusted({'device_owner': device_owner})): return if not validators.is_attr_set(fixed_ip_list): return msg = _('Exceeded maximum amount of fixed ips per port and ip version') if len(fixed_ip_list) > 2: raise n_exc.InvalidInput(error_message=msg) if len(fixed_ip_list) < 2: return def get_fixed_ip_version(i): if 
'ip_address' in fixed_ip_list[i]: return netaddr.IPAddress( fixed_ip_list[i]['ip_address']).version if 'subnet_id' in fixed_ip_list[i]: subnet = self.get_subnet(context.elevated(), fixed_ip_list[i]['subnet_id']) return subnet['ip_version'] ipver1 = get_fixed_ip_version(0) ipver2 = get_fixed_ip_version(1) if ipver1 and ipver2 and ipver1 != ipver2: # One fixed IP is allowed for each IP version return raise n_exc.InvalidInput(error_message=msg) def _get_subnets_for_fixed_ips_on_port(self, context, port_data): # get the subnet id from the fixed ips of the port if 'fixed_ips' in port_data and port_data['fixed_ips']: subnet_ids = (fixed_ip['subnet_id'] for fixed_ip in port_data['fixed_ips']) return (self._get_subnet(context.elevated(), subnet_id) for subnet_id in subnet_ids) return [] def _validate_create_port(self, context, port_data): self._validate_max_ips_per_port(context, port_data.get('fixed_ips', []), port_data.get('device_owner')) is_external_net = self._network_is_external( context, port_data['network_id']) qos_selected = validators.is_attr_set(port_data.get( qos_consts.QOS_POLICY_ID)) device_owner = port_data.get('device_owner') # QoS validations if qos_selected: self._validate_qos_policy_id( context, port_data.get(qos_consts.QOS_POLICY_ID)) self._assert_on_illegal_port_with_qos(device_owner) if is_external_net: raise nsx_exc.QoSOnExternalNet() is_ens_tz_port = self._is_ens_tz_port(context, port_data) if is_ens_tz_port: self._validate_ens_create_port(context, port_data) # External network validations: if is_external_net: self._assert_on_external_net_with_compute(port_data) self._assert_on_port_admin_state(port_data, device_owner) self._validate_extra_dhcp_options(port_data.get(ext_edo.EXTRADHCPOPTS)) def _assert_on_vpn_port_change(self, port_data): if port_data['device_owner'] == ipsec_utils.VPN_PORT_OWNER: msg = _('Can not update/delete VPNaaS port %s') % port_data['id'] raise n_exc.InvalidInput(error_message=msg) def _assert_on_lb_port_fixed_ip_change(self, 
port_data, orig_dev_own): if orig_dev_own == constants.DEVICE_OWNER_LOADBALANCERV2: if "fixed_ips" in port_data and port_data["fixed_ips"]: msg = _('Can not update Loadbalancer port with fixed IP') raise n_exc.InvalidInput(error_message=msg) def _assert_on_device_owner_change(self, port_data, orig_dev_own): """Prevent illegal device owner modifications """ if orig_dev_own == constants.DEVICE_OWNER_LOADBALANCERV2: if ("allowed_address_pairs" in port_data and port_data["allowed_address_pairs"]): msg = _('Loadbalancer port can not be updated ' 'with address pairs') raise n_exc.InvalidInput(error_message=msg) if 'device_owner' not in port_data: return new_dev_own = port_data['device_owner'] if new_dev_own == orig_dev_own: return err_msg = (_("Changing port device owner '%(orig)s' to '%(new)s' is " "not allowed") % {'orig': orig_dev_own, 'new': new_dev_own}) # Do not allow changing nova <-> neutron device owners if ((orig_dev_own.startswith(constants.DEVICE_OWNER_COMPUTE_PREFIX) and new_dev_own.startswith(constants.DEVICE_OWNER_NETWORK_PREFIX)) or (orig_dev_own.startswith(constants.DEVICE_OWNER_NETWORK_PREFIX) and new_dev_own.startswith(constants.DEVICE_OWNER_COMPUTE_PREFIX))): raise n_exc.InvalidInput(error_message=err_msg) # Do not allow removing the device owner in some cases if orig_dev_own == constants.DEVICE_OWNER_DHCP: raise n_exc.InvalidInput(error_message=err_msg) def _assert_on_port_sec_change(self, port_data, device_owner): """Do not allow enabling port security/mac learning of some ports Trusted ports are created with port security and mac learning disabled in neutron, and it should not change. 
""" if nl_net_utils.is_port_trusted({'device_owner': device_owner}): if port_data.get(psec.PORTSECURITY) is True: err_msg = _("port_security_enabled=True is not supported for " "trusted ports") LOG.warning(err_msg) raise n_exc.InvalidInput(error_message=err_msg) mac_learning = port_data.get(mac_ext.MAC_LEARNING) if (validators.is_attr_set(mac_learning) and mac_learning is True): err_msg = _("mac_learning_enabled=True is not supported for " "trusted ports") LOG.warning(err_msg) raise n_exc.InvalidInput(error_message=err_msg) def _validate_update_port(self, context, id, original_port, port_data): qos_selected = validators.is_attr_set(port_data.get (qos_consts.QOS_POLICY_ID)) is_external_net = self._network_is_external( context, original_port['network_id']) device_owner = (port_data['device_owner'] if 'device_owner' in port_data else original_port.get('device_owner')) # QoS validations if qos_selected: self._validate_qos_policy_id( context, port_data.get(qos_consts.QOS_POLICY_ID)) if is_external_net: raise nsx_exc.QoSOnExternalNet() self._assert_on_illegal_port_with_qos(device_owner) is_ens_tz_port = self._is_ens_tz_port(context, original_port) if is_ens_tz_port and not self._ens_qos_supported(): err_msg = _("Cannot configure QOS on ENS networks") raise n_exc.InvalidInput(error_message=err_msg) # External networks validations: if is_external_net: self._assert_on_external_net_with_compute(port_data) # Device owner validations: orig_dev_owner = original_port.get('device_owner') self._assert_on_device_owner_change(port_data, orig_dev_owner) self._assert_on_port_admin_state(port_data, device_owner) self._assert_on_port_sec_change(port_data, device_owner) self._validate_max_ips_per_port(context, port_data.get('fixed_ips', []), device_owner) self._validate_number_of_address_pairs(port_data) self._assert_on_vpn_port_change(original_port) self._assert_on_lb_port_fixed_ip_change(port_data, orig_dev_owner) self._validate_extra_dhcp_options(port_data.get(ext_edo.EXTRADHCPOPTS)) 
def _get_dhcp_port_name(self, net_name, net_id): return utils.get_name_and_uuid('%s-%s' % ('dhcp', net_name or 'network'), net_id) def _build_port_name(self, context, port_data): device_owner = port_data.get('device_owner') device_id = port_data.get('device_id') if device_owner == l3_db.DEVICE_OWNER_ROUTER_INTF and device_id: router = self._get_router(context, device_id) name = utils.get_name_and_uuid( router['name'] or 'router', port_data['id'], tag='port') elif device_owner == constants.DEVICE_OWNER_DHCP: network = self.get_network(context, port_data['network_id']) name = self._get_dhcp_port_name(network['name'], network['id']) elif device_owner.startswith(constants.DEVICE_OWNER_COMPUTE_PREFIX): name = utils.get_name_and_uuid( port_data['name'] or 'instance-port', port_data['id']) else: name = port_data['name'] return name def _validate_external_net_create(self, net_data, default_tier0_router, tier0_validator=None): """Validate external network configuration Returns a tuple of: - Boolean is provider network (always True) - Network type (always L3_EXT) - tier 0 router id - vlan id """ if not validators.is_attr_set(net_data.get(pnet.PHYSICAL_NETWORK)): tier0_uuid = default_tier0_router else: tier0_uuid = net_data[pnet.PHYSICAL_NETWORK] if ((validators.is_attr_set(net_data.get(pnet.NETWORK_TYPE)) and net_data.get(pnet.NETWORK_TYPE) != utils.NetworkTypes.L3_EXT and net_data.get(pnet.NETWORK_TYPE) != utils.NetworkTypes.LOCAL) or validators.is_attr_set(net_data.get(pnet.SEGMENTATION_ID))): msg = (_("External network cannot be created with %s provider " "network or segmentation id") % net_data.get(pnet.NETWORK_TYPE)) raise n_exc.InvalidInput(error_message=msg) if tier0_validator: tier0_validator(tier0_uuid) return (True, utils.NetworkTypes.L3_EXT, tier0_uuid, 0) def _extend_network_dict_provider(self, context, network, bindings=None): """Add network provider fields to the network dict from the DB""" if 'id' not in network: return if not bindings: bindings = 
nsx_db.get_network_bindings(context.session, network['id']) # With NSX plugin, "normal" overlay networks will have no binding if bindings: # Network came in through provider networks API network[pnet.NETWORK_TYPE] = bindings[0].binding_type network[pnet.PHYSICAL_NETWORK] = bindings[0].phy_uuid network[pnet.SEGMENTATION_ID] = bindings[0].vlan_id def _extend_get_network_dict_provider(self, context, network): self._extend_network_dict_provider(context, network) network[qos_consts.QOS_POLICY_ID] = (qos_com_utils. get_network_policy_id(context, network['id'])) def _translate_net_db_2_dict(self, context, net_db): net_dict = self._make_network_dict(net_db, context=context) self._extend_get_network_dict_provider(context, net_dict) return net_dict def get_network(self, context, id, fields=None): with db_api.CONTEXT_READER.using(context): # Get network from Neutron database network = self._get_network(context, id) # Don't do field selection here otherwise we won't be able to add # provider networks fields net = self._translate_net_db_2_dict(context, network) return db_utils.resource_fields(net, fields) def get_networks(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): # Get networks from Neutron database filters = filters or {} with db_api.CONTEXT_READER.using(context): networks = super(NsxPluginV3Base, self).get_networks( context, filters, fields, sorts, limit, marker, page_reverse) # Add provider network fields for net in networks: self._extend_get_network_dict_provider(context, net) return (networks if not fields else [db_utils.resource_fields(network, fields) for network in networks]) def _assert_on_ens_with_qos(self, net_data): if self._ens_qos_supported(): return qos_id = net_data.get(qos_consts.QOS_POLICY_ID) if validators.is_attr_set(qos_id): err_msg = _("Cannot configure QOS on ENS networks") raise n_exc.InvalidInput(error_message=err_msg) def _get_port_qos_policy_id(self, context, original_port, updated_port): 
"""Return the QoS policy Id of a port that is being created/updated Return the QoS policy assigned directly to the port (after update or originally), or the policy of the network, if it is a compute port that should inherit it. original_port: the neutron port data before this update (or None in a case of a new port creation) updated_ports: the modified fields of this port (or all th attributes of the new port) """ orig_compute = False if original_port: orig_compute = original_port.get('device_owner', '').startswith( constants.DEVICE_OWNER_COMPUTE_PREFIX) updated_compute = updated_port.get('device_owner', '').startswith( constants.DEVICE_OWNER_COMPUTE_PREFIX) is_new_compute = updated_compute and not orig_compute qos_policy_id = None if qos_consts.QOS_POLICY_ID in updated_port: qos_policy_id = updated_port[qos_consts.QOS_POLICY_ID] elif original_port: # Look for the original QoS policy of this port qos_policy_id = qos_com_utils.get_port_policy_id( context, original_port['id']) # If the port is now a 'compute' port (attached to a vm) and # Qos policy was not configured on the port directly, # try to take it from the ports network if qos_policy_id is None and is_new_compute: # check if the network of this port has a policy net_id = (original_port.get('network_id') if original_port else updated_port.get('network_id')) qos_policy_id = qos_com_utils.get_network_policy_id( context, net_id) return qos_policy_id def _ens_psec_supported(self): """Should be implemented by each plugin""" pass def _ens_qos_supported(self): """Should be implemented by each plugin""" pass def _has_native_dhcp_metadata(self): """Should be implemented by each plugin""" pass def _get_nsx_net_tz_id(self, nsx_net): """Should be implemented by each plugin""" pass def _get_network_nsx_id(self, context, neutron_id): """Should be implemented by each plugin""" pass def _get_tier0_uplink_cidrs(self, tier0_id): """Should be implemented by each plugin""" pass def _validate_ens_net_portsecurity(self, net_data): 
"""Validate/Update the port security of the new network for ENS TZ Should be implemented by the plugin if necessary """ pass def _is_ens_tz_net(self, context, net_id): """Return True if the network is based on an END transport zone""" tz_id = self._get_net_tz(context, net_id) if tz_id: # Check the mode of this TZ return self._is_ens_tz(tz_id) return False def _is_ens_tz_port(self, context, port_data): # Check the host-switch-mode of the TZ connected to the ports network return self._is_ens_tz_net(context, port_data['network_id']) def _is_overlay_network(self, context, network_id): """Should be implemented by each plugin""" pass def _generate_segment_id(self, context, physical_network, net_data, restricted_vlans): bindings = nsx_db.get_network_bindings_by_phy_uuid( context.session, physical_network) vlan_ranges = self._network_vlans.get(physical_network, []) if vlan_ranges: vlan_ids = set() for vlan_min, vlan_max in vlan_ranges: vlan_ids |= set(moves.range(vlan_min, vlan_max + 1)) else: vlan_min = constants.MIN_VLAN_TAG vlan_max = constants.MAX_VLAN_TAG vlan_ids = set(moves.range(vlan_min, vlan_max + 1)) used_ids_in_range = [binding.vlan_id for binding in bindings if binding.vlan_id in vlan_ids] not_allowed_in_range = set(used_ids_in_range + restricted_vlans) free_ids = list(vlan_ids ^ not_allowed_in_range) if len(free_ids) == 0: raise n_exc.NoNetworkAvailable() net_data[pnet.SEGMENTATION_ID] = free_ids[0] return net_data[pnet.SEGMENTATION_ID] def _default_physical_net(self, physical_net): return physical_net is None or physical_net == 'default' def _validate_provider_create(self, context, network_data, az, nsxlib_tz, nsxlib_network, transparent_vlan=False): """Validate the parameters of a new provider network raises an error if illegal returns a dictionary with the relevant processed data: - is_provider_net: boolean - net_type: provider network type or None - physical_net: the uuid of the relevant transport zone or None - vlan_id: vlan tag, 0 or None - switch_mode: 
standard or ENS """ # initialize the relevant parameters from the AZ default_vlan_tz_uuid = az._default_vlan_tz_uuid default_overlay_tz_uuid = az._default_overlay_tz_uuid mdproxy_uuid = az._native_md_proxy_uuid is_provider_net = any( validators.is_attr_set(network_data.get(f)) for f in (pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, pnet.SEGMENTATION_ID)) physical_net = network_data.get(pnet.PHYSICAL_NETWORK) if not validators.is_attr_set(physical_net): physical_net = None vlan_id = network_data.get(pnet.SEGMENTATION_ID) if not validators.is_attr_set(vlan_id): vlan_id = None if vlan_id and transparent_vlan: err_msg = (_("Segmentation ID cannot be set with transparent " "vlan!")) raise n_exc.InvalidInput(error_message=err_msg) err_msg = None net_type = network_data.get(pnet.NETWORK_TYPE) tz_type = nsxlib_consts.TRANSPORT_TYPE_VLAN switch_mode = nsxlib_consts.HOST_SWITCH_MODE_STANDARD if validators.is_attr_set(net_type): if net_type == utils.NsxV3NetworkTypes.FLAT: if vlan_id is not None: err_msg = (_("Segmentation ID cannot be specified with " "%s network type") % utils.NsxV3NetworkTypes.FLAT) else: if not transparent_vlan: # Set VLAN id to 0 for flat networks vlan_id = '0' if self._default_physical_net(physical_net): physical_net = default_vlan_tz_uuid elif net_type == utils.NsxV3NetworkTypes.VLAN: # Use default VLAN transport zone if physical network not given if self._default_physical_net(physical_net): physical_net = default_vlan_tz_uuid restricted_vlans = self._get_tz_restricted_vlans(physical_net) if not transparent_vlan: # Validate VLAN id if not vlan_id: vlan_id = self._generate_segment_id(context, physical_net, network_data, restricted_vlans) elif not plugin_utils.is_valid_vlan_tag(vlan_id): err_msg = (_('Segmentation ID %(seg_id)s out of ' 'range (%(min_id)s through %(max_id)s)') % {'seg_id': vlan_id, 'min_id': constants.MIN_VLAN_TAG, 'max_id': constants.MAX_VLAN_TAG}) elif vlan_id in restricted_vlans: err_msg = (_('Segmentation ID %(seg_id)s cannot be ' 'used 
as it is used by the transport ' 'node') % {'seg_id': vlan_id}) else: # Verify VLAN id is not already allocated bindings = nsx_db.\ get_network_bindings_by_vlanid_and_physical_net( context.session, vlan_id, physical_net) if bindings: raise n_exc.VlanIdInUse( vlan_id=vlan_id, physical_network=physical_net) elif net_type == utils.NsxV3NetworkTypes.GENEVE: if vlan_id: err_msg = (_("Segmentation ID cannot be specified with " "%s network type") % utils.NsxV3NetworkTypes.GENEVE) tz_type = nsxlib_consts.TRANSPORT_TYPE_OVERLAY elif net_type == utils.NsxV3NetworkTypes.NSX_NETWORK: # Linking neutron networks to an existing NSX logical switch if not physical_net: err_msg = (_("Physical network must be specified with " "%s network type") % net_type) # Validate the logical switch existence else: try: nsx_net = nsxlib_network.get(physical_net) tz_id = self._get_nsx_net_tz_id(nsx_net) switch_mode = nsxlib_tz.get_host_switch_mode(tz_id) except nsx_lib_exc.ResourceNotFound: err_msg = (_('Logical switch %s does not exist') % physical_net) # make sure no other neutron network is using it bindings = ( nsx_db.get_network_bindings_by_vlanid_and_physical_net( context.elevated().session, 0, physical_net)) if bindings: err_msg = (_('Logical switch %s is already used by ' 'another network') % physical_net) else: err_msg = (_('%(net_type_param)s %(net_type_value)s not ' 'supported') % {'net_type_param': pnet.NETWORK_TYPE, 'net_type_value': net_type}) elif is_provider_net: # FIXME: Ideally provider-network attributes should be checked # at the NSX backend. For now, the network_type is required, # so the plugin can do a quick check locally. 
err_msg = (_('%s is required for creating a provider network') % pnet.NETWORK_TYPE) else: net_type = None if physical_net is None: # Default to transport type overlay physical_net = default_overlay_tz_uuid # validate the transport zone existence and type if (not err_msg and physical_net and net_type != utils.NsxV3NetworkTypes.NSX_NETWORK): if is_provider_net: try: backend_type = nsxlib_tz.get_transport_type( physical_net) except nsx_lib_exc.ResourceNotFound: err_msg = (_('Transport zone %s does not exist') % physical_net) else: if backend_type != tz_type: err_msg = (_('%(tz)s transport zone is required for ' 'creating a %(net)s provider network') % {'tz': tz_type, 'net': net_type}) if not err_msg: switch_mode = nsxlib_tz.get_host_switch_mode(physical_net) # validate the mdproxy TZ matches this one. if (not err_msg and physical_net and self._has_native_dhcp_metadata()): if not self._validate_net_mdproxy_tz( az, physical_net, mdproxy_uuid): err_msg = (_('Network TZ %(tz)s does not match MD proxy ' '%(md)s edge cluster') % {'tz': physical_net, 'md': mdproxy_uuid}) if err_msg: raise n_exc.InvalidInput(error_message=err_msg) if (switch_mode == nsxlib_consts.HOST_SWITCH_MODE_ENS): if not self._allow_ens_networks(): raise NotImplementedError(_("ENS support is disabled")) self._assert_on_ens_with_qos(network_data) self._validate_ens_net_portsecurity(network_data) return {'is_provider_net': is_provider_net, 'net_type': net_type, 'physical_net': physical_net, 'vlan_id': vlan_id, 'switch_mode': switch_mode} def _validate_net_mdproxy_tz(self, az, tz_uuid, mdproxy_uuid): """Validate that the network TZ matches the mdproxy edge cluster Should be implemented by each plugin. 
""" pass def _network_is_nsx_net(self, context, network_id): bindings = nsx_db.get_network_bindings(context.session, network_id) if not bindings: return False return (bindings[0].binding_type == utils.NsxV3NetworkTypes.NSX_NETWORK) def _vif_type_by_vnic_type(self, direct_vnic_type): return (nsx_constants.VIF_TYPE_DVS if direct_vnic_type else pbin.VIF_TYPE_OVS) def _get_network_segmentation_id(self, context, neutron_id): bindings = nsx_db.get_network_bindings(context.session, neutron_id) if bindings: return bindings[0].vlan_id def _get_network_vlan_transparent(self, context, network_id): if not cfg.CONF.vlan_transparent: return False # Get this flag directly from DB to improve performance db_entry = context.session.query(models_v2.Network).filter_by( id=network_id).first() if db_entry: return True if db_entry.vlan_transparent else False def _extend_nsx_port_dict_binding(self, context, port_data): # Not using the register api for this because we need the context # Some attributes were already initialized by _extend_port_portbinding if pbin.VIF_TYPE not in port_data: port_data[pbin.VIF_TYPE] = pbin.VIF_TYPE_OVS if pbin.VNIC_TYPE not in port_data: port_data[pbin.VNIC_TYPE] = pbin.VNIC_NORMAL if 'network_id' in port_data: net_id = port_data['network_id'] if pbin.VIF_DETAILS not in port_data: port_data[pbin.VIF_DETAILS] = {} port_data[pbin.VIF_DETAILS][pbin.OVS_HYBRID_PLUG] = False if (port_data.get('device_owner') == constants.DEVICE_OWNER_FLOATINGIP): # floatingip belongs to an external net without nsx-id port_data[pbin.VIF_DETAILS]['nsx-logical-switch-id'] = None else: port_data[pbin.VIF_DETAILS]['nsx-logical-switch-id'] = ( self._get_network_nsx_id(context, net_id)) if port_data[pbin.VNIC_TYPE] != pbin.VNIC_NORMAL: port_data[pbin.VIF_DETAILS]['segmentation-id'] = ( self._get_network_segmentation_id(context, net_id)) port_data[pbin.VIF_DETAILS]['vlan-transparent'] = ( self._get_network_vlan_transparent(context, net_id)) def _extend_qos_port_dict_binding(self, context, 
port): # add the qos policy id from the DB if 'id' in port: port[qos_consts.QOS_POLICY_ID] = qos_com_utils.get_port_policy_id( context, port['id']) def fix_direct_vnic_port_sec(self, direct_vnic_type, port_data): if direct_vnic_type: if validators.is_attr_set(port_data.get(psec.PORTSECURITY)): # 'direct' and 'direct-physical' vnic types ports requires # port-security to be disabled. if port_data[psec.PORTSECURITY]: err_msg = _("Security features are not supported for " "ports with direct/direct-physical VNIC " "type") raise n_exc.InvalidInput(error_message=err_msg) else: # Implicitly disable port-security for direct vnic types. port_data[psec.PORTSECURITY] = False def _validate_network_type(self, context, network_id, net_types): net = self.get_network(context, network_id) if net.get(pnet.NETWORK_TYPE) in net_types: return True return False def _revert_neutron_port_update(self, context, port_id, original_port, updated_port, port_security, sec_grp_updated): # revert the neutron port update super(NsxPluginV3Base, self).update_port(context, port_id, {'port': original_port}) # revert allowed address pairs if port_security: orig_pair = original_port.get(addr_apidef.ADDRESS_PAIRS) updated_pair = updated_port.get(addr_apidef.ADDRESS_PAIRS) if orig_pair != updated_pair: self._delete_allowed_address_pairs(context, port_id) if orig_pair: self._process_create_allowed_address_pairs( context, original_port, orig_pair) # revert the security groups modifications if sec_grp_updated: self.update_security_group_on_port( context, port_id, {'port': original_port}, updated_port, original_port) def _get_external_attachment_info(self, context, router): gw_port = router.gw_port ipaddress = None netmask = None nexthop = None if gw_port: # gw_port may have multiple IPs, only configure the first one if gw_port.get('fixed_ips'): ipaddress = gw_port['fixed_ips'][0]['ip_address'] network_id = gw_port.get('network_id') if network_id: ext_net = self._get_network(context, network_id) if not 
ext_net.external: msg = (_("Network '%s' is not a valid external " "network") % network_id) raise n_exc.BadRequest(resource='router', msg=msg) if ext_net.subnets: ext_subnet = ext_net.subnets[0] netmask = str(netaddr.IPNetwork(ext_subnet.cidr).netmask) nexthop = ext_subnet.gateway_ip return (ipaddress, netmask, nexthop) def _get_tier0_uuid_by_net_id(self, context, network_id): if not network_id: return network = self.get_network(context, network_id) if not network.get(pnet.PHYSICAL_NETWORK): az = self.get_network_az(network) return az._default_tier0_router else: return network.get(pnet.PHYSICAL_NETWORK) def _validate_router_tz(self, context, tier0_uuid, subnets): """Ensure the related GW (Tier0 router) belongs to the same TZ as the subnets attached to the Tier1 router Should be implemented by each plugin. """ pass def _get_router_gw_info(self, context, router_id): router = self.get_router(context, router_id) return router.get(l3_apidef.EXTERNAL_GW_INFO, {}) def _validate_router_gw_and_tz(self, context, router_id, info, org_enable_snat, router_subnets): # Ensure that a router cannot have SNAT disabled if there are # floating IP's assigned if (info and 'enable_snat' in info and org_enable_snat != info.get('enable_snat') and info.get('enable_snat') is False and self.router_gw_port_has_floating_ips(context, router_id)): msg = _("Unable to set SNAT disabled. 
Floating IPs assigned") raise n_exc.InvalidInput(error_message=msg) # Ensure that the router GW tier0 belongs to the same TZ as the # subnets of its interfaces if info and info.get('network_id'): new_tier0_uuid = self._get_tier0_uuid_by_net_id(context.elevated(), info['network_id']) if new_tier0_uuid: self._validate_router_tz(context, new_tier0_uuid, router_subnets) def _get_tier0_uuid_by_router(self, context, router): network_id = router.gw_port_id and router.gw_port.network_id return self._get_tier0_uuid_by_net_id(context, network_id) def _validate_gw_overlap_interfaces(self, context, gateway_net, interfaces_networks): # Ensure that interface subnets cannot overlap with the GW subnet gw_subnets = self._get_subnets_by_network( context.elevated(), gateway_net) gw_cidrs = [subnet['cidr'] for subnet in gw_subnets] gw_ip_set = netaddr.IPSet(gw_cidrs) if_subnets = [] for net in interfaces_networks: if_subnets.extend(self._get_subnets_by_network( context.elevated(), net)) if_cidrs = [subnet['cidr'] for subnet in if_subnets] if_ip_set = netaddr.IPSet(if_cidrs) if gw_ip_set & if_ip_set: msg = _("Interface network cannot overlap with router GW network") LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) def _get_update_router_gw_actions( self, org_tier0_uuid, orgaddr, org_enable_snat, new_tier0_uuid, newaddr, new_enable_snat, tier1_services_exist, sr_currently_exists): """Return a dictionary of flags indicating which actions should be performed on this router GW update. 
""" actions = {} # Remove router link port between tier1 and tier0 if tier0 router link # is removed or changed actions['remove_router_link_port'] = ( org_tier0_uuid and (not new_tier0_uuid or org_tier0_uuid != new_tier0_uuid)) # Remove SNAT rules for gw ip if gw ip is deleted/changed or # enable_snat is updated from True to False actions['remove_snat_rules'] = ( org_enable_snat and orgaddr and (newaddr != orgaddr or not new_enable_snat)) # Remove No-DNAT rules if GW was removed or snat was disabled actions['remove_no_dnat_rules'] = ( orgaddr and org_enable_snat and (not newaddr or not new_enable_snat)) # Revocate bgp announce for nonat subnets if tier0 router link is # changed or enable_snat is updated from False to True actions['revocate_bgp_announce'] = ( not org_enable_snat and org_tier0_uuid and (new_tier0_uuid != org_tier0_uuid or new_enable_snat)) # Add router link port between tier1 and tier0 if tier0 router link is # added or changed to a new one actions['add_router_link_port'] = ( new_tier0_uuid and (not org_tier0_uuid or org_tier0_uuid != new_tier0_uuid)) # Add SNAT rules for gw ip if gw ip is add/changed or # enable_snat is updated from False to True actions['add_snat_rules'] = ( new_enable_snat and newaddr and (newaddr != orgaddr or not org_enable_snat)) # Add No-DNAT rules if GW was added, and the router has SNAT enabled, # or if SNAT was enabled actions['add_no_dnat_rules'] = ( new_enable_snat and newaddr and (not orgaddr or not org_enable_snat)) # Bgp announce for nonat subnets if tier0 router link is changed or # enable_snat is updated from True to False actions['bgp_announce'] = ( not new_enable_snat and new_tier0_uuid and (new_tier0_uuid != org_tier0_uuid or not org_enable_snat)) # Advertise NAT routes if enable SNAT to support FIP. In the NoNAT # use case, only NSX connected routes need to be advertised. 
actions['advertise_route_nat_flag'] = ( True if new_enable_snat else False) actions['advertise_route_connected_flag'] = ( True if not new_enable_snat else False) # the purpose of this var is to be able to differ between # adding a gateway w/o snat and adding snat (when adding/removing gw # the snat option is on by default). new_with_snat = True if (new_enable_snat and newaddr) else False has_gw = True if newaddr else False if sr_currently_exists: # currently there is a service router on the backend actions['add_service_router'] = False # Should remove the service router if the GW was removed, # or no service needs it: SNAT, LBaaS or FWaaS actions['remove_service_router'] = ( not has_gw or not (tier1_services_exist or new_with_snat)) if actions['remove_service_router']: LOG.info("Removing service router [has GW: %s, services %s, " "SNAT %s]", has_gw, tier1_services_exist, new_with_snat) else: # currently there is no service router on the backend actions['remove_service_router'] = False # Should add service router if there is a GW # and there is a service that needs it: SNAT, LB or FWaaS actions['add_service_router'] = ( has_gw is not None and (new_with_snat or tier1_services_exist)) if actions['add_service_router']: LOG.info("Adding service router [has GW: %s, services %s, " "SNAT %s]", has_gw, tier1_services_exist, new_with_snat) return actions def _validate_update_router_gw(self, context, router_id, gw_info): router_ports = self._get_router_interfaces(context, router_id) for port in router_ports: # if setting this router as no-snat, make sure gw address scope # match those of the subnets if not gw_info.get('enable_snat', cfg.CONF.enable_snat_by_default): for fip in port['fixed_ips']: self._validate_address_scope_for_router_interface( context.elevated(), router_id, gw_info['network_id'], fip['subnet_id']) # If the network attached to a router is a VLAN backed network # then it must be attached to an edge cluster if (not gw_info and not 
self._is_overlay_network(context, port['network_id'])): msg = _("A router attached to a VLAN backed network " "must have an external network assigned") raise n_exc.InvalidInput(error_message=msg) def _validate_ext_routes(self, context, router_id, gw_info, new_routes): ext_net_id = (gw_info['network_id'] if validators.is_attr_set(gw_info) and gw_info else None) if not ext_net_id: port_filters = {'device_id': [router_id], 'device_owner': [l3_db.DEVICE_OWNER_ROUTER_GW]} gw_ports = self.get_ports(context, filters=port_filters) if gw_ports: ext_net_id = gw_ports[0]['network_id'] if ext_net_id: subnets = self._get_subnets_by_network(context, ext_net_id) ext_cidrs = [subnet['cidr'] for subnet in subnets] for route in new_routes: if netaddr.all_matching_cidrs( route['nexthop'], ext_cidrs): error_message = (_("route with destination %(dest)s have " "an external nexthop %(nexthop)s which " "can't be supported") % {'dest': route['destination'], 'nexthop': route['nexthop']}) LOG.error(error_message) raise n_exc.InvalidInput(error_message=error_message) def _validate_routes(self, context, router_id, routes): super(NsxPluginV3Base, self)._validate_routes( context, router_id, routes) # routes with mixed ip versions are not allowed for route in routes: if route.get('destination') and route.get('nexthop'): dest_ver = netaddr.IPNetwork(route['destination']).version nexthop_ver = netaddr.IPAddress(route['nexthop']).version if dest_ver != nexthop_ver: msg = _("Static route network CIDR and next hop IP " "addresses must be same address family.") raise n_exc.BadRequest(resource='router', msg=msg) def _get_static_routes_diff(self, context, router_id, gw_info, router_data): new_routes = router_data['routes'] self._validate_ext_routes(context, router_id, gw_info, new_routes) self._validate_routes(context, router_id, new_routes) old_routes = self._get_extra_routes_by_router_id( context, router_id) routes_added, routes_removed = helpers.diff_list_of_dict( old_routes, new_routes) return 
routes_added, routes_removed def _assert_on_router_admin_state(self, router_data): if router_data.get("admin_state_up") is False: err_msg = _("admin_state_up=False routers are not supported") LOG.warning(err_msg) raise n_exc.InvalidInput(error_message=err_msg) def _get_network_dns_domain(self, az, network): dns_domain = None if network.get('dns_domain'): net_dns = network['dns_domain'] if isinstance(net_dns, string_types): dns_domain = net_dns elif hasattr(net_dns, "dns_domain"): dns_domain = net_dns.dns_domain if not dns_domain or not validators.is_attr_set(dns_domain): dns_domain = az.dns_domain return dns_domain def _build_dhcp_server_config(self, context, network, subnet, port, az): name = self.nsxlib.native_dhcp.build_server_name( network['name'], network['id']) net_tags = self.nsxlib.build_v3_tags_payload( network, resource_type='os-neutron-net-id', project_name=context.tenant_name) dns_domain = self._get_network_dns_domain(az, network) dns_nameservers = subnet['dns_nameservers'] if not dns_nameservers or not validators.is_attr_set(dns_nameservers): dns_nameservers = az.nameservers # There must be exactly one fixed ip matching given subnet fixed_ip_addr = [fip['ip_address'] for fip in port['fixed_ips'] if fip['subnet_id'] == subnet['id']] return self.nsxlib.native_dhcp.build_server( name, ip_address=fixed_ip_addr[0], cidr=subnet['cidr'], gateway_ip=subnet['gateway_ip'], host_routes=subnet['host_routes'], dns_domain=dns_domain, dns_nameservers=dns_nameservers, dhcp_profile_id=az._native_dhcp_profile_uuid, tags=net_tags) def _enable_native_dhcp(self, context, network, subnet, az=None): # Enable native DHCP service on the backend for this network. # First create a Neutron DHCP port and use its assigned IP # address as the DHCP server address in an API call to create a # LogicalDhcpServer on the backend. Then create the corresponding # logical port for the Neutron port with DHCP attachment as the # LogicalDhcpServer UUID. 
# TODO(annak): # This function temporarily serves both nsx_v3 and nsx_p plugins. # In future, when platform supports native dhcp in policy for infra # segments, this function should move back to nsx_v3 plugin # Delete obsolete settings if exist. This could happen when a # previous failed transaction was rolled back. But the backend # entries are still there. self._disable_native_dhcp(context, network['id']) # Get existing ports on subnet. existing_ports = super(NsxPluginV3Base, self).get_ports( context, filters={'network_id': [network['id']], 'fixed_ips': {'subnet_id': [subnet['id']]}}) nsx_net_id = self._get_network_nsx_id(context, network['id']) if not nsx_net_id: msg = ("Unable to obtain backend network id for logical DHCP " "server for network %s" % network['id']) LOG.error(msg) raise nsx_exc.NsxPluginException(err_msg=msg) if not az: az = self.get_network_az_by_net_id(context, network['id']) port_data = { "name": "", "admin_state_up": True, "device_id": az._native_dhcp_profile_uuid, "device_owner": constants.DEVICE_OWNER_DHCP, "network_id": network['id'], "tenant_id": network["tenant_id"], "mac_address": constants.ATTR_NOT_SPECIFIED, "fixed_ips": [{"subnet_id": subnet['id']}], psec.PORTSECURITY: False } # Create the DHCP port (on neutron only) and update its port security port = {'port': port_data} neutron_port = super(NsxPluginV3Base, self).create_port(context, port) is_ens_tz_port = self._is_ens_tz_port(context, port_data) self._create_port_preprocess_security(context, port, port_data, neutron_port, is_ens_tz_port) self._process_portbindings_create_and_update( context, port_data, neutron_port) server_data = self._build_dhcp_server_config( context, network, subnet, neutron_port, az) port_tags = self.nsxlib.build_v3_tags_payload( neutron_port, resource_type='os-neutron-dport-id', project_name=context.tenant_name) dhcp_server = None dhcp_port_profiles = [] if (not self._has_native_dhcp_metadata() and not self._is_ens_tz_net(context, network['id'])): 
dhcp_port_profiles.append(self._dhcp_profile) try: dhcp_server = self.nsxlib.dhcp_server.create(**server_data) LOG.debug("Created logical DHCP server %(server)s for network " "%(network)s", {'server': dhcp_server['id'], 'network': network['id']}) name = self._build_port_name(context, port_data) nsx_port = self.nsxlib.logical_port.create( nsx_net_id, dhcp_server['id'], tags=port_tags, name=name, attachment_type=nsxlib_consts.ATTACHMENT_DHCP, switch_profile_ids=dhcp_port_profiles) LOG.debug("Created DHCP logical port %(port)s for " "network %(network)s", {'port': nsx_port['id'], 'network': network['id']}) except nsx_lib_exc.ServiceClusterUnavailable: raise webob.exc.HTTPServiceUnavailable() except nsx_lib_exc.ManagerError: err_msg = ("Unable to create logical DHCP server for " "network %s" % network['id']) LOG.error(err_msg) if dhcp_server: self.nsxlib.dhcp_server.delete(dhcp_server['id']) super(NsxPluginV3Base, self).delete_port( context, neutron_port['id']) raise nsx_exc.NsxPluginException(err_msg=err_msg) try: # Add neutron_port_id -> nsx_port_id mapping to the DB. nsx_db.add_neutron_nsx_port_mapping( context.session, neutron_port['id'], nsx_net_id, nsx_port['id']) # Add neutron_net_id -> dhcp_service_id mapping to the DB. 
nsx_db.add_neutron_nsx_service_binding( context.session, network['id'], neutron_port['id'], nsxlib_consts.SERVICE_DHCP, dhcp_server['id']) except (db_exc.DBError, sql_exc.TimeoutError): with excutils.save_and_reraise_exception(): LOG.error("Failed to create mapping for DHCP port %s," "deleting port and logical DHCP server", neutron_port['id']) self.nsxlib.dhcp_server.delete(dhcp_server['id']) self._cleanup_port(context, neutron_port['id'], nsx_port['id']) # Configure existing ports to work with the new DHCP server try: for port_data in existing_ports: self._add_port_mp_dhcp_binding(context, port_data) except Exception: LOG.error('Unable to create DHCP bindings for existing ports ' 'on subnet %s', subnet['id']) def _disable_native_dhcp(self, context, network_id): # Disable native DHCP service on the backend for this network. # First delete the DHCP port in this network. Then delete the # corresponding LogicalDhcpServer for this network. self._ensure_native_dhcp() dhcp_service = nsx_db.get_nsx_service_binding( context.session, network_id, nsxlib_consts.SERVICE_DHCP) if not dhcp_service: return if dhcp_service['port_id']: try: _net_id, nsx_port_id = nsx_db.get_nsx_switch_and_port_id( context.session, dhcp_service['port_id']) self._cleanup_port(context, dhcp_service['port_id'], nsx_port_id) except (nsx_lib_exc.ResourceNotFound, n_exc.NotFound): # This could happen when the port has been manually deleted # from the NSX, or when the neutron port deletion previously # failed LOG.error("Failed to delete DHCP port %(port)s for " "network %(network)s", {'port': dhcp_service['port_id'], 'network': network_id}) else: LOG.error("DHCP port is not configured for network %s", network_id) try: self.nsxlib.dhcp_server.delete(dhcp_service['nsx_service_id']) LOG.debug("Deleted logical DHCP server %(server)s for network " "%(network)s", {'server': dhcp_service['nsx_service_id'], 'network': network_id}) except nsx_lib_exc.ManagerError: with excutils.save_and_reraise_exception(): 
LOG.error("Unable to delete logical DHCP server %(server)s " "for network %(network)s", {'server': dhcp_service['nsx_service_id'], 'network': network_id}) try: # Delete neutron_id -> dhcp_service_id mapping from the DB. nsx_db.delete_neutron_nsx_service_binding( context.session, network_id, nsxlib_consts.SERVICE_DHCP) # Delete all DHCP bindings under this DHCP server from the DB. nsx_db.delete_neutron_nsx_dhcp_bindings_by_service_id( context.session, dhcp_service['nsx_service_id']) except db_exc.DBError: with excutils.save_and_reraise_exception(): LOG.error("Unable to delete DHCP server mapping for " "network %s", network_id) def _filter_ipv4_dhcp_fixed_ips(self, context, fixed_ips): ips = [] for fixed_ip in fixed_ips: if netaddr.IPNetwork(fixed_ip['ip_address']).version != 4: continue with db_api.CONTEXT_READER.using(context): subnet = self.get_subnet(context, fixed_ip['subnet_id']) if subnet['enable_dhcp']: ips.append(fixed_ip) return ips def _add_port_mp_dhcp_binding(self, context, port): if not utils.is_port_dhcp_configurable(port): return dhcp_service = nsx_db.get_nsx_service_binding( context.session, port['network_id'], nsxlib_consts.SERVICE_DHCP) if not dhcp_service: return for fixed_ip in self._filter_ipv4_dhcp_fixed_ips( context, port['fixed_ips']): binding = self._add_dhcp_binding_on_server( context, dhcp_service['nsx_service_id'], fixed_ip['subnet_id'], fixed_ip['ip_address'], port) try: nsx_db.add_neutron_nsx_dhcp_binding( context.session, port['id'], fixed_ip['subnet_id'], fixed_ip['ip_address'], dhcp_service['nsx_service_id'], binding['id']) except (db_exc.DBError, sql_exc.TimeoutError): LOG.error("Failed to add mapping of DHCP binding " "%(binding)s for port %(port)s, deleting " "DHCP binding on server", {'binding': binding['id'], 'port': port['id']}) fake_db_binding = { 'port_id': port['id'], 'nsx_service_id': dhcp_service['nsx_service_id'], 'nsx_binding_id': binding['id']} self._delete_dhcp_binding_on_server(context, fake_db_binding) def 
_add_dhcp_binding_on_server(self, context, dhcp_service_id, subnet_id, ip, port): try: hostname = 'host-%s' % ip.replace('.', '-') subnet = self.get_subnet(context, subnet_id) gateway_ip = subnet.get('gateway_ip') options = self._get_dhcp_options( context, ip, port.get(ext_edo.EXTRADHCPOPTS), port['network_id'], subnet) binding = self.nsxlib.dhcp_server.create_binding( dhcp_service_id, port['mac_address'], ip, hostname, self._get_conf_attr('dhcp_lease_time'), options, gateway_ip) LOG.debug("Created static binding (mac: %(mac)s, ip: %(ip)s, " "gateway: %(gateway)s, options: %(options)s) for port " "%(port)s on logical DHCP server %(server)s", {'mac': port['mac_address'], 'ip': ip, 'gateway': gateway_ip, 'options': options, 'port': port['id'], 'server': dhcp_service_id}) return binding except nsx_lib_exc.ManagerError: with excutils.save_and_reraise_exception(): LOG.error("Unable to create static binding (mac: %(mac)s, " "ip: %(ip)s, gateway: %(gateway)s, options: " "%(options)s) for port %(port)s on logical DHCP " "server %(server)s", {'mac': port['mac_address'], 'ip': ip, 'gateway': gateway_ip, 'options': options, 'port': port['id'], 'server': dhcp_service_id}) def _delete_port_mp_dhcp_binding(self, context, port): # Do not check device_owner here because Nova may have already # deleted that before Neutron's port deletion. 
bindings = nsx_db.get_nsx_dhcp_bindings(context.session, port['id']) for binding in bindings: self._delete_dhcp_binding_on_server(context, binding) try: nsx_db.delete_neutron_nsx_dhcp_binding( context.session, binding['port_id'], binding['nsx_binding_id']) except db_exc.DBError: LOG.error("Unable to delete mapping of DHCP binding " "%(binding)s for port %(port)s", {'binding': binding['nsx_binding_id'], 'port': binding['port_id']}) def _delete_dhcp_binding_on_server(self, context, binding): try: self.nsxlib.dhcp_server.delete_binding( binding['nsx_service_id'], binding['nsx_binding_id']) LOG.debug("Deleted static binding for port %(port)s) on " "logical DHCP server %(server)s", {'port': binding['port_id'], 'server': binding['nsx_service_id']}) except nsx_lib_exc.ManagerError: with excutils.save_and_reraise_exception(): LOG.error("Unable to delete static binding for port " "%(port)s) on logical DHCP server %(server)s", {'port': binding['port_id'], 'server': binding['nsx_service_id']}) def _find_dhcp_binding(self, subnet_id, ip_address, bindings): for binding in bindings: if (subnet_id == binding['subnet_id'] and ip_address == binding['ip_address']): return binding def _update_port_mp_dhcp_binding(self, context, old_port, new_port): # First check if any IPv4 address in fixed_ips is changed. # Then update DHCP server setting or DHCP static binding # depending on the port type. # Note that Neutron allows a port with multiple IPs in the # same subnet. But backend DHCP server may not support that. if (utils.is_port_dhcp_configurable(old_port) != utils.is_port_dhcp_configurable(new_port)): # Note that the device_owner could be changed, # but still needs DHCP binding. if utils.is_port_dhcp_configurable(old_port): self._delete_port_mp_dhcp_binding(context, old_port) else: self._add_port_mp_dhcp_binding(context, new_port) return # Collect IPv4 DHCP addresses from original and updated fixed_ips # in the form of [(subnet_id, ip_address)]. 
old_fixed_ips = set([(fixed_ip['subnet_id'], fixed_ip['ip_address']) for fixed_ip in self._filter_ipv4_dhcp_fixed_ips( context, old_port['fixed_ips'])]) new_fixed_ips = set([(fixed_ip['subnet_id'], fixed_ip['ip_address']) for fixed_ip in self._filter_ipv4_dhcp_fixed_ips( context, new_port['fixed_ips'])]) # Find out the subnet/IP differences before and after the update. ips_to_add = list(new_fixed_ips - old_fixed_ips) ips_to_delete = list(old_fixed_ips - new_fixed_ips) ip_change = (ips_to_add or ips_to_delete) if (old_port["device_owner"] == constants.DEVICE_OWNER_DHCP and ip_change): # Update backend DHCP server address if the IP address of a DHCP # port is changed. if len(new_fixed_ips) != 1: msg = _("Can only configure one IP address on a DHCP server") LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) # Locate the backend DHCP server for this DHCP port. dhcp_service = nsx_db.get_nsx_service_binding( context.session, old_port['network_id'], nsxlib_consts.SERVICE_DHCP) if dhcp_service: new_ip = ips_to_add[0][1] try: self.nsxlib.dhcp_server.update( dhcp_service['nsx_service_id'], server_ip=new_ip) LOG.debug("Updated IP %(ip)s for logical DHCP server " "%(server)s", {'ip': new_ip, 'server': dhcp_service['nsx_service_id']}) except nsx_lib_exc.ManagerError: with excutils.save_and_reraise_exception(): LOG.error("Unable to update IP %(ip)s for logical " "DHCP server %(server)s", {'ip': new_ip, 'server': dhcp_service['nsx_service_id']}) elif utils.is_port_dhcp_configurable(old_port): # Update static DHCP bindings for a compute port. bindings = nsx_db.get_nsx_dhcp_bindings(context.session, old_port['id']) dhcp_opts = new_port.get(ext_edo.EXTRADHCPOPTS) dhcp_opts_changed = (old_port[ext_edo.EXTRADHCPOPTS] != new_port[ext_edo.EXTRADHCPOPTS]) if ip_change: # If IP address is changed, update associated DHCP bindings, # metadata route, and default hostname. # Mac address (if changed) will be updated at the same time. 
if ([subnet_id for (subnet_id, ip) in ips_to_add] == [subnet_id for (subnet_id, ip) in ips_to_delete]): # No change on subnet_id, just update corresponding IPs. for i, (subnet_id, ip) in enumerate(ips_to_delete): binding = self._find_dhcp_binding(subnet_id, ip, bindings) if binding: subnet = self.get_subnet(context, binding['subnet_id']) self._update_dhcp_binding_on_server( context, binding, new_port['mac_address'], ips_to_add[i][1], old_port['network_id'], dhcp_opts=dhcp_opts, subnet=subnet) # Update DB IP nsx_db.update_nsx_dhcp_bindings(context.session, old_port['id'], ip, ips_to_add[i][1]) else: for (subnet_id, ip) in ips_to_delete: binding = self._find_dhcp_binding(subnet_id, ip, bindings) if binding: self._delete_dhcp_binding_on_server(context, binding) if ips_to_add: dhcp_service = nsx_db.get_nsx_service_binding( context.session, new_port['network_id'], nsxlib_consts.SERVICE_DHCP) if dhcp_service: for (subnet_id, ip) in ips_to_add: self._add_dhcp_binding_on_server( context, dhcp_service['nsx_service_id'], subnet_id, ip, new_port) elif (old_port['mac_address'] != new_port['mac_address'] or dhcp_opts_changed): # If only Mac address/dhcp opts is changed, # update it in all associated DHCP bindings. 
for binding in bindings: subnet = self.get_subnet(context, binding['subnet_id']) self._update_dhcp_binding_on_server( context, binding, new_port['mac_address'], binding['ip_address'], old_port['network_id'], dhcp_opts=dhcp_opts, subnet=subnet) def _cleanup_port(self, context, port_id, nsx_port_id=None): # Clean up neutron port and nsx manager port if provided # Does not handle cleanup of policy port super(NsxPluginV3Base, self).delete_port(context, port_id) if nsx_port_id and self.nsxlib: self.nsxlib.logical_port.delete(nsx_port_id) def _is_excluded_port(self, device_owner, port_security): if device_owner == l3_db.DEVICE_OWNER_ROUTER_INTF: return False if device_owner == constants.DEVICE_OWNER_DHCP: if not self._has_native_dhcp_metadata(): return True elif not port_security: return True return False def _validate_obj_az_on_creation(self, context, obj_data, obj_type): # validate the availability zone, and get the AZ object if az_def.AZ_HINTS in obj_data: self._validate_availability_zones_forced( context, obj_type, obj_data[az_def.AZ_HINTS]) return self.get_obj_az_by_hints(obj_data) def _add_az_to_net(self, context, net_id, net_data): if az_def.AZ_HINTS in net_data: # Update the AZ hints in the neutron object az_hints = az_validator.convert_az_list_to_string( net_data[az_def.AZ_HINTS]) super(NsxPluginV3Base, self).update_network( context, net_id, {'network': {az_def.AZ_HINTS: az_hints}}) def _add_az_to_router(self, context, router_id, router_data): if az_def.AZ_HINTS in router_data: # Update the AZ hints in the neutron object az_hints = az_validator.convert_az_list_to_string( router_data[az_def.AZ_HINTS]) super(NsxPluginV3Base, self).update_router( context, router_id, {'router': {az_def.AZ_HINTS: az_hints}}) def get_network_availability_zones(self, net_db): hints = az_validator.convert_az_string_to_list( net_db[az_def.AZ_HINTS]) # When using the configured AZs, the az will always be the same # as the hint (or default if none) if hints: az_name = hints[0] else: 
az_name = self.get_default_az().name return [az_name] def _get_router_az_obj(self, router): l3_attrs_db.ExtraAttributesMixin._extend_extra_router_dict( router, router) return self.get_router_az(router) def get_router_availability_zones(self, router): """Return availability zones which a router belongs to.""" return [self._get_router_az_obj(router).name] def _validate_availability_zones_forced(self, context, resource_type, availability_zones): return self.validate_availability_zones(context, resource_type, availability_zones, force=True) def _list_availability_zones(self, context, filters=None): result = {} for az in self._availability_zones_data.list_availability_zones(): # Add this availability zone as a network & router resource if filters: if 'name' in filters and az not in filters['name']: continue for res in ['network', 'router']: if 'resource' not in filters or res in filters['resource']: result[(az, res)] = True return result def validate_availability_zones(self, context, resource_type, availability_zones, force=False): # This method is called directly from this plugin but also from # registered callbacks if self._is_sub_plugin and not force: # validation should be done together for both plugins return # Validate against the configured AZs return self.validate_obj_azs(availability_zones) def _ensure_nsxlib(self, feature): if not self.nsxlib: msg = (_("%s is not supported since passthough API is disabled") % feature) LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) def _ensure_native_dhcp(self): self._ensure_nsxlib("Native DHCP") if not self._native_dhcp_enabled: msg = (_("Native DHCP is not supported since dhcp_profile is not" " provided in plugin configuration")) LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) def _get_net_dhcp_relay(self, context, net_id): """Should be implemented by each plugin""" pass def _get_ipv6_subnet(self, context, network): for subnet in network.subnets: if subnet.ip_version == 6: return subnet def 
_validate_single_ipv6_subnet(self, context, network, subnet): if subnet.get('ip_version') == 6: if self._get_ipv6_subnet(context, network): msg = (_("Only one ipv6 subnet per network is supported")) LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) def _subnet_with_native_dhcp(self, subnet, orig_subnet=None): native_metadata = self._has_native_dhcp_metadata() default_enable_dhcp = (orig_subnet.get('enable_dhcp', False) if orig_subnet else False) # DHCPv6 is not yet supported, but slaac is. # When configuring slaac, neutron requires the user # to enable dhcp, however plugin code does not consider # slaac as dhcp. return (native_metadata and subnet.get('enable_dhcp', default_enable_dhcp) and subnet.get('ipv6_address_mode') != constants.IPV6_SLAAC) def _validate_mp_subnet_ip_version(self, subnet): # This validation only needs to be called at create, # since ip version and ipv6 mode attributes are read only if subnet.get('ip_version') == 4: # No dhcp restrictions for V4 return enable_dhcp = subnet.get('enable_dhcp', False) is_slaac = (subnet.get('ipv6_address_mode') == constants.IPV6_SLAAC) if enable_dhcp and not is_slaac: # No DHCPv6 support with the MP DHCP msg = _("DHCPv6 is not supported") LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) def _validate_net_dhcp_profile(self, context, network, az): """Validate that the dhcp profile edge cluster match the one of the network TZ """ if not self.nsxlib: msg = (_("Native DHCP is not supported since " "passthough API is disabled")) LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) net_tz = self._get_net_tz(context, network['id']) dhcp_profile = az._native_dhcp_profile_uuid dhcp_obj = self.nsxlib.native_dhcp_profile.get(dhcp_profile) ec_id = dhcp_obj['edge_cluster_id'] ec_nodes = self.nsxlib.edge_cluster.get_transport_nodes(ec_id) ec_tzs = [] for tn_uuid in ec_nodes: ec_tzs.extend(self.nsxlib.transport_node.get_transport_zones( tn_uuid)) if net_tz not in ec_tzs: msg = (_('Network TZ %(tz)s does 
not match DHCP profile ' '%(dhcp)s edge cluster') % {'tz': net_tz, 'dhcp': dhcp_profile}) LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) def _create_subnet_with_mp_dhcp(self, context, subnet): self._validate_number_of_subnet_static_routes(subnet) self._validate_host_routes_input(subnet) self._validate_mp_subnet_ip_version(subnet['subnet']) net_id = subnet['subnet']['network_id'] network = self._get_network(context, net_id) self._validate_single_ipv6_subnet(context, network, subnet['subnet']) # TODO(berlin): public external subnet announcement if self._subnet_with_native_dhcp(subnet['subnet']): self._validate_external_subnet(context, net_id) self._ensure_native_dhcp() net_az = self.get_network_az_by_net_id(context, net_id) self._validate_net_dhcp_profile(context, network, net_az) lock = 'nsxv3_network_' + net_id ddi_support, ddi_type = self._is_ddi_supported_on_net_with_type( context, net_id, network=network) dhcp_relay = self._get_net_dhcp_relay(context, net_id) with locking.LockManager.get_lock(lock): msg = None # Check if it is on an overlay network and is the first # DHCP-enabled subnet to create. if ddi_support: if self._has_no_dhcp_enabled_subnet(context, network): if not dhcp_relay and not self.nsxlib: # cannot create DHCP for this subnet msg = (_("Native DHCP is not supported since " "passthough API is disabled")) LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) # Create the neutron subnet. # Any failure from here and on will require rollback. 
created_subnet = super( NsxPluginV3Base, self).create_subnet(context, subnet) try: # This can be called only after the super create # since we need the subnet pool to be translated # to allocation pools self._validate_address_space( context, created_subnet) except n_exc.InvalidInput: # revert the subnet creation with excutils.save_and_reraise_exception(): super(NsxPluginV3Base, self).delete_subnet( context, created_subnet['id']) self._extension_manager.process_create_subnet(context, subnet['subnet'], created_subnet) if not dhcp_relay: try: self._enable_native_dhcp( context, network, created_subnet, az=net_az) except (nsx_lib_exc.ManagerError, nsx_exc.NsxPluginException): with excutils.save_and_reraise_exception(): super(NsxPluginV3Base, self).delete_subnet( context, created_subnet['id']) else: msg = (_("Can not create more than one DHCP-enabled " "subnet in network %s") % net_id) else: msg = _("Native DHCP is not supported for %(type)s " "network %(id)s") % {'id': net_id, 'type': ddi_type} if msg: LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) else: # Subnet without DHCP created_subnet = super(NsxPluginV3Base, self).create_subnet( context, subnet) try: # This can be called only after the super create # since we need the subnet pool to be translated # to allocation pools self._validate_address_space(context, created_subnet) except n_exc.InvalidInput: # revert the subnet creation with excutils.save_and_reraise_exception(): super(NsxPluginV3Base, self).delete_subnet( context, created_subnet['id']) return created_subnet def _create_bulk_with_callback(self, resource, context, request_items, post_create_func=None, rollback_func=None): # This is a copy of the _create_bulk() in db_base_plugin_v2.py, # but extended with user-provided callback functions. 
objects = [] collection = "%ss" % resource items = request_items[collection] try: with db_api.CONTEXT_WRITER.using(context): for item in items: obj_creator = getattr(self, 'create_%s' % resource) obj = obj_creator(context, item) objects.append(obj) if post_create_func: # The user-provided post_create function is called # after a new object is created. post_create_func(obj) except Exception: if rollback_func: # The user-provided rollback function is called when an # exception occurred. for obj in objects: rollback_func(obj) # Note that the session.rollback() function is called here. # session.rollback() will invoke transaction.rollback() on # the transaction this session maintains. The latter will # deactivate the transaction and clear the session's cache. # # But depending on where the exception occurred, # transaction.rollback() may have already been called # internally before reaching here. # # For example, if the exception happened under a # "with session.begin(subtransactions=True):" statement # anywhere in the middle of processing obj_creator(), # transaction.__exit__() will invoke transaction.rollback(). # Thus when the exception reaches here, the session's cache # is already empty. 
context.session.rollback() with excutils.save_and_reraise_exception(): LOG.error("An exception occurred while creating " "the %(resource)s:%(item)s", {'resource': resource, 'item': item}) return objects def _post_create_subnet(self, context, subnet): LOG.debug("Collect native DHCP entries for network %s", subnet['network_id']) dhcp_service = nsx_db.get_nsx_service_binding( context.session, subnet['network_id'], nsxlib_consts.SERVICE_DHCP) if dhcp_service: _net_id, nsx_port_id = nsx_db.get_nsx_switch_and_port_id( context.session, dhcp_service['port_id']) return {'nsx_port_id': nsx_port_id, 'nsx_service_id': dhcp_service['nsx_service_id']} def _rollback_subnet(self, subnet, dhcp_info): LOG.debug("Rollback native DHCP entries for network %s", subnet['network_id']) if dhcp_info and self.nsxlib: try: self.nsxlib.logical_port.delete(dhcp_info['nsx_port_id']) except Exception as e: LOG.error("Failed to delete logical port %(id)s " "during rollback. Exception: %(e)s", {'id': dhcp_info['nsx_port_id'], 'e': e}) try: self.nsxlib.dhcp_server.delete(dhcp_info['nsx_service_id']) except Exception as e: LOG.error("Failed to delete logical DHCP server %(id)s " "during rollback. Exception: %(e)s", {'id': dhcp_info['nsx_service_id'], 'e': e}) def create_subnet_bulk(self, context, subnets): # Maintain a local cache here because when the rollback function # is called, the cache in the session may have already been cleared. 
_subnet_dhcp_info = {} def _post_create(subnet): if subnet['enable_dhcp']: _subnet_dhcp_info[subnet['id']] = self._post_create_subnet( context, subnet) def _rollback(subnet): if (subnet and subnet['enable_dhcp'] and subnet['id'] in _subnet_dhcp_info): self._rollback_subnet(subnet, _subnet_dhcp_info[subnet['id']]) del _subnet_dhcp_info[subnet['id']] # callback should be called only with MP DHCP if (self._has_native_dhcp_metadata() and (not hasattr(self, 'use_policy_dhcp') or not self.use_policy_dhcp)): return self._create_bulk_with_callback('subnet', context, subnets, _post_create, _rollback) else: return self._create_bulk('subnet', context, subnets) def _get_neutron_net_ids_by_nsx_id(self, context, nsx_id): """Should be implemented by each plugin""" pass def _validate_number_of_subnet_static_routes(self, subnet_input): s = subnet_input['subnet'] request_host_routes = (validators.is_attr_set(s.get('host_routes')) and s['host_routes']) num_allowed_on_backend = nsxlib_consts.MAX_STATIC_ROUTES if request_host_routes: if len(request_host_routes) > num_allowed_on_backend: err_msg = (_( "Number of static routes is limited at the backend to %(" "backend)s. Requested %(requested)s") % {'backend': nsxlib_consts.MAX_STATIC_ROUTES, 'requested': len(request_host_routes)}) raise n_exc.InvalidInput(error_message=err_msg) def get_subnets(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): filters = filters or {} lswitch_ids = filters.pop(as_providers.ADV_SERVICE_PROVIDERS, []) if lswitch_ids: # This is a request from Nova for metadata processing. # Find the corresponding neutron network for each logical switch. 
network_ids = filters.pop('network_id', []) context = context.elevated() for lswitch_id in lswitch_ids: network_ids += self._get_neutron_net_ids_by_nsx_id( context, lswitch_id) filters['network_id'] = network_ids return super(NsxPluginV3Base, self).get_subnets( context, filters, fields, sorts, limit, marker, page_reverse) def delete_subnet_with_mp_dhcp(self, context, subnet_id): # TODO(berlin): cancel public external subnet announcement if self._has_native_dhcp_metadata(): # Ensure that subnet is not deleted if attached to router. self._subnet_check_ip_allocations_internal_router_ports( context, subnet_id) subnet = self.get_subnet(context, subnet_id) if self._subnet_with_native_dhcp(subnet): lock = 'nsxv3_network_' + subnet['network_id'] with locking.LockManager.get_lock(lock): # Check if it is the last DHCP-enabled subnet to delete. network = self._get_network(context, subnet['network_id']) if self._has_single_dhcp_enabled_subnet(context, network): try: self._disable_native_dhcp(context, network['id']) except Exception as e: LOG.error("Failed to disable native DHCP for " "network %(id)s. 
Exception: %(e)s", {'id': network['id'], 'e': e}) super(NsxPluginV3Base, self).delete_subnet( context, subnet_id) return super(NsxPluginV3Base, self).delete_subnet(context, subnet_id) def update_subnet_with_mp_dhcp(self, context, subnet_id, subnet): updated_subnet = None orig_subnet = self.get_subnet(context, subnet_id) self._validate_number_of_subnet_static_routes(subnet) self._validate_host_routes_input( subnet, orig_enable_dhcp=orig_subnet['enable_dhcp'], orig_host_routes=orig_subnet['host_routes']) network = self._get_network(context, orig_subnet['network_id']) if self._has_native_dhcp_metadata(): enable_dhcp = self._subnet_with_native_dhcp( subnet['subnet'], orig_subnet=orig_subnet) orig_enable_dhcp = self._subnet_with_native_dhcp(orig_subnet) if enable_dhcp != orig_enable_dhcp: self._ensure_native_dhcp() self._validate_external_subnet( context, orig_subnet['network_id']) lock = 'nsxv3_network_' + orig_subnet['network_id'] with locking.LockManager.get_lock(lock): if enable_dhcp: (ddi_support, ddi_type) = self._is_ddi_supported_on_net_with_type( context, orig_subnet['network_id'], network=network) if ddi_support: if self._has_no_dhcp_enabled_subnet( context, network): net_az = self.get_network_az_by_net_id( context, orig_subnet['network_id']) self._validate_net_dhcp_profile( context, network, net_az) updated_subnet = super( NsxPluginV3Base, self).update_subnet( context, subnet_id, subnet) self._extension_manager.process_update_subnet( context, subnet['subnet'], updated_subnet) self._enable_native_dhcp( context, network, updated_subnet, az=net_az) msg = None else: msg = (_("Multiple DHCP-enabled subnets is " "not allowed in network %s") % orig_subnet['network_id']) else: msg = (_("Native DHCP is not supported for " "%(type)s network %(id)s") % {'id': orig_subnet['network_id'], 'type': ddi_type}) if msg: LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) elif self._has_single_dhcp_enabled_subnet(context, network): self._disable_native_dhcp(context, 
network['id']) updated_subnet = super( NsxPluginV3Base, self).update_subnet( context, subnet_id, subnet) self._extension_manager.process_update_subnet( context, subnet['subnet'], updated_subnet) if not updated_subnet: updated_subnet = super(NsxPluginV3Base, self).update_subnet( context, subnet_id, subnet) self._extension_manager.process_update_subnet( context, subnet['subnet'], updated_subnet) # Check if needs to update logical DHCP server for native DHCP. if self._subnet_with_native_dhcp(updated_subnet): self._ensure_native_dhcp() kwargs = {} for key in ('dns_nameservers', 'gateway_ip', 'host_routes'): if key in subnet['subnet']: value = subnet['subnet'][key] if value != orig_subnet[key]: kwargs[key] = value if key != 'dns_nameservers': kwargs['options'] = None if 'options' in kwargs: sr, gw_ip = self._build_static_routes( updated_subnet.get('gateway_ip'), updated_subnet.get('cidr'), updated_subnet.get('host_routes', [])) kwargs['options'] = {'option121': {'static_routes': sr}} kwargs.pop('host_routes', None) if (gw_ip is not None and 'gateway_ip' not in kwargs and gw_ip != updated_subnet['gateway_ip']): kwargs['gateway_ip'] = gw_ip if kwargs: dhcp_service = nsx_db.get_nsx_service_binding( context.session, orig_subnet['network_id'], nsxlib_consts.SERVICE_DHCP) if dhcp_service: try: self.nsxlib.dhcp_server.update( dhcp_service['nsx_service_id'], **kwargs) except nsx_lib_exc.ManagerError: with excutils.save_and_reraise_exception(): LOG.error( "Unable to update logical DHCP server " "%(server)s for network %(network)s", {'server': dhcp_service['nsx_service_id'], 'network': orig_subnet['network_id']}) if 'options' in kwargs: # Need to update the static binding of every VM in # this logical DHCP server. 
bindings = nsx_db.get_nsx_dhcp_bindings_by_service( context.session, dhcp_service['nsx_service_id']) for binding in bindings: port = self._get_port(context, binding['port_id']) dhcp_opts = port.get(ext_edo.EXTRADHCPOPTS) self._update_dhcp_binding_on_server( context, binding, port['mac_address'], binding['ip_address'], port['network_id'], gateway_ip=kwargs.get('gateway_ip', False), dhcp_opts=dhcp_opts, options=kwargs.get('options'), subnet=updated_subnet) return updated_subnet def _has_active_port(self, context, network_id): ports_in_use = context.session.query(models_v2.Port).filter_by( network_id=network_id).all() return not all([p.device_owner in db_base_plugin_v2.AUTO_DELETE_PORT_OWNERS for p in ports_in_use]) if ports_in_use else False def _delete_network_disable_dhcp(self, context, network_id): # Disable native DHCP and delete DHCP ports before network deletion lock = 'nsxv3_network_' + network_id with locking.LockManager.get_lock(lock): # Disable native DHCP if there is no other existing port # besides DHCP port. if not self._has_active_port(context, network_id): self._disable_native_dhcp(context, network_id) def _retry_delete_network(self, context, network_id): """This method attempts to retry the delete on a network if there are AUTO_DELETE_PORT_OWNERS left. This is to avoid a race condition between delete_network and the dhcp creating a port on the network. """ first_try = True while True: try: with db_api.CONTEXT_WRITER.using(context): self._process_l3_delete(context, network_id) return super(NsxPluginV3Base, self).delete_network( context, network_id) except n_exc.NetworkInUse: # There is a race condition in delete_network() that we need # to work around here. delete_network() issues a query to # automatically delete DHCP ports and then checks to see if any # ports exist on the network. 
If a network is created and # deleted quickly, such as when running tempest, the DHCP agent # may be creating its port for the network around the same time # that the network is deleted. This can result in the DHCP # port getting created in between these two queries in # delete_network(). To work around that, we'll call # delete_network() a second time if we get a NetworkInUse # exception but the only port(s) that exist are ones that # delete_network() is supposed to automatically delete. if not first_try: # We tried once to work around the known race condition, # but we still got the exception, so something else is # wrong that we can't recover from. raise first_try = False if self._has_active_port(context, network_id): # There is a port on the network that is not going to be # automatically deleted (such as a tenant created port), so # we have nothing else to do but raise the exception. raise def _build_static_routes(self, gateway_ip, cidr, host_routes): # The following code is based on _generate_opts_per_subnet() in # neutron/agent/linux/dhcp.py. It prepares DHCP options for a subnet. # This code is for IPv4 only (IPv6 dhcp does not support options) # Add route for directly connected network. static_routes = [{'network': cidr, 'next_hop': '0.0.0.0'}] # Copy routes from subnet host_routes attribute. if host_routes: for hr in host_routes: if hr['destination'] == constants.IPv4_ANY: if not gateway_ip: gateway_ip = hr['nexthop'] else: static_routes.append({'network': hr['destination'], 'next_hop': hr['nexthop']}) # If gateway_ip is defined, add default route via this gateway. if gateway_ip: static_routes.append({'network': constants.IPv4_ANY, 'next_hop': gateway_ip}) return static_routes, gateway_ip def _get_dhcp_options(self, context, ip, extra_dhcp_opts, net_id, subnet): # Always add option121. 
net_az = self.get_network_az_by_net_id(context, net_id) options = {'option121': {'static_routes': [ {'network': '%s' % net_az.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % net_az.native_metadata_route, 'next_hop': ip}]}} if subnet: sr, gateway_ip = self._build_static_routes( subnet.get('gateway_ip'), subnet.get('cidr'), subnet.get('host_routes', [])) options['option121']['static_routes'].extend(sr) # Adding extra options only if configured on port if extra_dhcp_opts: other_opts = [] for opt in extra_dhcp_opts: opt_name = opt['opt_name'] if opt['opt_value'] is not None: # None value means - delete this option. Since we rebuild # the options from scratch, it can be ignored. opt_val = opt['opt_value'] if opt_name == 'classless-static-route': # Add to the option121 static routes net, ip = opt_val.split(',') options['option121']['static_routes'].append({ 'network': net, 'next_hop': ip}) else: other_opts.append({ 'code': nsxlib_utils.get_dhcp_opt_code(opt_name), 'values': [opt_val]}) if other_opts: options['others'] = other_opts return options def _update_dhcp_binding_on_server(self, context, binding, mac, ip, net_id, gateway_ip=False, dhcp_opts=None, options=None, subnet=None): try: data = {'mac_address': mac, 'ip_address': ip} if ip != binding['ip_address']: data['host_name'] = 'host-%s' % ip.replace('.', '-') data['options'] = self._get_dhcp_options( context, ip, dhcp_opts, net_id, subnet) elif (dhcp_opts is not None or options is not None): data['options'] = self._get_dhcp_options( context, ip, dhcp_opts, net_id, subnet) if gateway_ip is not False: # Note that None is valid for gateway_ip, means deleting it. 
data['gateway_ip'] = gateway_ip self.nsxlib.dhcp_server.update_binding( binding['nsx_service_id'], binding['nsx_binding_id'], **data) LOG.debug("Updated static binding (mac: %(mac)s, ip: %(ip)s, " "gateway: %(gateway)s) for port %(port)s on " "logical DHCP server %(server)s", {'mac': mac, 'ip': ip, 'gateway': gateway_ip, 'port': binding['port_id'], 'server': binding['nsx_service_id']}) except nsx_lib_exc.ManagerError: with excutils.save_and_reraise_exception(): LOG.error("Unable to update static binding (mac: %(mac)s, " "ip: %(ip)s, gateway: %(gateway)s) for port " "%(port)s on logical DHCP server %(server)s", {'mac': mac, 'ip': ip, 'gateway': gateway_ip, 'port': binding['port_id'], 'server': binding['nsx_service_id']}) def _validate_extra_dhcp_options(self, opts): if not opts or not self._has_native_dhcp_metadata(): return for opt in opts: opt_name = opt['opt_name'] opt_val = opt['opt_value'] if opt_name == 'classless-static-route': # separate validation for option121 if opt_val is not None: try: net, ip = opt_val.split(',') except Exception: msg = (_("Bad value %(val)s for DHCP option " "%(name)s") % {'name': opt_name, 'val': opt_val}) raise n_exc.InvalidInput(error_message=msg) elif not nsxlib_utils.get_dhcp_opt_code(opt_name): msg = (_("DHCP option %s is not supported") % opt_name) raise n_exc.InvalidInput(error_message=msg) def _is_ddi_supported_on_network(self, context, network_id, network=None): result, _ = self._is_ddi_supported_on_net_with_type( context, network_id, network=network) return result def _is_ddi_supported_on_net_with_type(self, context, network_id, network=None): # Get the network dictionary from the inputs if network: net = (network if isinstance(network, dict) else self._translate_net_db_2_dict(context, network)) else: net = self.get_network(context, network_id) # NSX current does not support transparent VLAN ports for # DHCP and metadata if cfg.CONF.vlan_transparent: if net.get('vlan_transparent') is True: return False, "VLAN transparent" # 
NSX current does not support flat network ports for # DHCP and metadata if net.get(pnet.NETWORK_TYPE) == utils.NsxV3NetworkTypes.FLAT: return False, "flat" # supported for overlay networks, and for vlan networks depending on # NSX version is_overlay = self._is_overlay_network(context, network_id) net_type = "overlay" if is_overlay else "non-overlay" return True, net_type def _has_no_dhcp_enabled_subnet(self, context, network): # Check if there is no DHCP-enabled subnet in the network. for subnet in network.subnets: if (subnet.enable_dhcp and subnet.ipv6_address_mode != constants.IPV6_SLAAC): return False return True def _has_single_dhcp_enabled_subnet(self, context, network): # Check if there is only one DHCP-enabled subnet in the network. count = 0 for subnet in network.subnets: if subnet.enable_dhcp and subnet.ip_version == 4: count += 1 if count > 1: return False return True if count == 1 else False def _cidrs_overlap(self, cidr0, cidr1): return cidr0.first <= cidr1.last and cidr1.first <= cidr0.last def _validate_address_space(self, context, subnet): # get the subnet IPs if ('allocation_pools' in subnet and validators.is_attr_set(subnet['allocation_pools'])): # use the pools instead of the cidr subnet_networks = [ netaddr.IPRange(pool.get('start'), pool.get('end')) for pool in subnet.get('allocation_pools')] else: cidr = subnet.get('cidr') if not validators.is_attr_set(cidr): return subnet_networks = [netaddr.IPNetwork(subnet['cidr'])] # Check if subnet overlaps with shared address space. # This is checked on the backend when attaching subnet to a router. 
shared_ips_cidrs = self._get_conf_attr('transit_networks') for subnet_net in subnet_networks: for shared_ips in shared_ips_cidrs: if netaddr.IPSet(subnet_net) & netaddr.IPSet([shared_ips]): msg = _("Subnet overlaps with shared address space " "%s") % shared_ips LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) # Ensure that the NSX uplink cidr does not lie on the same subnet as # the external subnet filters = {'id': [subnet['network_id']], 'router:external': [True]} external_nets = self.get_networks(context, filters=filters) tier0_routers = [ext_net[pnet.PHYSICAL_NETWORK] for ext_net in external_nets if ext_net.get(pnet.PHYSICAL_NETWORK)] for tier0_rtr in set(tier0_routers): tier0_cidrs = self._get_tier0_uplink_cidrs(tier0_rtr) for cidr in tier0_cidrs: tier0_subnet = netaddr.IPNetwork(cidr).cidr for subnet_network in subnet_networks: if self._cidrs_overlap(tier0_subnet, subnet_network): msg = _("External subnet cannot overlap with T0 " "router cidr %s") % cidr LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) def _need_router_no_dnat_rules(self, subnet): # NAT is not supported for IPv6 return (subnet['ip_version'] == 4) def _need_router_snat_rules(self, context, router_id, subnet, gw_address_scope): # NAT is not supported for IPv6 if subnet['ip_version'] != 4: return False # if the subnets address scope is the same as the gateways: # no need for SNAT if gw_address_scope: subnet_address_scope = self._get_subnetpool_address_scope( context, subnet['subnetpool_id']) if (gw_address_scope == subnet_address_scope): LOG.info("No need for SNAT rule for router %(router)s " "and subnet %(subnet)s because they use the " "same address scope %(addr_scope)s.", {'router': router_id, 'subnet': subnet['id'], 'addr_scope': gw_address_scope}) return False return True def _get_mdproxy_port_name(self, net_name, net_id): return utils.get_name_and_uuid('%s-%s' % ('mdproxy', net_name or 'network'), net_id) def _create_net_mp_mdproxy_port(self, context, network, az, 
nsx_net_id): """Add MD proxy on the MP logical-switch by creating a logical port""" if (not self.nsxlib or not self._has_native_dhcp_metadata()): return is_ddi_network = self._is_ddi_supported_on_network( context, network['id'], network=network) if is_ddi_network: # Enable native metadata proxy for this network. tags = self.nsxlib.build_v3_tags_payload( network, resource_type='os-neutron-net-id', project_name=context.tenant_name) name = self._get_mdproxy_port_name(network['name'], network['id']) try: md_port = self.nsxlib.logical_port.create( nsx_net_id, az._native_md_proxy_uuid, tags=tags, name=name, attachment_type=nsxlib_consts.ATTACHMENT_MDPROXY) except nsx_lib_exc.ResourceNotFound: err_msg = (_('Logical switch %s or MD proxy %s do ' 'not exist') % (nsx_net_id, az._native_md_proxy_uuid)) LOG.error(err_msg) raise nsx_exc.NsxPluginException(err_msg=err_msg) LOG.debug("Created MD-Proxy logical port %(port)s " "for network %(network)s", {'port': md_port['id'], 'network': network['id']}) def _delete_nsx_port_by_network(self, network_id): if not self.nsxlib: return port_id = self.nsxlib.get_id_by_resource_and_tag( self.nsxlib.logical_port.resource_type, 'os-neutron-net-id', network_id) if port_id: self.nsxlib.logical_port.delete(port_id) def _validate_multiple_subnets_routers(self, context, router_id, net_id, subnet): network = self.get_network(context, net_id) # Unable to attach a trunked network to a router interface if cfg.CONF.vlan_transparent: if network.get('vlan_transparent') is True: err_msg = (_("Transparent VLAN networks cannot be attached to " "a logical router.")) LOG.error(err_msg) raise n_exc.InvalidInput(error_message=err_msg) intf_ports = self._get_network_interface_ports( context.elevated(), net_id) router_ids = [port['device_id'] for port in intf_ports if port['device_id']] if len(router_ids) > 0: err_msg = _("Only one subnet of each IP version in a network " "%(net_id)s can be attached to router, one subnet " "is already attached to router 
%(router_id)s") % { 'net_id': net_id, 'router_id': router_ids[0]} if router_id in router_ids: # We support 2 subnets from same net only for dual stack case if not subnet: # No IP provided on connected port LOG.error(err_msg) raise n_exc.InvalidInput(error_message=err_msg) for port in intf_ports: if port['device_id'] != router_id: continue if 'fixed_ips' in port and port['fixed_ips']: ex_subnet = self.get_subnet( context.elevated(), port['fixed_ips'][0]['subnet_id']) if ex_subnet['ip_version'] == subnet['ip_version']: # attach to the same router with same IP version LOG.error(err_msg) raise n_exc.InvalidInput(error_message=err_msg) else: # attach to multiple routers LOG.error(err_msg) raise l3_exc.RouterInterfaceAttachmentConflict(reason=err_msg) def _router_has_edge_fw_rules(self, context, router): if not router.gw_port_id: # No GW -> No rule on the edge firewall return False if self.fwaas_callbacks and self.fwaas_callbacks.fwaas_enabled: ports = self._get_router_interfaces(context, router.id) return self.fwaas_callbacks.router_with_fwg(context, ports) def _get_tz_restricted_vlans(self, tz_uuid): if not self.nsxlib: return [] restricted_vlans = [] # Get all transport nodes of this transport zone tns = self.nsxlib.transport_node.list()['results'] for tn in tns: # Check if it belongs to the current TZ tzs = [ep.get('transport_zone_id') for ep in tn.get('transport_zone_endpoints', [])] if tz_uuid not in tzs: continue if ('host_switch_spec' in tn and 'host_switches' in tn['host_switch_spec']): for hs in tn['host_switch_spec']['host_switches']: profile_attrs = hs.get('host_switch_profile_ids', []) for profile_attr in profile_attrs: if profile_attr['key'] == 'UplinkHostSwitchProfile': profile = self.nsxlib.host_switch_profiles.get( profile_attr['value']) vlan_id = profile.get('transport_vlan') if vlan_id: restricted_vlans.append(vlan_id) return restricted_vlans @api_replay_mode_wrapper def _create_floating_ip_wrapper(self, context, floatingip): initial_status = 
(constants.FLOATINGIP_STATUS_ACTIVE if floatingip['floatingip']['port_id'] else constants.FLOATINGIP_STATUS_DOWN) return super(NsxPluginV3Base, self).create_floatingip( context, floatingip, initial_status=initial_status) def _ensure_default_security_group(self, context, tenant_id): # NOTE(arosen): if in replay mode we'll create all the default # security groups for the user with their data so we don't # want this to be called. if not cfg.CONF.api_replay_mode: return super(NsxPluginV3Base, self)._ensure_default_security_group( context, tenant_id) def _handle_api_replay_default_sg(self, context, secgroup_db): """Set default api-replay migrated SG as default manually""" if (secgroup_db['name'] == 'default'): # this is a default security group copied from another cloud # Ugly patch! mark it as default manually with context.session.begin(subtransactions=True): try: default_entry = securitygroup_model.DefaultSecurityGroup( security_group_id=secgroup_db['id'], project_id=secgroup_db['project_id']) context.session.add(default_entry) except Exception as e: LOG.error("Failed to mark migrated security group %(id)s " "as default %(e)s", {'id': secgroup_db['id'], 'e': e}) class TagsCallbacks(object): target = oslo_messaging.Target( namespace=None, version='1.0') def __init__(self, **kwargs): super(TagsCallbacks, self).__init__() def info(self, ctxt, publisher_id, event_type, payload, metadata): # Make sure we catch only tags operations, and each one only once # tagging events look like 'tag.create/delete.start/end' if (event_type.startswith('tag.') and event_type.endswith('.end')): action = event_type.split('.')[1] is_delete = (action == 'delete') # Currently support only ports tags if payload.get('parent_resource') == 'ports': core_plugin = directory.get_plugin() port_id = payload.get('parent_resource_id') core_plugin.update_port_nsx_tags(ctxt, port_id, payload.get('tags'), is_delete=is_delete) ././@PaxHeader0000000000000000000000000000003400000000000011452 
xustar000000000000000028 mtime=1586542531.1982536 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/dvs/0000755000175000017500000000000000000000000021645 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/dvs/__init__.py0000644000175000017500000000000000000000000023744 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/dvs/dhcp.py0000644000175000017500000000543000000000000023137 0ustar00coreycorey00000000000000# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from oslo_log import log as logging from neutron.agent.common import ovs_lib from neutron.agent.linux import dhcp LOG = logging.getLogger(__name__) OPTS = [ cfg.StrOpt('dvs_integration_bridge', default='br-dvs', help=_('Name of Open vSwitch bridge to use for DVS networks')), cfg.StrOpt('dhcp_override_mac', help=_('Override the MAC address of the DHCP interface')), ] cfg.CONF.register_opts(OPTS) class DeviceManager(dhcp.DeviceManager): def plug(self, network, port, interface_name): mac_address = (cfg.CONF.dhcp_override_mac if cfg.CONF.dhcp_override_mac else port.mac_address) self.driver.plug(network.id, port.id, interface_name, mac_address, namespace=network.namespace, mtu=network.get('mtu'), bridge=cfg.CONF.dvs_integration_bridge) vlan_tag = getattr(network, 'provider:segmentation_id', None) # Treat vlans if vlan_tag and vlan_tag != 0: br_dvs = ovs_lib.OVSBridge(self.conf.dvs_integration_bridge) # When ovs_use_veth is set to True, the DEV_NAME_PREFIX # will be changed from 'tap' to 'ns-' in # OVSInterfaceDriver dvs_port_name = interface_name.replace('ns-', 'tap') br_dvs.set_db_attribute("Port", dvs_port_name, "tag", vlan_tag) def unplug(self, device_name, network): self.driver.unplug( device_name, bridge=cfg.CONF.dvs_integration_bridge, namespace=network.namespace) class Dnsmasq(dhcp.Dnsmasq): def __init__(self, conf, network, process_monitor, version=None, plugin=None): super(Dnsmasq, self).__init__(conf, network, process_monitor, version=version, plugin=plugin) # Using the DeviceManager that enables us to directly plug the OVS LOG.debug("Using the DVS DeviceManager") self.device_manager = DeviceManager(conf, plugin) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/dvs/plugin.py0000644000175000017500000007120400000000000023521 0ustar00coreycorey00000000000000# Copyright 2012 VMware, Inc. 
# All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.api.definitions import allowedaddresspairs as addr_apidef from neutron_lib.api.definitions import external_net as enet_apidef from neutron_lib.api.definitions import l3 as l3_apidef from neutron_lib.api.definitions import multiprovidernet as mpnet_apidef from neutron_lib.api.definitions import port_security as psec from neutron_lib.api.definitions import portbindings as pbin from neutron_lib.api.definitions import provider_net as pnet from neutron_lib.api.definitions import vlantransparent as vlan_apidef from neutron_lib.api import validators from neutron_lib import constants from neutron_lib.db import api as db_api from neutron_lib.db import resource_extend from neutron_lib.db import utils as db_utils from neutron_lib import exceptions as n_exc from neutron_lib.exceptions import allowedaddresspairs as addr_exc from neutron_lib.exceptions import port_security as psec_exc from neutron_lib.plugins import utils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import uuidutils from neutron.api import extensions as neutron_extensions from neutron.db import agentschedulers_db from neutron.db import allowedaddresspairs_db as addr_pair_db from neutron.db import dns_db from neutron.db import external_net_db from neutron.db import l3_db from neutron.db.models import securitygroup as securitygroup_model from neutron.db import models_v2 from 
neutron.db import portbindings_db from neutron.db import portsecurity_db from neutron.db import securitygroups_db from neutron.db import vlantransparent_db as vlan_ext_db from neutron.extensions import securitygroup as ext_sg from neutron.quota import resource_registry import vmware_nsx from vmware_nsx._i18n import _ from vmware_nsx.common import config # noqa from vmware_nsx.common import managers as nsx_managers from vmware_nsx.common import nsx_constants from vmware_nsx.common import utils as c_utils from vmware_nsx.db import db as nsx_db from vmware_nsx.db import nsxv_db from vmware_nsx.dhcp_meta import modes as dhcpmeta_modes from vmware_nsx.dvs import dvs from vmware_nsx.dvs import dvs_utils from vmware_nsx.extensions import projectpluginmap from vmware_nsx.plugins.common import plugin as nsx_plugin_common from vmware_nsx.plugins.nsx import utils as tvd_utils LOG = logging.getLogger(__name__) @resource_extend.has_resource_extenders class NsxDvsV2(addr_pair_db.AllowedAddressPairsMixin, agentschedulers_db.DhcpAgentSchedulerDbMixin, nsx_plugin_common.NsxPluginBase, dhcpmeta_modes.DhcpMetadataAccess, external_net_db.External_net_db_mixin, l3_db.L3_NAT_dbonly_mixin, portbindings_db.PortBindingMixin, portsecurity_db.PortSecurityDbMixin, securitygroups_db.SecurityGroupDbMixin, dns_db.DNSDbMixin, vlan_ext_db.Vlantransparent_db_mixin): supported_extension_aliases = [addr_apidef.ALIAS, pbin.ALIAS, enet_apidef.ALIAS, mpnet_apidef.ALIAS, psec.ALIAS, pnet.ALIAS, "quotas", l3_apidef.ALIAS, "security-group", vlan_apidef.ALIAS] __native_bulk_support = True __native_pagination_support = True __native_sorting_support = True @resource_registry.tracked_resources( network=models_v2.Network, port=models_v2.Port, subnet=models_v2.Subnet, subnetpool=models_v2.SubnetPool, security_group=securitygroup_model.SecurityGroup, security_group_rule=securitygroup_model.SecurityGroupRule) def __init__(self): self._is_sub_plugin = tvd_utils.is_tvd_core_plugin() 
dvs_utils.dvs_register_exceptions() super(NsxDvsV2, self).__init__() if self._is_sub_plugin: extension_drivers = cfg.CONF.nsx_tvd.dvs_extension_drivers else: extension_drivers = cfg.CONF.nsx_extension_drivers self._extension_manager = nsx_managers.ExtensionManager( extension_drivers=extension_drivers) LOG.debug('Driver support: DVS: %s' % dvs_utils.dvs_is_enabled()) self._extension_manager.initialize() self.supported_extension_aliases.extend( self._extension_manager.extension_aliases()) neutron_extensions.append_api_extensions_path( [vmware_nsx.NSX_EXT_PATH]) self.cfg_group = 'dvs' # group name for dvs section in nsx.ini self._dvs = dvs.SingleDvsManager() self.setup_dhcpmeta_access() @staticmethod def plugin_type(): return projectpluginmap.NsxPlugins.DVS @staticmethod def is_tvd_plugin(): return False def plugin_extend_port_dict_binding(self, context, result): result[pbin.VIF_TYPE] = nsx_constants.VIF_TYPE_DVS if not result['id']: return db_vnic_type = nsxv_db.get_nsxv_ext_attr_port_vnic_type( context.session, result['id']) if db_vnic_type: result[pbin.VNIC_TYPE] = db_vnic_type else: result[pbin.VNIC_TYPE] = pbin.VNIC_NORMAL result[pbin.VIF_DETAILS] = { # TODO(rkukura): Replace with new VIF security details # security-groups extension supported by this plugin pbin.CAP_PORT_FILTER: True} @staticmethod def _extend_port_dict_binding(result, portdb): result[pbin.VIF_TYPE] = nsx_constants.VIF_TYPE_DVS port_attr = portdb.get('nsx_port_attributes') if port_attr: result[pbin.VNIC_TYPE] = port_attr.vnic_type else: result[pbin.VNIC_TYPE] = pbin.VNIC_NORMAL result[pbin.VIF_DETAILS] = { # TODO(rkukura): Replace with new VIF security details # security-groups extension supported by this plugin pbin.CAP_PORT_FILTER: True} def _extend_get_network_dict_provider(self, context, network, multiprovider=None, bindings=None): if not bindings: bindings = nsx_db.get_network_bindings(context.session, network['id']) if not multiprovider: multiprovider = 
nsx_db.is_multiprovider_network(context.session, network['id']) # With NSX plugin 'normal' overlay networks will have no binding # TODO(salvatore-orlando) make sure users can specify a distinct # phy_uuid as 'provider network' for STT net type if bindings: if not multiprovider: # network came in through provider networks api network[pnet.NETWORK_TYPE] = bindings[0].binding_type network[pnet.PHYSICAL_NETWORK] = bindings[0].phy_uuid network[pnet.SEGMENTATION_ID] = bindings[0].vlan_id else: # network come in though multiprovider networks api network[mpnet_apidef.SEGMENTS] = [ {pnet.NETWORK_TYPE: binding.binding_type, pnet.PHYSICAL_NETWORK: binding.phy_uuid, pnet.SEGMENTATION_ID: binding.vlan_id} for binding in bindings] def _dvs_get_id(self, net_data): if net_data['name'] == '': return net_data['id'] else: # Maximum name length is 80 characters. 'id' length is 36 # maximum prefix for name is 43 return '%s-%s' % (net_data['name'][:43], net_data['id']) def _add_port_group(self, dvs_id, net_data, vlan_tag, trunk_mode): if validators.is_attr_set(net_data.get(pnet.PHYSICAL_NETWORK)): dvs_name = net_data.get(pnet.PHYSICAL_NETWORK) dvs_moref = self._dvs.dvs.get_dvs_moref_by_name(dvs_name) self._dvs.dvs.add_port_group(dvs_moref, dvs_id, vlan_tag, trunk_mode=trunk_mode) else: dvs_name = dvs_utils.dvs_name_get() self._dvs.add_port_group(dvs_id, vlan_tag, trunk_mode=trunk_mode) return dvs_name def _get_portgroup_info(self, net_id): pg_info, dvpg_moref = self._dvs.dvs.get_port_group_info(None, net_id) return pg_info, dvpg_moref def _dvs_create_network(self, context, network): net_data = network['network'] if net_data['admin_state_up'] is False: LOG.warning("Network with admin_state_up=False are not yet " "supported by this plugin. 
Ignoring setting for " "network %s", net_data.get('name', '')) net_data['id'] = uuidutils.generate_uuid() vlan_tag = 0 if net_data.get(pnet.NETWORK_TYPE) == c_utils.NetworkTypes.VLAN: vlan_tag = net_data.get(pnet.SEGMENTATION_ID, 0) trunk_mode = False # vlan transparent can be an object if not set. if net_data.get(vlan_apidef.VLANTRANSPARENT) is True: trunk_mode = True net_id = dvs_name = None if net_data.get(pnet.NETWORK_TYPE) == c_utils.NetworkTypes.PORTGROUP: net_id = net_data.get(pnet.PHYSICAL_NETWORK) pg_info, dvpg_moref = self._get_portgroup_info(net_id) if pg_info.get('name') != net_data.get('name'): err_msg = (_("Portgroup name %(dvpg)s must match network " "name %(network)s") % {'dvpg': pg_info.get('name'), 'network': net_data.get('name')}) raise n_exc.InvalidInput(error_message=err_msg) dvs_id = dvpg_moref.value else: dvs_id = self._dvs_get_id(net_data) try: dvs_name = self._add_port_group(dvs_id, net_data, vlan_tag, trunk_mode=trunk_mode) except dvs_utils.DvsOperationBulkFault: LOG.warning('One or more hosts may not be configured') try: with db_api.CONTEXT_WRITER.using(context): new_net = super(NsxDvsV2, self).create_network(context, network) self._extension_manager.process_create_network( context, net_data, new_net) # Process port security extension self._process_network_port_security_create( context, net_data, new_net) # Process vlan transparent extension net_db = self._get_network(context, new_net['id']) net_db['vlan_transparent'] = trunk_mode net_data['vlan_transparent'] = trunk_mode resource_extend.apply_funcs('networks', net_data, net_db) nsx_db.add_network_binding( context.session, new_net['id'], net_data.get(pnet.NETWORK_TYPE), net_id or dvs_name, vlan_tag) except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Failed to create network') if (net_data.get(pnet.NETWORK_TYPE) != c_utils.NetworkTypes.PORTGROUP): self._delete_port_group(dvs_id, dvs_name) new_net[pnet.NETWORK_TYPE] = net_data.get(pnet.NETWORK_TYPE) 
new_net[pnet.PHYSICAL_NETWORK] = net_id or dvs_name new_net[pnet.SEGMENTATION_ID] = vlan_tag # this extra lookup is necessary to get the # latest db model for the extension functions net_model = self._get_network(context, net_data['id']) resource_extend.apply_funcs('networks', new_net, net_model) self.handle_network_dhcp_access(context, new_net, action='create_network') return new_net def _validate_network(self, context, net_data): network_type = net_data.get(pnet.NETWORK_TYPE) network_type_set = validators.is_attr_set(network_type) segmentation_id = net_data.get(pnet.SEGMENTATION_ID) segmentation_id_set = validators.is_attr_set(segmentation_id) physical_network = net_data.get(pnet.PHYSICAL_NETWORK) physical_network_set = validators.is_attr_set(physical_network) if network_type == 'vlan': if not physical_network_set: physical_network = dvs_utils.dvs_name_get() bindings = nsx_db.get_network_bindings_by_vlanid_and_physical_net( context.session, segmentation_id, physical_network) if bindings: err_msg = _("Network with that dvs-id and vlan tag already " "exists") raise n_exc.InvalidInput(error_message=err_msg) if not context.is_admin: err_msg = _("Only an admin can create a DVS provider " "network") raise n_exc.InvalidInput(error_message=err_msg) external = net_data.get(enet_apidef.EXTERNAL) is_external_net = validators.is_attr_set(external) and external if is_external_net: err_msg = _("External network cannot be created with dvs based " "port groups") raise n_exc.InvalidInput(error_message=err_msg) err_msg = None if not network_type_set: err_msg = _("Network provider information must be " "specified") raise n_exc.InvalidInput(error_message=err_msg) if (network_type == c_utils.NetworkTypes.FLAT or network_type == c_utils.NetworkTypes.PORTGROUP): if segmentation_id_set: err_msg = (_("Segmentation ID cannot be specified with " "%s network type") % network_type) if (network_type == c_utils.NetworkTypes.PORTGROUP and not physical_network_set): err_msg = (_("Physical 
network must be specified with " "%s network type") % network_type) elif network_type == c_utils.NetworkTypes.VLAN: if not segmentation_id_set: err_msg = _("Segmentation ID must be specified with " "vlan network type") if (segmentation_id_set and not utils.is_valid_vlan_tag(segmentation_id)): err_msg = (_("%(segmentation_id)s out of range " "(%(min_id)s through %(max_id)s)") % {'segmentation_id': segmentation_id, 'min_id': constants.MIN_VLAN_TAG, 'max_id': constants.MAX_VLAN_TAG}) else: err_msg = (_("%(net_type_param)s %(net_type_value)s not " "supported") % {'net_type_param': pnet.NETWORK_TYPE, 'net_type_value': network_type}) if err_msg: raise n_exc.InvalidInput(error_message=err_msg) def create_network(self, context, network): self._validate_network(context, network['network']) return self._dvs_create_network(context, network) def _delete_port_group(self, dvs_id, dvs_name): if dvs_name == dvs_utils.dvs_name_get(): self._dvs.delete_port_group(dvs_id) else: dvs_moref = self._dvs.dvs.get_dvs_moref_by_name(dvs_name) self._dvs.dvs.delete_port_group(dvs_moref, dvs_id) def _dvs_delete_network(self, context, id): network = self._get_network(context, id) dvs_id = self._dvs_get_id(network) bindings = nsx_db.get_network_bindings(context.session, id) with db_api.CONTEXT_WRITER.using(context): nsx_db.delete_network_bindings(context.session, id) super(NsxDvsV2, self).delete_network(context, id) try: if (not bindings or bindings[0].binding_type != c_utils.NetworkTypes.PORTGROUP): dvs_name = bindings[0].phy_uuid self._delete_port_group(dvs_id, dvs_name) except Exception: LOG.exception('Unable to delete DVS port group %s', id) self.handle_network_dhcp_access(context, id, action='delete_network') def delete_network(self, context, id): self._dvs_delete_network(context, id) def _dvs_get_network(self, context, id, fields=None): with db_api.CONTEXT_READER.using(context): # goto to the plugin DB and fetch the network network = self._get_network(context, id) # Don't do field selection 
here otherwise we won't be able # to add provider networks fields net_result = self._make_network_dict(network, context=context) self._extend_get_network_dict_provider(context, net_result) return db_utils.resource_fields(net_result, fields) def _dvs_get_network_type(self, context, id, fields=None): net = self._dvs_get_network(context, id, fields=fields) return net[pnet.NETWORK_TYPE] def get_network(self, context, id, fields=None): return self._dvs_get_network(context, id, fields=None) def get_networks(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): filters = filters or {} with db_api.CONTEXT_READER.using(context): networks = ( super(NsxDvsV2, self).get_networks( context, filters, fields, sorts, limit, marker, page_reverse)) for net in networks: self._extend_get_network_dict_provider(context, net) return (networks if not fields else [db_utils.resource_fields(network, fields) for network in networks]) def update_network(self, context, id, network): net_attrs = network['network'] c_utils.raise_if_updates_provider_attributes(net_attrs) with db_api.CONTEXT_WRITER.using(context): net_res = super(NsxDvsV2, self).update_network(context, id, network) self._extension_manager.process_update_network(context, net_attrs, net_res) # Process port security extension self._process_network_port_security_update( context, net_attrs, net_res) self._extend_get_network_dict_provider(context, net_res) return net_res def _process_vnic_type(self, context, port_data, port_id): vnic_type = port_data.get(pbin.VNIC_TYPE) if validators.is_attr_set(vnic_type): if (vnic_type != pbin.VNIC_NORMAL and vnic_type != pbin.VNIC_DIRECT and vnic_type != pbin.VNIC_DIRECT_PHYSICAL): err_msg = _("Only direct, direct-physical and normal VNIC " "types supported") raise n_exc.InvalidInput(error_message=err_msg) nsxv_db.update_nsxv_port_ext_attributes( session=context.session, port_id=port_id, vnic_type=vnic_type) def create_port(self, context, port): # If 
PORTSECURITY is not the default value ATTR_NOT_SPECIFIED # then we pass the port to the policy engine. The reason why we don't # pass the value to the policy engine when the port is # ATTR_NOT_SPECIFIED is for the case where a port is created on a # shared network that is not owned by the tenant. port_data = port['port'] network_type = self._dvs_get_network_type(context, port['port'][ 'network_id']) with db_api.CONTEXT_WRITER.using(context): # First we allocate port in neutron database neutron_db = super(NsxDvsV2, self).create_port(context, port) self._extension_manager.process_create_port( context, port_data, neutron_db) if network_type and network_type == 'vlan': # Not allowed to enable port security on vlan DVS ports port_data[psec.PORTSECURITY] = False else: port_security = self._get_network_security_binding( context, neutron_db['network_id']) port_data[psec.PORTSECURITY] = port_security self._process_port_port_security_create( context, port_data, neutron_db) # Update fields obtained from neutron db (eg: MAC address) port["port"].update(neutron_db) has_ip = self._ip_on_port(neutron_db) # security group extension checks if network_type and network_type != 'vlan': if has_ip: self._ensure_default_security_group_on_port(context, port) elif validators.is_attr_set(port_data.get( ext_sg.SECURITYGROUPS)): raise psec_exc.PortSecurityAndIPRequiredForSecurityGroups() if network_type and network_type == 'vlan': port_data[ext_sg.SECURITYGROUPS] = [] else: port_data[ext_sg.SECURITYGROUPS] = ( self._get_security_groups_on_port(context, port)) self._process_port_create_security_group( context, port_data, port_data[ext_sg.SECURITYGROUPS]) self._process_portbindings_create_and_update(context, port['port'], port_data) # allowed address pair checks if validators.is_attr_set(port_data.get( addr_apidef.ADDRESS_PAIRS)): if not port_security: raise addr_exc.AddressPairAndPortSecurityRequired() else: self._process_create_allowed_address_pairs( context, neutron_db, 
port_data[addr_apidef.ADDRESS_PAIRS]) else: # remove ATTR_NOT_SPECIFIED port_data[addr_apidef.ADDRESS_PAIRS] = [] self._process_portbindings_create_and_update(context, port['port'], port_data) self._process_vnic_type(context, port_data, neutron_db['id']) LOG.debug("create_port completed on NSX for tenant " "%(tenant_id)s: (%(id)s)", port_data) # DB Operation is complete, perform DVS operation port_data = port['port'] self.plugin_extend_port_dict_binding(context, port_data) self.handle_port_dhcp_access(context, port_data, action='create_port') return port_data def update_port(self, context, id, port): delete_addr_pairs = self._check_update_deletes_allowed_address_pairs( port) has_addr_pairs = self._check_update_has_allowed_address_pairs(port) with db_api.CONTEXT_WRITER.using(context): ret_port = super(NsxDvsV2, self).update_port( context, id, port) # Save current mac learning state to check whether it's # being updated or not # copy values over - except fixed_ips as # they've already been processed port['port'].pop('fixed_ips', None) ret_port.update(port['port']) # populate port_security setting, ignoring vlan network ports. 
network_type = self._dvs_get_network_type(context, ret_port['network_id']) if (psec.PORTSECURITY not in port['port'] and network_type != 'vlan'): ret_port[psec.PORTSECURITY] = self._get_port_security_binding( context, id) elif (network_type == 'vlan' and psec.PORTSECURITY in port['port'] and port['port'][psec.PORTSECURITY]): # Not allowed to enable port security on vlan DVS ports err_msg = _("Cannot enable port security on port %s") % id raise n_exc.InvalidInput(error_message=err_msg) # validate port security and allowed address pairs if not ret_port[psec.PORTSECURITY]: # has address pairs in request if has_addr_pairs: raise addr_exc.AddressPairAndPortSecurityRequired() elif not delete_addr_pairs: # check if address pairs are in db ret_port[addr_apidef.ADDRESS_PAIRS] = ( self.get_allowed_address_pairs(context, id)) if ret_port[addr_apidef.ADDRESS_PAIRS]: raise addr_exc.AddressPairAndPortSecurityRequired() if delete_addr_pairs or has_addr_pairs: # delete address pairs and read them in self._delete_allowed_address_pairs(context, id) self._process_create_allowed_address_pairs( context, ret_port, ret_port[addr_apidef.ADDRESS_PAIRS]) if psec.PORTSECURITY in port['port']: if network_type != 'vlan': self._process_port_port_security_update( context, port['port'], ret_port) else: ret_port[psec.PORTSECURITY] = False self._process_vnic_type(context, port['port'], id) LOG.debug("Updating port: %s", port) self._extension_manager.process_update_port( context, port['port'], ret_port) self._process_portbindings_create_and_update(context, port['port'], ret_port) return ret_port def delete_port(self, context, id, l3_port_check=True, nw_gw_port_check=True): """Deletes a port on a specified Virtual Network. If the port contains a remote interface attachment, the remote interface is first un-plugged and then the port is deleted. 
:returns: None :raises: exception.PortInUse :raises: exception.PortNotFound :raises: exception.NetworkNotFound """ neutron_db_port = self.get_port(context, id) with db_api.CONTEXT_WRITER.using(context): # metadata_dhcp_host_route self.handle_port_metadata_access( context, neutron_db_port, is_delete=True) super(NsxDvsV2, self).delete_port(context, id) self.handle_port_dhcp_access( context, neutron_db_port, action='delete_port') def get_ports(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): filters = filters or {} with db_api.CONTEXT_READER.using(context): ports = ( super(NsxDvsV2, self).get_ports( context, filters, fields, sorts, limit, marker, page_reverse)) self._log_get_ports(ports, filters) # Add port extensions for port in ports: self.plugin_extend_port_dict_binding(context, port) return (ports if not fields else [db_utils.resource_fields(port, fields) for port in ports]) def get_port(self, context, id, fields=None): port = super(NsxDvsV2, self).get_port(context, id, fields=None) self.plugin_extend_port_dict_binding(context, port) return db_utils.resource_fields(port, fields) def create_router(self, context, router): # DVS backend cannot support logical router msg = (_("Unable to create router %s with DVS") % router['router']['name']) raise n_exc.BadRequest(resource="router", msg=msg) def get_network_availability_zones(self, net_db): """Api to comply with the NSX-TVD plugin""" return [] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1982536 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx/0000755000175000017500000000000000000000000021661 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx/__init__.py0000644000175000017500000000000000000000000023760 
0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx/plugin.py0000644000175000017500000013411000000000000023531 0ustar00coreycorey00000000000000# Copyright 2014 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from neutron_lib.api.definitions import network as net_def from neutron_lib.api.definitions import port as port_def from neutron_lib.api.definitions import subnet as subnet_def from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import context as n_context from neutron_lib.db import api as db_api from neutron_lib.db import resource_extend from neutron_lib.db import utils as db_utils from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import uuidutils from neutron.db import agents_db from neutron.db import agentschedulers_db from neutron.db import allowedaddresspairs_db as addr_pair_db from neutron.db.availability_zone import router as router_az_db from neutron.db import external_net_db from neutron.db import extradhcpopt_db from neutron.db import extraroute_db from neutron.db import l3_db from neutron.db import l3_gwmode_db from 
neutron.db.models import l3 as l3_db_models from neutron.db.models import securitygroup as securitygroup_model from neutron.db import models_v2 from neutron.db import portsecurity_db from neutron.db import securitygroups_db from neutron.quota import resource_registry from neutron_lib.api import validators from neutron_lib import exceptions as n_exc from vmware_nsx.common import availability_zones as nsx_com_az from vmware_nsx.common import config from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import locking from vmware_nsx.common import managers as nsx_managers from vmware_nsx.common import utils as com_utils from vmware_nsx.db import ( routertype as rt_rtr) from vmware_nsx.db import db as nsx_db from vmware_nsx.db import nsx_portbindings_db as pbin_db from vmware_nsx.extensions import advancedserviceproviders as as_providers from vmware_nsx.extensions import projectpluginmap from vmware_nsx.plugins.common import plugin as nsx_plugin_common from vmware_nsx.plugins.dvs import plugin as dvs from vmware_nsx.plugins.nsx_v import plugin as v from vmware_nsx.plugins.nsx_v3 import plugin as t from vmware_nsx.services.lbaas.octavia import octavia_listener from vmware_nsx.services.lbaas.octavia import tvd_wrapper as octavia_tvd LOG = logging.getLogger(__name__) TVD_PLUGIN_TYPE = "Nsx-TVD" @resource_extend.has_resource_extenders class NsxTVDPlugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin, addr_pair_db.AllowedAddressPairsMixin, agents_db.AgentDbMixin, nsx_plugin_common.NsxPluginBase, rt_rtr.RouterType_mixin, external_net_db.External_net_db_mixin, extraroute_db.ExtraRoute_db_mixin, extradhcpopt_db.ExtraDhcpOptMixin, router_az_db.RouterAvailabilityZoneMixin, l3_gwmode_db.L3_NAT_db_mixin, pbin_db.NsxPortBindingMixin, portsecurity_db.PortSecurityDbMixin, securitygroups_db.SecurityGroupDbMixin, nsx_com_az.NSXAvailabilityZonesPluginCommon, projectpluginmap.ProjectPluginMapPluginBase): supported_extension_aliases = [projectpluginmap.ALIAS] 
__native_bulk_support = True __native_pagination_support = True __native_sorting_support = True @resource_registry.tracked_resources( network=models_v2.Network, port=models_v2.Port, subnet=models_v2.Subnet, subnetpool=models_v2.SubnetPool, security_group=securitygroup_model.SecurityGroup, security_group_rule=securitygroup_model.SecurityGroupRule, router=l3_db_models.Router, floatingip=l3_db_models.FloatingIP) def __init__(self): self._extension_manager = nsx_managers.ExtensionManager() LOG.info("Start NSX TVD Plugin") self.init_is_complete = False # Validate configuration config.validate_nsx_config_options() super(NsxTVDPlugin, self).__init__() # init the different supported plugins self.init_plugins() # init the extensions supported by any of the plugins self.init_extensions() self._unsubscribe_callback_events() registry.subscribe(self.spawn_complete, resources.PROCESS, events.AFTER_SPAWN) registry.subscribe(self.init_complete, resources.PROCESS, events.AFTER_INIT) @staticmethod def plugin_type(): return TVD_PLUGIN_TYPE @staticmethod def is_tvd_plugin(): return True @com_utils.retry_upon_exception(Exception, 0.5, 2, cfg.CONF.nsx_tvd.init_retries) def _call_plugin_init_with_retry(self, map_type, plugin_class): try: self.plugins[map_type] = plugin_class() except Exception as e: with excutils.save_and_reraise_exception(): LOG.warning("%s plugin failed to initialized: %s", map_type.upper(), e) def _init_plugin(self, map_type, plugin_class): if map_type not in cfg.CONF.nsx_tvd.enabled_plugins: # skip this plugin LOG.info("%s plugin was not enabled by the configuration", map_type.upper()) return try: self._call_plugin_init_with_retry(map_type, plugin_class) except Exception as e: LOG.warning("%s plugin will not be supported", map_type.upper()) if map_type == self.default_plugin: msg = (_("The default plugin %(def)s failed to start. 
" "Reason: %(reason)s") % {'def': self.default_plugin, 'reason': e}) LOG.error(msg) raise nsx_exc.NsxPluginException(err_msg=msg) else: LOG.info("%s plugin will be supported", map_type.upper()) def init_plugins(self): # initialize all supported plugins self.plugins = {} self.as_providers = {} # update the default plugin for new projects self.default_plugin = cfg.CONF.nsx_tvd.default_plugin plugins = [(projectpluginmap.NsxPlugins.NSX_T, t.NsxV3Plugin), (projectpluginmap.NsxPlugins.NSX_V, v.NsxVPluginV2), (projectpluginmap.NsxPlugins.DVS, dvs.NsxDvsV2)] for (map_type, plugin_class) in plugins: self._init_plugin(map_type, plugin_class) if not len(self.plugins): msg = _("No active plugins were found") raise nsx_exc.NsxPluginException(err_msg=msg) for k, val in self.plugins.items(): if as_providers.ALIAS in val.supported_extension_aliases: self.as_providers[k] = val LOG.info("NSX-TVD plugin will use %s as the default plugin", self.default_plugin) # validate the availability zones configuration self.init_availability_zones() def get_plugin_by_type(self, plugin_type): return self.plugins.get(plugin_type) def init_extensions(self): # Support all the extensions supported by any of the plugins extensions = [] for plugin in self.plugins: extensions.extend(self.plugins[plugin].supported_extension_aliases) self.supported_extension_aliases.extend(list(set(extensions))) # mark extensions which are supported by only one of the plugins self._unsupported_fields = {} for plugin in self.plugins: # TODO(asarfaty): add other resources here plugin_type = self.plugins[plugin].plugin_type() self._unsupported_fields[plugin_type] = {'router': [], 'port': [], 'security_group': []} # router size and type are supported only by the V plugin if plugin_type in [t.NsxV3Plugin.plugin_type(), dvs.NsxDvsV2.plugin_type()]: self._unsupported_fields[plugin_type]['router'] = [ 'router_size', 'router_type'] # port mac learning, and provider sg are not supported by # the dvs plugin if plugin_type in 
[dvs.NsxDvsV2.plugin_type()]: self._unsupported_fields[plugin_type]['port'] = [ 'mac_learning_enabled', 'provider_security_groups'] # security group policy can be supported only by nsx-v if plugin_type in [t.NsxV3Plugin.plugin_type(), dvs.NsxDvsV2.plugin_type()]: self._unsupported_fields[plugin_type]['security_group'] = [ 'policy'] def init_availability_zones(self): # Make sure there are no overlaps between v/t availability zones if (self.plugins.get(projectpluginmap.NsxPlugins.NSX_V) and self.plugins.get(projectpluginmap.NsxPlugins.NSX_T) and bool(set(cfg.CONF.nsxv.availability_zones) & set(cfg.CONF.nsx_v3.availability_zones))): msg = _("Cannot use the same availability zones in NSX-V and T") raise nsx_exc.NsxPluginException(err_msg=msg) def _get_octavia_objects(self, plugin_type): plugin = self.get_plugin_by_type(plugin_type) if plugin: return plugin._get_octavia_objects() else: return {'loadbalancer': None, 'listener': None, 'pool': None, 'member': None, 'healthmonitor': None, 'l7policy': None, 'l7rule': None} def init_complete(self, resource, event, trigger, payload=None): with locking.LockManager.get_lock('plugin-init-complete-tvd'): if self.init_is_complete: # Should be called only once per worker return self.init_octavia() self.init_is_complete = True def init_octavia(self): # Init Octavia listener and endpoints v_objects = self._get_octavia_objects( projectpluginmap.NsxPlugins.NSX_V) t_objects = self._get_octavia_objects( projectpluginmap.NsxPlugins.NSX_T) self.octavia_listener = octavia_listener.NSXOctaviaListener( loadbalancer=octavia_tvd.OctaviaTVDWrapper( v_objects['loadbalancer'], t_objects['loadbalancer']), listener=octavia_tvd.OctaviaTVDWrapper( v_objects['listener'], t_objects['listener']), pool=octavia_tvd.OctaviaTVDWrapper( v_objects['pool'], t_objects['pool']), member=octavia_tvd.OctaviaTVDWrapper( v_objects['member'], t_objects['member']), healthmonitor=octavia_tvd.OctaviaTVDWrapper( v_objects['healthmonitor'], t_objects['healthmonitor']), 
l7policy=octavia_tvd.OctaviaTVDWrapper( v_objects['l7policy'], t_objects['l7policy']), l7rule=octavia_tvd.OctaviaTVDWrapper( v_objects['l7rule'], t_objects['l7rule'])) def spawn_complete(self, resource, event, trigger, payload=None): # This method should run only once, but after init_complete if not self.init_is_complete: self.init_complete(None, None, None) self.init_octavia_stats_collector() def init_octavia_stats_collector(self): self.octavia_stats_collector = ( octavia_listener.NSXOctaviaStatisticsCollector( self, octavia_tvd.stats_getter)) def start_rpc_listeners(self): # Run the start_rpc_listeners of one of the sub-plugins for plugin_type in self.plugins: plugin = self.plugins[plugin_type] if plugin.rpc_workers_supported(): return plugin.start_rpc_listeners() def _unsubscribe_callback_events(self): # unsubscribe the callback that should be called on all plugins # other that NSX-T. registry.unsubscribe_all( l3_db.L3_NAT_dbonly_mixin._prevent_l3_port_delete_callback) # Instead we will subscribe our internal callback. 
registry.subscribe(self._prevent_l3_port_delete_callback, resources.PORT, events.BEFORE_DELETE) @staticmethod def _prevent_l3_port_delete_callback(resource, event, trigger, payload=None): """Register a callback to replace the default one This callback will prevent port deleting only if the port plugin is not NSX-T (in NSX-T plugin it was already handled) """ context = payload.context port_id = payload.resource_id port_check = payload.metadata['port_check'] l3plugin = directory.get_plugin(plugin_constants.L3) if l3plugin and port_check: # if not nsx-t - call super code core_plugin = directory.get_plugin() db_port = core_plugin._get_port(context, port_id) p = core_plugin._get_plugin_from_net_id( context, db_port['network_id']) if p.plugin_type() != projectpluginmap.NsxPlugins.NSX_T: l3plugin.prevent_l3_port_deletion(context, port_id) def _validate_obj_extensions(self, data, plugin_type, obj_type): """prevent configuration of unsupported extensions""" for field in self._unsupported_fields[plugin_type][obj_type]: if validators.is_attr_set(data.get(field)): err_msg = (_('Can not support %(field)s extension for ' '%(obj_type)s %(p)s plugin') % { 'field': field, 'obj_type': obj_type, 'p': plugin_type}) raise n_exc.InvalidInput(error_message=err_msg) def _cleanup_obj_fields(self, data, plugin_type, obj_type): """Remove data of unsupported extensions""" for field in self._unsupported_fields[plugin_type][obj_type]: if field in data: del data[field] def _list_availability_zones(self, context, filters=None): p = self._get_plugin_for_request(context, filters) if p: return p._list_availability_zones(context, filters=filters) return [] def validate_availability_zones(self, context, resource_type, availability_zones): p = self._get_plugin_from_project(context, context.project_id) return p.validate_availability_zones(context, resource_type, availability_zones) def _get_plugin_from_net_id(self, context, net_id): # get the network using the super plugin - here we use the # 
_get_network (so as not to call the make dict method) network = self._get_network(context, net_id) return self._get_plugin_from_project(context, network['tenant_id']) def get_network_availability_zones(self, net_db): ctx = n_context.get_admin_context() p = self._get_plugin_from_project(ctx, net_db['tenant_id']) return p.get_network_availability_zones(net_db) def create_network(self, context, network): net_data = network['network'] tenant_id = net_data['tenant_id'] self._ensure_default_security_group(context, tenant_id) p = self._get_plugin_from_project(context, tenant_id) return p.create_network(context, network) @db_api.retry_if_session_inactive() def create_network_bulk(self, context, networks): #Implement create bulk so that the plugin calculation will be done once objects = [] items = networks['networks'] # look at the first network to find out the project & plugin net_data = items[0]['network'] tenant_id = net_data['tenant_id'] self._ensure_default_security_group(context, tenant_id) p = self._get_plugin_from_project(context, tenant_id) # create all networks one by one try: with db_api.CONTEXT_WRITER.using(context): for item in items: objects.append(p.create_network(context, item)) except Exception: with excutils.save_and_reraise_exception(): LOG.error("An exception occurred while creating " "the networks:%(item)s", {'item': item}) return objects def delete_network(self, context, id): p = self._get_plugin_from_net_id(context, id) p.delete_network(context, id) def get_network(self, context, id, fields=None): p = self._get_plugin_from_net_id(context, id) return p.get_network(context, id, fields=fields) def _get_plugin_for_request(self, context, filters, keys=None): project_id = context.project_id if filters: if filters.get('tenant_id'): project_id = filters.get('tenant_id') elif filters.get('project_id'): project_id = filters.get('project_id') else: # we have specific filters on the request. 
If those are # specific enough, we should not filter by project if filters.get('id'): return if keys: for key in keys: if filters.get(key): return # If there are multiple tenants/projects being requested then # we will not filter according to the plugin if isinstance(project_id, list): return return self._get_plugin_from_project(context, project_id) def get_networks(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): # Read project plugin to filter relevant projects according to # plugin req_p = self._get_plugin_for_request(context, filters, keys=['shared']) filters = filters or {} with db_api.CONTEXT_READER.using(context): networks = ( super(NsxTVDPlugin, self).get_networks( context, filters, fields, sorts, limit, marker, page_reverse)) for net in networks[:]: p = self._get_plugin_from_project(context, net['tenant_id']) if p == req_p or req_p is None: p._extend_get_network_dict_provider(context, net) else: networks.remove(net) return (networks if not fields else [db_utils.resource_fields(network, fields) for network in networks]) def update_network(self, context, id, network): p = self._get_plugin_from_net_id(context, id) return p.update_network(context, id, network) def create_port(self, context, port): net_id = port['port']['network_id'] p = self._get_plugin_from_net_id(context, net_id) self._validate_obj_extensions( port['port'], p.plugin_type(), 'port') new_port = p.create_port(context, port) self._cleanup_obj_fields( new_port, p.plugin_type(), 'port') return new_port def update_port(self, context, id, port): db_port = self._get_port(context, id) p = self._get_plugin_from_net_id(context, db_port['network_id']) self._validate_obj_extensions( port['port'], p.plugin_type(), 'port') return p.update_port(context, id, port) def delete_port(self, context, id, **kwargs): db_port = self._get_port(context, id) p = self._get_plugin_from_net_id(context, db_port['network_id']) p.delete_port(context, id, **kwargs) def 
get_port(self, context, id, fields=None): db_port = self._get_port(context, id) p = self._get_plugin_from_net_id(context, db_port['network_id']) port = p.get_port(context, id, fields=fields) self._cleanup_obj_fields( port, p.plugin_type(), 'port') return port def get_ports(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): # Read project plugin to filter relevant projects according to # plugin req_p = self._get_plugin_for_request(context, filters, keys=['device_id', 'network_id', 'fixed_ips']) filters = filters or {} with db_api.CONTEXT_READER.using(context): ports = ( super(NsxTVDPlugin, self).get_ports( context, filters, fields, sorts, limit, marker, page_reverse)) # Add port extensions for port in ports[:]: port_model = None if 'id' in port: port_model = self._get_port(context, port['id']) resource_extend.apply_funcs('ports', port, port_model) p = self._get_plugin_from_net_id(context, port['network_id']) if p == req_p or req_p is None: if hasattr(p, '_extend_get_port_dict_qos_and_binding'): p._extend_get_port_dict_qos_and_binding(context, port) else: if not port_model: port_model = port p._extend_port_dict_binding(port, port_model) if hasattr(p, '_remove_provider_security_groups_from_list'): p._remove_provider_security_groups_from_list(port) self._cleanup_obj_fields( port, p.plugin_type(), 'port') else: ports.remove(port) return (ports if not fields else [db_utils.resource_fields(port, fields) for port in ports]) def _get_subnet_plugin_by_id(self, context, subnet_id): db_subnet = self._get_subnet(context, subnet_id) return self._get_plugin_from_net_id(context, db_subnet['network_id']) def get_subnet(self, context, id, fields=None): p = self._get_subnet_plugin_by_id(context, id) return p.get_subnet(context, id, fields=fields) def get_subnets(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): # Check if we need to invoke metadata search. 
Here we are unable to # filter according to projects as this is from the nova api service # so we invoke on all plugins that support this extension if ((fields and as_providers.ADV_SERVICE_PROVIDERS in fields) or (filters and filters.get(as_providers.ADV_SERVICE_PROVIDERS))): for plugin in self.as_providers.values(): f = copy.copy(filters) subnets = plugin.get_subnets(context, filters=f, fields=fields, sorts=sorts, limit=limit, marker=marker, page_reverse=page_reverse) if subnets: return subnets return [] else: # Read project plugin to filter relevant projects according to # plugin req_p = self._get_plugin_for_request(context, filters) filters = filters or {} subnets = super(NsxTVDPlugin, self).get_subnets( context, filters=filters, fields=fields, sorts=sorts, limit=limit, marker=marker, page_reverse=page_reverse) for subnet in subnets[:]: p = self._get_plugin_from_project(context, subnet['tenant_id']) if req_p and p != req_p: subnets.remove(subnet) return subnets def delete_subnet(self, context, id): p = self._get_subnet_plugin_by_id(context, id) p.delete_subnet(context, id) def _get_subnet_plugin(self, context, subnet_data): # get the plugin of the associated network net_id = subnet_data['network_id'] net_plugin = self._get_plugin_from_net_id(context, net_id) # make sure it matches the plugin of the current tenant tenant_id = subnet_data['tenant_id'] tenant_plugin = self._get_plugin_from_project(context, tenant_id) if tenant_plugin.plugin_type() != net_plugin.plugin_type(): err_msg = (_('Subnet should belong to the %s plugin ' 'as the network') % net_plugin.plugin_type()) raise n_exc.InvalidInput(error_message=err_msg) return net_plugin def create_subnet(self, context, subnet): p = self._get_subnet_plugin(context, subnet['subnet']) return p.create_subnet(context, subnet) def create_subnet_bulk(self, context, subnets): # look at the first subnet to find out the project & plugin items = subnets['subnets'] p = self._get_subnet_plugin(context, items[0]['subnet']) 
return p.create_subnet_bulk(context, subnets) def update_subnet(self, context, id, subnet): p = self._get_subnet_plugin_by_id(context, id) return p.update_subnet(context, id, subnet) def get_router_availability_zones(self, router): ctx = n_context.get_admin_context() p = self._get_plugin_from_project(ctx, router['tenant_id']) return p.get_router_availability_zones(router) def _validate_router_gw_plugin(self, context, router_plugin, gw_info): if gw_info and gw_info.get('network_id'): net_plugin = self._get_plugin_from_net_id( context, gw_info['network_id']) if net_plugin.plugin_type() != router_plugin.plugin_type(): err_msg = (_('Router gateway should belong to the %s plugin ' 'as the router') % router_plugin.plugin_type()) raise n_exc.InvalidInput(error_message=err_msg) def _validate_router_interface_plugin(self, context, router_plugin, interface_info): is_port, is_sub = self._validate_interface_info(interface_info) if is_port: net_id = self._get_port( context, interface_info['port_id'])['network_id'] elif is_sub: net_id = self._get_subnet( context, interface_info['subnet_id'])['network_id'] net_plugin = self._get_plugin_from_net_id(context, net_id) if net_plugin.plugin_type() != router_plugin.plugin_type(): err_msg = (_('Router interface should belong to the %s plugin ' 'as the router') % router_plugin.plugin_type()) raise n_exc.InvalidInput(error_message=err_msg) def _get_plugin_from_router_id(self, context, router_id): # get the router using the super plugin - here we use the # _get_router (so as not to call the make dict method) router = self._get_router(context, router_id) return self._get_plugin_from_project(context, router['tenant_id']) def create_router(self, context, router): tenant_id = router['router']['tenant_id'] self._ensure_default_security_group(context, tenant_id) p = self._get_plugin_from_project(context, tenant_id) self._validate_router_gw_plugin(context, p, router['router'].get( 'external_gateway_info')) self._validate_obj_extensions( 
router['router'], p.plugin_type(), 'router') new_router = p.create_router(context, router) self._cleanup_obj_fields( new_router, p.plugin_type(), 'router') return new_router def update_router(self, context, router_id, router): p = self._get_plugin_from_router_id(context, router_id) self._validate_router_gw_plugin(context, p, router['router'].get( 'external_gateway_info')) self._validate_obj_extensions( router['router'], p.plugin_type(), 'router') return p.update_router(context, router_id, router) def get_router(self, context, id, fields=None): p = self._get_plugin_from_router_id(context, id) router = p.get_router(context, id, fields=fields) self._cleanup_obj_fields(router, p.plugin_type(), 'router') return router def delete_router(self, context, id): p = self._get_plugin_from_router_id(context, id) p.delete_router(context, id) def add_router_interface(self, context, router_id, interface_info): p = self._get_plugin_from_router_id(context, router_id) self._validate_router_interface_plugin(context, p, interface_info) return p.add_router_interface(context, router_id, interface_info) def remove_router_interface(self, context, router_id, interface_info): p = self._get_plugin_from_router_id(context, router_id) return p.remove_router_interface(context, router_id, interface_info) def _validate_fip_router_plugin(self, context, fip_plugin, fip_data): if 'router_id' in fip_data: router_plugin = self._get_plugin_from_router_id( context, fip_data['router_id']) if router_plugin.plugin_type() != fip_plugin.plugin_type(): err_msg = (_('Floatingip router should belong to the %s ' 'plugin as the floatingip') % fip_plugin.plugin_type()) raise n_exc.InvalidInput(error_message=err_msg) def get_routers(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): # Read project plugin to filter relevant projects according to # plugin req_p = self._get_plugin_for_request(context, filters) routers = super(NsxTVDPlugin, self).get_routers( context, 
filters=filters, fields=fields, sorts=sorts, limit=limit, marker=marker, page_reverse=page_reverse) for router in routers[:]: p = self._get_plugin_from_project(context, router['tenant_id']) if req_p and p != req_p: routers.remove(router) return routers def create_floatingip(self, context, floatingip): net_id = floatingip['floatingip']['floating_network_id'] p = self._get_plugin_from_net_id(context, net_id) self._validate_fip_router_plugin(context, p, floatingip['floatingip']) return p.create_floatingip(context, floatingip) def update_floatingip(self, context, id, floatingip): fip = self._get_floatingip(context, id) net_id = fip['floating_network_id'] p = self._get_plugin_from_net_id(context, net_id) self._validate_fip_router_plugin(context, p, floatingip['floatingip']) return p.update_floatingip(context, id, floatingip) def delete_floatingip(self, context, id): fip = self._get_floatingip(context, id) net_id = fip['floating_network_id'] p = self._get_plugin_from_net_id(context, net_id) return p.delete_floatingip(context, id) def get_floatingip(self, context, id, fields=None): fip = self._get_floatingip(context, id) net_id = fip['floating_network_id'] p = self._get_plugin_from_net_id(context, net_id) return p.get_floatingip(context, id, fields=fields) def get_floatingips(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): # Read project plugin to filter relevant projects according to # plugin req_p = self._get_plugin_for_request(context, filters, keys=['port_id']) fips = super(NsxTVDPlugin, self).get_floatingips( context, filters=filters, fields=fields, sorts=sorts, limit=limit, marker=marker, page_reverse=page_reverse) for fip in fips[:]: p = self._get_plugin_from_project(context, fip['tenant_id']) if req_p and p != req_p: fips.remove(fip) return fips def disassociate_floatingips(self, context, port_id): db_port = self._get_port(context, port_id) p = self._get_plugin_from_net_id(context, db_port['network_id']) return 
p.disassociate_floatingips(context, port_id) def _get_plugin_from_sg_id(self, context, sg_id): sg = self._get_security_group(context, sg_id) return self._get_plugin_from_project(context, sg['tenant_id']) def create_security_group(self, context, security_group, default_sg=False): if not default_sg: secgroup = security_group['security_group'] tenant_id = secgroup['tenant_id'] self._ensure_default_security_group(context, tenant_id) p = self._get_plugin_from_project(context, context.project_id) self._validate_obj_extensions( security_group['security_group'], p.plugin_type(), 'security_group') new_sg = p.create_security_group(context, security_group, default_sg=default_sg) self._cleanup_obj_fields( new_sg, p.plugin_type(), 'security_group') return new_sg def delete_security_group(self, context, id): p = self._get_plugin_from_sg_id(context, id) p.delete_security_group(context, id) def update_security_group(self, context, id, security_group): p = self._get_plugin_from_sg_id(context, id) self._validate_obj_extensions( security_group['security_group'], p.plugin_type(), 'security_group') return p.update_security_group(context, id, security_group) def get_security_group(self, context, id, fields=None): p = self._get_plugin_from_sg_id(context, id) sg = p.get_security_group(context, id, fields=fields) self._cleanup_obj_fields( sg, p.plugin_type(), 'security_group') return sg def get_security_groups(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False, default_sg=False): # Read project plugin to filter relevant projects according to # plugin req_p = self._get_plugin_for_request(context, filters) sgs = super(NsxTVDPlugin, self).get_security_groups( context, filters=filters, fields=fields, sorts=sorts, limit=limit, marker=marker, page_reverse=page_reverse, default_sg=default_sg) for sg in sgs[:]: p = self._get_plugin_from_project(context, sg['tenant_id']) if req_p and p != req_p: sgs.remove(sg) return sgs def 
create_security_group_rule_bulk(self, context, security_group_rules): p = self._get_plugin_from_project(context, context.project_id) return p.create_security_group_rule_bulk(context, security_group_rules) def create_security_group_rule(self, context, security_group_rule): p = self._get_plugin_from_project(context, context.project_id) return p.create_security_group_rule(context, security_group_rule) def delete_security_group_rule(self, context, id): rule_db = self._get_security_group_rule(context, id) sg_id = rule_db['security_group_id'] p = self._get_plugin_from_sg_id(context, sg_id) p.delete_security_group_rule(context, id) def get_security_group_rules(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): # Read project plugin to filter relevant projects according to # plugin req_p = self._get_plugin_for_request(context, filters) rules = super(NsxTVDPlugin, self).get_security_group_rules( context, filters=filters, fields=fields, sorts=sorts, limit=limit, marker=marker, page_reverse=page_reverse) for rule in rules[:]: p = self._get_plugin_from_project(context, rule['tenant_id']) if req_p and p != req_p: rules.remove(rule) return rules @staticmethod @resource_extend.extends([net_def.COLLECTION_NAME]) def _ext_extend_network_dict(result, netdb): ctx = n_context.get_admin_context() # get the core plugin as this is a static method with no 'self' plugin = directory.get_plugin() p = plugin._get_plugin_from_project(ctx, netdb['tenant_id']) with db_api.CONTEXT_WRITER.using(ctx): p._extension_manager.extend_network_dict( ctx.session, netdb, result) @staticmethod @resource_extend.extends([port_def.COLLECTION_NAME]) def _ext_extend_port_dict(result, portdb): ctx = n_context.get_admin_context() # get the core plugin as this is a static method with no 'self' plugin = directory.get_plugin() p = plugin._get_plugin_from_project(ctx, portdb['tenant_id']) with db_api.CONTEXT_WRITER.using(ctx): p._extension_manager.extend_port_dict( 
ctx.session, portdb, result) @staticmethod @resource_extend.extends([subnet_def.COLLECTION_NAME]) def _ext_extend_subnet_dict(result, subnetdb): ctx = n_context.get_admin_context() # get the core plugin as this is a static method with no 'self' plugin = directory.get_plugin() p = plugin._get_plugin_from_project(ctx, subnetdb['tenant_id']) with db_api.CONTEXT_WRITER.using(ctx): p._extension_manager.extend_subnet_dict( ctx.session, subnetdb, result) def _get_project_plugin_dict(self, data): return {'id': data['project'], 'project': data['project'], 'plugin': data['plugin'], 'tenant_id': data['project']} def create_project_plugin_map(self, context, project_plugin_map, internal=False): data = project_plugin_map['project_plugin_map'] # validations: # 1. validate it doesn't already exist if nsx_db.get_project_plugin_mapping( context.session, data['project']): raise projectpluginmap.ProjectPluginAlreadyExists( project_id=data['project']) if not internal: # 2. only admin user is allowed if not context.is_admin: raise projectpluginmap.ProjectPluginAdminOnly() # 3. Validate the project id # TODO(asarfaty): Validate project id exists in keystone if not uuidutils.is_uuid_like(data['project']): raise projectpluginmap.ProjectPluginIllegalId( project_id=data['project']) # 4. 
Check that plugin is available if data['plugin'] not in self.plugins: raise projectpluginmap.ProjectPluginNotAvailable( plugin=data['plugin']) # Add the entry to the DB and return it LOG.info("Adding mapping between project %(project)s and plugin " "%(plugin)s", {'project': data['project'], 'plugin': data['plugin']}) nsx_db.add_project_plugin_mapping(context.session, data['project'], data['plugin']) return self._get_project_plugin_dict(data) def get_project_plugin_map(self, context, id, fields=None): data = nsx_db.get_project_plugin_mapping(context.session, id) if data: return self._get_project_plugin_dict(data) else: raise n_exc.ObjectNotFound(id=id) def get_project_plugin_maps(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): # TODO(asarfaty) filter the results mappings = nsx_db.get_project_plugin_mappings(context.session) return [self._get_project_plugin_dict(data) for data in mappings] def get_plugin_type_from_project(self, context, project_id): """Get the correct plugin type for this project. Look for the project in the DB. 
If not there - add an entry with the default plugin """ plugin_type = self.default_plugin if not project_id: # if the project_id is empty - return the default one and do not # add to db (used by admin context to get actions) return plugin_type mapping = nsx_db.get_project_plugin_mapping( context.session, project_id) if mapping: plugin_type = mapping['plugin'] else: # add a new entry with the default plugin try: self.create_project_plugin_map( context, {'project_plugin_map': {'plugin': plugin_type, 'project': project_id}}, internal=True) except projectpluginmap.ProjectPluginAlreadyExists: # Maybe added by another thread pass if not self.plugins.get(plugin_type): msg = (_("Cannot use unsupported plugin %(plugin)s for project " "%(project)s") % {'plugin': plugin_type, 'project': project_id}) raise nsx_exc.NsxPluginException(err_msg=msg) LOG.debug("Using %s plugin for project %s", plugin_type, project_id) return plugin_type def _get_plugin_from_project(self, context, project_id): """Get the correct plugin for this project. Look for the project in the DB. 
If not there - add an entry with the default plugin """ plugin_type = self.get_plugin_type_from_project(context, project_id) return self.plugins[plugin_type] def get_housekeeper(self, context, name, fields=None): p = self._get_plugin_from_project(context, context.project_id) if hasattr(p, 'housekeeper'): return p.housekeeper.get(name) msg = _("Housekeeper %s not found") % name raise nsx_exc.NsxPluginException(err_msg=msg) def get_housekeepers(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): p = self._get_plugin_for_request(context, filters) if p and hasattr(p, 'housekeeper'): return p.housekeeper.list() return [] def update_housekeeper(self, context, name, housekeeper): p = self._get_plugin_from_project(context, context.project_id) if hasattr(p, 'housekeeper'): p.housekeeper.run(context, name) return p.housekeeper.get(name) def get_address_scopes(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): # Read project plugin to filter relevant projects according to # plugin req_p = self._get_plugin_for_request(context, filters) address_scopes = super(NsxTVDPlugin, self).get_address_scopes( context, filters=filters, fields=fields, sorts=sorts, limit=limit, marker=marker, page_reverse=page_reverse) for address_scope in address_scopes[:]: p = self._get_plugin_from_project(context, address_scope['tenant_id']) if req_p and p != req_p: address_scopes.remove(address_scope) return address_scopes def get_subnetpools(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): # Read project plugin to filter relevant projects according to # plugin req_p = self._get_plugin_for_request(context, filters) pools = super(NsxTVDPlugin, self).get_subnetpools( context, filters=filters, fields=fields, sorts=sorts, limit=limit, marker=marker, page_reverse=page_reverse) for pool in pools[:]: p = self._get_plugin_from_project(context, pool['tenant_id']) if 
req_p and p != req_p: pools.remove(pool) return pools def get_nsx_policy(self, context, id, fields=None): # Extension supported only by the nsxv plugin p = self._get_plugin_from_project(context, context.project_id) if p.plugin_type() != v.NsxVPluginV2.plugin_type(): err_msg = (_('Can not support %(field)s extension for ' '%(p)s plugin') % { 'field': 'nsx-policy', 'p': p.plugin_type()}) raise n_exc.InvalidInput(error_message=err_msg) return p.get_nsx_policy(context, id, fields=fields) def get_nsx_policies(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): # Extension supported only by the nsxv plugin p = self._get_plugin_from_project(context, context.project_id) if p.plugin_type() != v.NsxVPluginV2.plugin_type(): return [] return p.get_nsx_policies(context, filters=filters, fields=fields, sorts=sorts, limit=limit, marker=marker, page_reverse=page_reverse) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx/utils.py0000644000175000017500000000626300000000000023402 0ustar00coreycorey00000000000000# Copyright 2014 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from oslo_log import log from neutron_lib import context as n_context from neutron_lib import exceptions from neutron_lib.plugins import directory from vmware_nsx.db import db as nsx_db LOG = log.getLogger(__name__) def is_tvd_core_plugin(): core_plugin = cfg.CONF.core_plugin if (core_plugin.endswith('NsxTVDPlugin') or core_plugin.endswith('vmware_nsxtvd')): return True return False def get_tvd_plugin_type_for_project(project_id, context=None): """Get the plugin type used by a project Raise an exception if not found or the plugin is not in use """ if not context: context = n_context.get_admin_context() core_plugin = directory.get_plugin() return core_plugin.get_plugin_type_from_project(context, project_id) def filter_plugins(cls): """ Class decorator to separate the results of each of the given methods by plugin """ def get_project_mapping(context, project_id): """Return the plugin associated with this project""" mapping = nsx_db.get_project_plugin_mapping( context.session, project_id) if mapping: return mapping['plugin'] else: raise exceptions.ObjectNotFound(id=project_id) def add_separate_plugin_hook(name): orig_method = getattr(cls, name, None) def filter_results_by_plugin(self, context, **kwargs): """Run the original get-list method, and filter the results by the project id of the context """ entries = orig_method(self, context, **kwargs) if not context.project_id or not entries: return entries req_p = get_project_mapping(context, context.project_id) for entry in entries[:]: if entry.get('tenant_id'): try: p = get_project_mapping(context, entry['tenant_id']) except exceptions.ObjectNotFound: # This could be a project that was already deleted LOG.info("Project %s is not associated with any " "plugin and will be ignored", entry['tenant_id']) entries.remove(entry) else: if p != req_p: entries.remove(entry) return entries setattr(cls, name, filter_results_by_plugin) for method in cls.methods_to_separate: add_separate_plugin_hook(method) 
return cls ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2022538 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_p/0000755000175000017500000000000000000000000022200 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_p/__init__.py0000644000175000017500000000000000000000000024277 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_p/availability_zones.py0000644000175000017500000003036600000000000026452 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from oslo_log import log from vmware_nsx.common import availability_zones as common_az from vmware_nsx.common import config from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.plugins.common_v3 import availability_zones as v3_az from vmware_nsxlib.v3 import exceptions as nsx_lib_exc from vmware_nsxlib.v3 import nsx_constants from vmware_nsxlib.v3.policy import utils as p_utils LOG = log.getLogger(__name__) DEFAULT_NAME = common_az.DEFAULT_NAME + 'p' class NsxPAvailabilityZone(v3_az.NsxV3AvailabilityZone): def get_az_opts(self): return config.get_nsxp_az_opts(self.name) def init_defaults(self): # use the default configuration self.metadata_proxy = cfg.CONF.nsx_p.metadata_proxy self.dhcp_profile = cfg.CONF.nsx_p.dhcp_profile self.native_metadata_route = cfg.CONF.nsx_p.native_metadata_route self.default_overlay_tz = cfg.CONF.nsx_p.default_overlay_tz self.default_vlan_tz = cfg.CONF.nsx_p.default_vlan_tz self.default_tier0_router = cfg.CONF.nsx_p.default_tier0_router self.dns_domain = cfg.CONF.nsx_p.dns_domain self.nameservers = cfg.CONF.nsx_p.nameservers self.edge_cluster = cfg.CONF.nsx_p.edge_cluster def _init_default_resource(self, nsxpolicy, resource_api, config_name, filter_list_results=None, auto_config=False, is_mandatory=True, search_scope=None): # NOTE(annak): we may need to generalize this for API calls # requiring path ids name_or_id = getattr(self, config_name) if not name_or_id: if auto_config: # If the field not specified, the system will auto-configure # in case only single resource is present resources = resource_api.list() if filter_list_results: resources = filter_list_results(resources) if len(resources) == 1: return resources[0]['id'] if is_mandatory: if self.is_default(): raise cfg.RequiredOptError(config_name, group=cfg.OptGroup('nsx_p')) else: msg = (_("No %(res)s provided for availability " "zone %(az)s") % { 'res': config_name, 'az': self.name}) raise nsx_exc.NsxPluginException(err_msg=msg) return None 
try: # Check if the configured value is the ID resource_api.get(name_or_id, silent=True) return name_or_id except nsx_lib_exc.ResourceNotFound: # Search by tags if search_scope: resource_type = resource_api.entry_def.resource_type() resource_id = nsxpolicy.get_id_by_resource_and_tag( resource_type, search_scope, name_or_id) if resource_id: return resource_id # Check if the configured value is the name resource = resource_api.get_by_name(name_or_id) if resource: return resource['id'] # Resource not found if self.is_default(): raise cfg.RequiredOptError(config_name, group=cfg.OptGroup('nsx_p')) else: msg = (_("Could not find %(res)s %(id)s for availability " "zone %(az)s") % { 'res': config_name, 'id': name_or_id, 'az': self.name}) raise nsx_exc.NsxPluginException(err_msg=msg) def translate_configured_names_to_uuids(self, nsxpolicy, nsxlib=None, search_scope=None): super(NsxPAvailabilityZone, self).translate_configured_names_to_uuids( nsxpolicy) self._default_overlay_tz_uuid = self._init_default_resource( nsxpolicy, nsxpolicy.transport_zone, 'default_overlay_tz', auto_config=True, is_mandatory=True, filter_list_results=lambda tzs: [ tz for tz in tzs if tz['tz_type'].startswith('OVERLAY')], search_scope=search_scope) self._default_vlan_tz_uuid = self._init_default_resource( nsxpolicy, nsxpolicy.transport_zone, 'default_vlan_tz', auto_config=True, is_mandatory=False, filter_list_results=lambda tzs: [ tz for tz in tzs if tz['tz_type'].startswith('VLAN')], search_scope=search_scope) self._default_tier0_router = self._init_default_resource( nsxpolicy, nsxpolicy.tier0, 'default_tier0_router', auto_config=True, is_mandatory=True, search_scope=search_scope) self._edge_cluster_uuid = self._init_default_resource( nsxpolicy, nsxpolicy.edge_cluster, 'edge_cluster', auto_config=False, is_mandatory=False, search_scope=search_scope) # Init dhcp config from policy or MP self.use_policy_dhcp = False if (nsxpolicy.feature_supported( nsx_constants.FEATURE_NSX_POLICY_DHCP)): try: 
self._policy_dhcp_server_config = self._init_default_resource( nsxpolicy, nsxpolicy.dhcp_server_config, 'dhcp_profile', auto_config=False, is_mandatory=False, search_scope=search_scope) if self._policy_dhcp_server_config: self.use_policy_dhcp = True except Exception: # Not found. try as MP profile pass self._native_dhcp_profile_uuid = None if not self.use_policy_dhcp and nsxlib: self._translate_dhcp_profile(nsxlib, search_scope=search_scope) self.use_policy_md = False if (nsxpolicy.feature_supported( nsx_constants.FEATURE_NSX_POLICY_MDPROXY)): # Try to initialize md-proxy from the policy try: self._native_md_proxy_uuid = self._init_default_resource( nsxpolicy, nsxpolicy.md_proxy, 'metadata_proxy', auto_config=True, is_mandatory=True, search_scope=search_scope) LOG.info("NSX-P az using policy MD proxy: %s", self._native_md_proxy_uuid) self.use_policy_md = True except Exception: LOG.info("NSX-P az could not use policy MD proxy. Using MP " "one instead") if not self.use_policy_md: # Try to initialize md-proxy from the MP if nsxlib: self._translate_metadata_proxy( nsxlib, search_scope=search_scope) LOG.info("NSX-P az using MP MD proxy: %s", self._native_md_proxy_uuid) else: self._native_md_proxy_uuid = None def _get_edge_cluster_tzs(self, nsxpolicy, nsxlib, ec_uuid): ec_nodes = nsxpolicy.edge_cluster.get_edge_node_ids(ec_uuid) ec_tzs = [] for tn_uuid in ec_nodes: ec_tzs.extend(nsxlib.transport_node.get_transport_zones( tn_uuid)) return ec_tzs def _validate_tz(self, nsxpolicy, nsxlib, obj_type, obj_id, ec_uuid): obj_tzs = self._get_edge_cluster_tzs(nsxpolicy, nsxlib, ec_uuid) if self._default_overlay_tz_uuid not in obj_tzs: msg = (_("%(type)s %(id)s of availability zone %(az)s with edge " "cluster %(ec)s does not match the default overlay tz " "%(tz)s") % { 'type': obj_type, 'id': obj_id, 'ec': ec_uuid, 'tz': self._default_overlay_tz_uuid, 'az': self.name}) raise nsx_exc.NsxPluginException(err_msg=msg) if (self._default_vlan_tz_uuid and self._default_vlan_tz_uuid not in 
obj_tzs): msg = (_("%(type)s %(id)s of availability zone %(az)s with edge " "cluster %(ec)s does not match the default vlan tz " "%(tz)s") % { 'type': obj_type, 'id': obj_id, 'ec': ec_uuid, 'tz': self._default_vlan_tz_uuid, 'az': self.name}) raise nsx_exc.NsxPluginException(err_msg=msg) def validate_availability_zone(self, nsxpolicy, nsxlib=None): """Validate that all the components of this AZ are connected""" if not nsxlib: LOG.warning("Cannot validate availability zone %s without " "passthrough api", self.name) return # Validate tier0 TZ match the default ones tier0_ec_path = nsxpolicy.tier0.get_edge_cluster_path( self._default_tier0_router) if not tier0_ec_path: msg = (_("Tier0 %(id)s of availability zone %(az)s does not have " "an edge cluster") % { 'id': self._default_tier0_router, 'az': self.name}) raise nsx_exc.NsxPluginException(err_msg=msg) tier0_ec_uuid = p_utils.path_to_id(tier0_ec_path) self._validate_tz(nsxpolicy, nsxlib, 'Tier0', self._default_tier0_router, tier0_ec_uuid) if self.use_policy_dhcp: dhcp_ec_path = nsxpolicy.dhcp_server_config.get( self._policy_dhcp_server_config).get('edge_cluster_path') dhcp_ec = p_utils.path_to_id(dhcp_ec_path) if dhcp_ec != tier0_ec_uuid: self._validate_tz(nsxpolicy, nsxlib, 'DHCP server config', self._policy_dhcp_server_config, dhcp_ec) elif self._native_dhcp_profile_uuid: dhcp_ec = nsxlib.native_dhcp_profile.get( self._native_dhcp_profile_uuid).get('edge_cluster_id') if dhcp_ec != tier0_ec_uuid: self._validate_tz(nsxpolicy, nsxlib, 'DHCP profile', self._native_dhcp_profile_uuid, dhcp_ec) if self._native_md_proxy_uuid: # Validate that the edge cluster of the MD proxy (MP or policy one) # match the configured TZs if self.use_policy_md: md_ec_path = nsxpolicy.md_proxy.get( self._native_md_proxy_uuid).get('edge_cluster_path') md_ec = p_utils.path_to_id(md_ec_path) else: md_ec = nsxlib.native_md_proxy.get( self._native_md_proxy_uuid).get('edge_cluster_id') if md_ec != tier0_ec_uuid: self._validate_tz(nsxpolicy, nsxlib, 
'MD Proxy', self._native_md_proxy_uuid, md_ec) class NsxPAvailabilityZones(common_az.ConfiguredAvailabilityZones): default_name = DEFAULT_NAME def __init__(self): default_azs = cfg.CONF.default_availability_zones super(NsxPAvailabilityZones, self).__init__( cfg.CONF.nsx_p.availability_zones, NsxPAvailabilityZone, default_availability_zones=default_azs) self.non_default_dns_domain = self.dns_domain_configured_non_default() def dns_domain_configured_non_default(self): for az in self.availability_zones.values(): if az.dns_domain and az.dns_domain != cfg.CONF.nsx_p.dns_domain: return True return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_p/plugin.py0000644000175000017500000056145100000000000024064 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import time import netaddr from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log from oslo_utils import excutils from oslo_utils import uuidutils from neutron.db import agents_db from neutron.db import l3_db from neutron.db.models import l3 as l3_db_models from neutron.db.models import securitygroup as securitygroup_model from neutron.db import models_v2 from neutron.extensions import securitygroup as ext_sg from neutron.quota import resource_registry from neutron_lib.api.definitions import address_scope from neutron_lib.api.definitions import agent as agent_apidef from neutron_lib.api.definitions import allowedaddresspairs as addr_apidef from neutron_lib.api.definitions import availability_zone as az_apidef from neutron_lib.api.definitions import dhcpagentscheduler from neutron_lib.api.definitions import external_net from neutron_lib.api.definitions import extra_dhcp_opt as ext_edo from neutron_lib.api.definitions import extraroute from neutron_lib.api.definitions import l3 as l3_apidef from neutron_lib.api.definitions import network_availability_zone from neutron_lib.api.definitions import port_security as psec from neutron_lib.api.definitions import portbindings as pbin_apidef from neutron_lib.api.definitions import provider_net as pnet_apidef from neutron_lib.api.definitions import router_availability_zone from neutron_lib.api.definitions import vlantransparent as vlan_apidef from neutron_lib.api import extensions from neutron_lib.api import validators from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants as const from neutron_lib import context as n_context from neutron_lib.db import api as db_api from neutron_lib.db import resource_extend from neutron_lib.db import utils as db_utils from neutron_lib import exceptions as n_exc from neutron_lib.plugins import constants as plugin_const from neutron_lib.plugins import 
directory from neutron_lib.services.qos import constants as qos_consts from vmware_nsx._i18n import _ from vmware_nsx.common import config # noqa from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import l3_rpc_agent_api from vmware_nsx.common import locking from vmware_nsx.common import managers from vmware_nsx.common import utils from vmware_nsx.db import db as nsx_db from vmware_nsx.extensions import api_replay from vmware_nsx.extensions import maclearning as mac_ext from vmware_nsx.extensions import projectpluginmap from vmware_nsx.extensions import providersecuritygroup as provider_sg from vmware_nsx.extensions import secgroup_rule_local_ip_prefix as sg_prefix from vmware_nsx.extensions import securitygrouplogging as sg_logging from vmware_nsx.plugins.common_v3 import plugin as nsx_plugin_common from vmware_nsx.plugins.nsx_p import availability_zones as nsxp_az from vmware_nsx.plugins.nsx_v3 import utils as v3_utils from vmware_nsx.services.fwaas.common import utils as fwaas_utils from vmware_nsx.services.fwaas.nsx_p import fwaas_callbacks_v2 from vmware_nsx.services.lbaas import lb_const from vmware_nsx.services.lbaas.nsx_p.implementation import healthmonitor_mgr from vmware_nsx.services.lbaas.nsx_p.implementation import l7policy_mgr from vmware_nsx.services.lbaas.nsx_p.implementation import l7rule_mgr from vmware_nsx.services.lbaas.nsx_p.implementation import listener_mgr from vmware_nsx.services.lbaas.nsx_p.implementation import loadbalancer_mgr from vmware_nsx.services.lbaas.nsx_p.implementation import member_mgr from vmware_nsx.services.lbaas.nsx_p.implementation import pool_mgr from vmware_nsx.services.lbaas.octavia import constants as oct_const from vmware_nsx.services.lbaas.octavia import octavia_listener from vmware_nsx.services.qos.common import utils as qos_com_utils from vmware_nsx.services.qos.nsx_v3 import driver as qos_driver from vmware_nsx.services.qos.nsx_v3 import pol_utils as qos_utils from 
vmware_nsx.services.trunk.nsx_p import driver as trunk_driver from vmware_nsxlib.v3 import exceptions as nsx_lib_exc from vmware_nsxlib.v3 import nsx_constants as nsxlib_consts from vmware_nsxlib.v3.policy import constants as policy_constants from vmware_nsxlib.v3.policy import core_defs as policy_defs from vmware_nsxlib.v3.policy import transaction as policy_trans from vmware_nsxlib.v3.policy import utils as p_utils from vmware_nsxlib.v3 import security from vmware_nsxlib.v3 import utils as nsxlib_utils LOG = log.getLogger(__name__) NSX_P_SECURITY_GROUP_TAG = 'os-security-group' NSX_P_GLOBAL_DOMAIN_ID = policy_constants.DEFAULT_DOMAIN NSX_P_DEFAULT_GROUP = 'os_default_group' NSX_P_DEFAULT_GROUP_DESC = 'Default Group for the openstack plugin' NSX_P_DEFAULT_SECTION = 'os_default_section' NSX_P_DEFAULT_SECTION_DESC = ('This section is handled by OpenStack to ' 'contain default rules on security-groups.') NSX_P_DEFAULT_SECTION_CATEGORY = policy_constants.CATEGORY_APPLICATION NSX_P_REGULAR_SECTION_CATEGORY = policy_constants.CATEGORY_ENVIRONMENT NSX_P_PROVIDER_SECTION_CATEGORY = policy_constants.CATEGORY_INFRASTRUCTURE NSX_P_PORT_RESOURCE_TYPE = 'os-neutron-port-id' NSX_P_EXCLUDE_LIST_GROUP = 'neutron_excluded_ports_group' NSX_P_EXCLUDE_LIST_TAG = 'Exclude-Port' SPOOFGUARD_PROFILE_ID = 'neutron-spoofguard-profile' NO_SPOOFGUARD_PROFILE_ID = policy_defs.SpoofguardProfileDef.DEFAULT_PROFILE MAC_DISCOVERY_PROFILE_ID = 'neutron-mac-discovery-profile' NO_MAC_DISCOVERY_PROFILE_ID = ( policy_defs.MacDiscoveryProfileDef.DEFAULT_PROFILE) NO_SEG_SECURITY_PROFILE_ID = 'neutron-no-segment-security-profile' SEG_SECURITY_PROFILE_ID = ( policy_defs.SegmentSecurityProfileDef.DEFAULT_PROFILE) SLAAC_NDRA_PROFILE_ID = 'neutron-slaac-profile' NO_SLAAC_NDRA_PROFILE_ID = 'neutron-no-slaac-profile' STATELESS_DHCP_NDRA_PROFILE_ID = 'neutron-stateless-dhcp-profile' STATEFUL_DHCP_NDRA_PROFILE_ID = 'neutron-stateful-dhcp-profile' IPV6_RA_SERVICE = 'neutron-ipv6-ra' IPV6_ROUTER_ADV_RULE_NAME = 
'all-ipv6' # Priorities for NAT rules: (FIP specific rules should come before GW rules) NAT_RULE_PRIORITY_FIP = 2000 NAT_RULE_PRIORITY_GW = 3000 NSX_P_CLIENT_SSL_PROFILE = 'neutron-client-ssl-profile' # Cache for mapping between network ids in neutron and NSX (MP) NET_NEUTRON_2_NSX_ID_CACHE = {} NET_NSX_2_NEUTRON_ID_CACHE = {} @resource_extend.has_resource_extenders class NsxPolicyPlugin(nsx_plugin_common.NsxPluginV3Base): __native_bulk_support = True __native_pagination_support = True __native_sorting_support = True supported_extension_aliases = [addr_apidef.ALIAS, address_scope.ALIAS, "quotas", pbin_apidef.ALIAS, ext_edo.ALIAS, agent_apidef.ALIAS, dhcpagentscheduler.ALIAS, "ext-gw-mode", "security-group", sg_prefix.ALIAS, psec.ALIAS, pnet_apidef.ALIAS, external_net.ALIAS, extraroute.ALIAS, l3_apidef.ALIAS, az_apidef.ALIAS, network_availability_zone.ALIAS, router_availability_zone.ALIAS, "subnet_allocation", sg_logging.ALIAS, provider_sg.ALIAS, "port-security-groups-filtering", mac_ext.ALIAS, "advanced-service-providers"] @resource_registry.tracked_resources( network=models_v2.Network, port=models_v2.Port, subnet=models_v2.Subnet, subnetpool=models_v2.SubnetPool, security_group=securitygroup_model.SecurityGroup, security_group_rule=securitygroup_model.SecurityGroupRule, router=l3_db_models.Router, floatingip=l3_db_models.FloatingIP) def __init__(self): self.fwaas_callbacks = None self.init_is_complete = False self._is_sub_plugin = False self.octavia_listener = None self.octavia_stats_collector = None nsxlib_utils.set_is_attr_callback(validators.is_attr_set) self._extend_fault_map() extension_drivers = cfg.CONF.nsx_extension_drivers self._extension_manager = managers.ExtensionManager( extension_drivers=extension_drivers) self.cfg_group = 'nsx_p' # group name for nsx_p section in nsx.ini self.init_availability_zones() self.nsxpolicy = v3_utils.get_nsxpolicy_wrapper() # NOTE: This is needed for passthrough APIs, should be removed when # policy has full support 
self.nsxlib = None if cfg.CONF.nsx_p.allow_passthrough: self.nsxlib = v3_utils.get_nsxlib_wrapper( plugin_conf=cfg.CONF.nsx_p, allow_overwrite_header=True) super(NsxPolicyPlugin, self).__init__() # Bind the dummy L3 notifications self.l3_rpc_notifier = l3_rpc_agent_api.L3NotifyAPI() LOG.info("Starting NsxPolicyPlugin") self._extension_manager.initialize() self.supported_extension_aliases.extend( self._extension_manager.extension_aliases()) # Support transparent VLANS only if the global configuration flag # vlan_transparent is True if cfg.CONF.vlan_transparent: self.supported_extension_aliases.append(vlan_apidef.ALIAS) # Support api-reply for migration environments to the policy plugin if cfg.CONF.api_replay_mode: self.supported_extension_aliases.append(api_replay.ALIAS) nsxlib_utils.set_inject_headers_callback(v3_utils.inject_headers) self._validate_nsx_policy_version() self._validate_config() self._init_default_config() self._prepare_default_rules() self._init_profiles() self._prepare_exclude_list() self._init_dhcp_metadata() # Init QoS qos_driver.register(qos_utils.PolicyQosNotificationsHandler()) # Register NSXP trunk driver to support trunk extensions self.trunk_driver = trunk_driver.NsxpTrunkDriver.create(self) registry.subscribe(self.spawn_complete, resources.PROCESS, events.AFTER_SPAWN) # subscribe the init complete method last, so it will be called only # if init was successful registry.subscribe(self.init_complete, resources.PROCESS, events.AFTER_INIT) def _validate_config(self): if cfg.CONF.ipam_driver != 'internal': msg = _("External IPAM drivers not supported with nsxp plugin") LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) def _enable_ipv6_routing(self): # Ipv6 is disabled by default in NSX and should be enabled if self.nsxpolicy.feature_supported( nsxlib_consts.FEATURE_NSX_POLICY_GLOBAL_CONFIG): self.nsxpolicy.global_config.enable_ipv6() return if cfg.CONF.nsx_p.allow_passthrough: self.nsxlib.global_routing.enable_ipv6() else: 
LOG.warning("Unable to switch on Ipv6 forwarding. Ipv6 " "connectivity might be broken.") def _init_default_config(self): self._enable_ipv6_routing() # Validate other mandatory configuration if not cfg.CONF.nsx_p.dhcp_profile: raise cfg.RequiredOptError("dhcp_profile", group=cfg.OptGroup('nsx_p')) if not cfg.CONF.nsx_p.metadata_proxy: raise cfg.RequiredOptError("metadata_proxy", group=cfg.OptGroup('nsx_p')) # If using tags to find the objects, make sure tag scope is configured if (cfg.CONF.nsx_p.init_objects_by_tags and not cfg.CONF.nsx_p.search_objects_scope): raise cfg.RequiredOptError("search_objects_scope", group=cfg.OptGroup('nsx_p')) # Init AZ resources search_scope = (cfg.CONF.nsx_p.search_objects_scope if cfg.CONF.nsx_p.init_objects_by_tags else None) for az in self.get_azs_list(): az.translate_configured_names_to_uuids( self.nsxpolicy, nsxlib=self.nsxlib, search_scope=search_scope) az.validate_availability_zone(self.nsxpolicy, nsxlib=self.nsxlib) # WAF is currently not supported by the NSX self._waf_profile_uuid = None try: self.nsxpolicy.mixed_service.get(IPV6_RA_SERVICE) except nsx_lib_exc.ResourceNotFound: # create or override ipv6 RA service unicast_ra = self.nsxpolicy.icmp_service.build_entry( 'unicast RA', IPV6_RA_SERVICE, 'unicast', version=6, icmp_type=134) multicast_ra = self.nsxpolicy.icmp_service.build_entry( 'multicast RA', IPV6_RA_SERVICE, 'multicast', version=6, icmp_type=151) try: self.nsxpolicy.mixed_service.create_or_overwrite( IPV6_RA_SERVICE, IPV6_RA_SERVICE, entries=[unicast_ra, multicast_ra]) except nsx_lib_exc.StaleRevision as e: # This means that another controller is also creating this LOG.info("Failed to configure mixed_service: %s", e) except nsx_lib_exc.ManagerError: msg = _("Failed to configure RA service for IPv6 connectivity") LOG.error(msg) raise nsx_exc.NsxPluginException(err_msg=msg) def _init_backend_resource(self, resource_api, name_or_id, search_scope=None): resource_type = resource_api.entry_def.resource_type() if not 
name_or_id: return None try: # Check if the configured value is the ID resource_api.get(name_or_id, silent=True) return name_or_id except nsx_lib_exc.ResourceNotFound: # Search by tags if search_scope: resource_id = self.nsxpolicy.get_id_by_resource_and_tag( resource_type, search_scope, name_or_id) if resource_id: return resource_id # Check if the configured value is the name resource = resource_api.get_by_name(name_or_id) if resource: return resource['id'] msg = (_("Could not find %(type)s %(id)s") % { 'type': resource_type, 'id': name_or_id}) raise nsx_exc.NsxPluginException(err_msg=msg) def get_waf_profile_path_and_mode(self): # WAF is currently not supported by the NSX return None, None def _init_dhcp_metadata(self): if cfg.CONF.dhcp_agent_notification: msg = _("Need to disable dhcp_agent_notification when " "native DHCP & Metadata is enabled") raise nsx_exc.NsxPluginException(err_msg=msg) default_az = self.get_default_az() if default_az.use_policy_dhcp: self.use_policy_dhcp = True LOG.info("The policy plugin will use policy based DHCP v4/6") else: self._init_native_dhcp() self.use_policy_dhcp = False LOG.info("The policy plugin will use MP based DHCP v4") self._init_native_metadata() def init_availability_zones(self): self._availability_zones_data = nsxp_az.NsxPAvailabilityZones() def _validate_nsx_policy_version(self): self._nsx_version = self.nsxpolicy.get_version() LOG.info("NSX Version: %s", self._nsx_version) if (not self.nsxpolicy.feature_supported( nsxlib_consts.FEATURE_NSX_POLICY_NETWORKING) or not utils.is_nsx_version_2_5_0(self._nsx_version)): msg = (_("The NSX Policy plugin requires version 2.5 " "(current version %(ver)s)") % {'ver': self._nsx_version}) raise nsx_exc.NsxPluginException(err_msg=msg) def _init_profiles(self): """Find/Create segment profiles this plugin will use""" # Spoofguard profile (find it or create) try: self.nsxpolicy.spoofguard_profile.get(SPOOFGUARD_PROFILE_ID) except nsx_lib_exc.ResourceNotFound: try: 
self.nsxpolicy.spoofguard_profile.create_or_overwrite( SPOOFGUARD_PROFILE_ID, profile_id=SPOOFGUARD_PROFILE_ID, address_binding_whitelist=True, tags=self.nsxpolicy.build_v3_api_version_tag()) except nsx_lib_exc.StaleRevision as e: # This means that another controller is also creating this LOG.info("Failed to configure spoofguard_profile: %s", e) # No Port security spoofguard profile # (default NSX profile. just verify it exists) try: self.nsxpolicy.spoofguard_profile.get(NO_SPOOFGUARD_PROFILE_ID) except nsx_lib_exc.ResourceNotFound: msg = (_("Cannot find spoofguard profile %s") % NO_SPOOFGUARD_PROFILE_ID) raise nsx_exc.NsxPluginException(err_msg=msg) # Mac discovery profile (find it or create) try: self.nsxpolicy.mac_discovery_profile.get( MAC_DISCOVERY_PROFILE_ID) except nsx_lib_exc.ResourceNotFound: try: self.nsxpolicy.mac_discovery_profile.create_or_overwrite( MAC_DISCOVERY_PROFILE_ID, profile_id=MAC_DISCOVERY_PROFILE_ID, mac_change_enabled=True, mac_learning_enabled=True, tags=self.nsxpolicy.build_v3_api_version_tag()) except nsx_lib_exc.StaleRevision as e: # This means that another controller is also creating this LOG.info("Failed to configure mac_discovery_profile: %s", e) # No Mac discovery profile profile # (default NSX profile. 
just verify it exists) try: self.nsxpolicy.mac_discovery_profile.get( NO_MAC_DISCOVERY_PROFILE_ID) except nsx_lib_exc.ResourceNotFound: msg = (_("Cannot find MAC discovery profile %s") % NO_MAC_DISCOVERY_PROFILE_ID) raise nsx_exc.NsxPluginException(err_msg=msg) # No Port security segment-security profile (find it or create) try: self.nsxpolicy.segment_security_profile.get( NO_SEG_SECURITY_PROFILE_ID) except nsx_lib_exc.ResourceNotFound: try: self.nsxpolicy.segment_security_profile.create_or_overwrite( NO_SEG_SECURITY_PROFILE_ID, profile_id=NO_SEG_SECURITY_PROFILE_ID, bpdu_filter_enable=False, dhcp_client_block_enabled=False, dhcp_client_block_v6_enabled=False, dhcp_server_block_enabled=False, dhcp_server_block_v6_enabled=False, non_ip_traffic_block_enabled=False, ra_guard_enabled=False, rate_limits_enabled=False, tags=self.nsxpolicy.build_v3_api_version_tag()) except nsx_lib_exc.StaleRevision as e: # This means that another controller is also creating this LOG.info("Failed to configure segment_security_profile: %s", e) # Port security segment-security profile # (default NSX profile. 
just verify it exists) try: self.nsxpolicy.segment_security_profile.get( SEG_SECURITY_PROFILE_ID) except nsx_lib_exc.ResourceNotFound: msg = (_("Cannot find segment security profile %s") % SEG_SECURITY_PROFILE_ID) raise nsx_exc.NsxPluginException(err_msg=msg) # Find or create all neutron NDRA profiles ndra_profiles = { SLAAC_NDRA_PROFILE_ID: policy_constants.IPV6_RA_MODE_SLAAC_RA, STATELESS_DHCP_NDRA_PROFILE_ID: policy_constants.IPV6_RA_MODE_SLAAC_DHCP, STATEFUL_DHCP_NDRA_PROFILE_ID: policy_constants.IPV6_RA_MODE_DHCP, NO_SLAAC_NDRA_PROFILE_ID: policy_constants.IPV6_RA_MODE_DISABLED } for profile in ndra_profiles: try: self.nsxpolicy.ipv6_ndra_profile.get(profile) except nsx_lib_exc.ResourceNotFound: try: self.nsxpolicy.ipv6_ndra_profile.create_or_overwrite( profile, profile_id=profile, ra_mode=ndra_profiles[profile], tags=self.nsxpolicy.build_v3_api_version_tag()) except nsx_lib_exc.StaleRevision as e: # This means that another controller is also creating this LOG.info("Failed to configure ipv6_ndra_profile %s: %s", profile, e) self.client_ssl_profile = None LOG.debug("Initializing NSX-P Load Balancer default profiles") try: self._init_lb_profiles() except Exception as e: msg = (_("Unable to initialize NSX-P lb profiles: " "Reason: %(reason)s") % {'reason': str(e)}) raise nsx_exc.NsxPluginException(err_msg=msg) @staticmethod def plugin_type(): return projectpluginmap.NsxPlugins.NSX_P @staticmethod def is_tvd_plugin(): return False def _init_fwaas(self, with_rpc): if self.fwaas_callbacks: # already initialized return if fwaas_utils.is_fwaas_v2_plugin_enabled(): LOG.info("NSXp FWaaS v2 plugin enabled") self.fwaas_callbacks = fwaas_callbacks_v2.NsxpFwaasCallbacksV2( with_rpc) def _get_octavia_stats_getter(self): return listener_mgr.stats_getter def _init_lb_profiles(self): ssl_profile_client = self.nsxpolicy.load_balancer.client_ssl_profile with locking.LockManager.get_lock('nsxp_lb_profiles_init'): try: ssl_profile_client.get(NSX_P_CLIENT_SSL_PROFILE) except 
nsx_lib_exc.ResourceNotFound: try: ssl_profile_client.create_or_overwrite( NSX_P_CLIENT_SSL_PROFILE, client_ssl_profile_id=NSX_P_CLIENT_SSL_PROFILE, description='Neutron LB Client SSL Profile', tags=self.nsxpolicy.build_v3_api_version_tag()) except nsx_lib_exc.StaleRevision as e: # This means that another controller is also creating this LOG.info("Failed to configure LB client_ssl_profile: %s", e) self.client_ssl_profile = NSX_P_CLIENT_SSL_PROFILE def spawn_complete(self, resource, event, trigger, payload=None): # Init the FWaaS support with RPC listeners for the original process self._init_fwaas(with_rpc=True) self._init_octavia() self.octavia_stats_collector = ( octavia_listener.NSXOctaviaStatisticsCollector( self, self._get_octavia_stats_getter())) def _init_octavia(self): octavia_objects = self._get_octavia_objects() self.octavia_listener = octavia_listener.NSXOctaviaListener( **octavia_objects) def _get_octavia_objects(self): return { 'loadbalancer': loadbalancer_mgr.EdgeLoadBalancerManagerFromDict(), 'listener': listener_mgr.EdgeListenerManagerFromDict(), 'pool': pool_mgr.EdgePoolManagerFromDict(), 'member': member_mgr.EdgeMemberManagerFromDict(), 'healthmonitor': healthmonitor_mgr.EdgeHealthMonitorManagerFromDict(), 'l7policy': l7policy_mgr.EdgeL7PolicyManagerFromDict(), 'l7rule': l7rule_mgr.EdgeL7RuleManagerFromDict()} def init_complete(self, resource, event, trigger, payload=None): with locking.LockManager.get_lock('plugin-init-complete'): if self.init_is_complete: # Should be called only once per worker return # reinitialize the cluster upon fork for api workers to ensure # each process has its own keepalive loops + state self.nsxpolicy.reinitialize_cluster(resource, event, trigger, payload=payload) if self.nsxlib: self.nsxlib.reinitialize_cluster(resource, event, trigger, payload=payload) # Init the FWaaS support without RPC listeners # for the spawn workers self._init_fwaas(with_rpc=False) # Init octavia listener and endpoints self._init_octavia() 
self.init_is_complete = True def _setup_rpc(self): self.endpoints = [agents_db.AgentExtRpcCallback()] def _net_nsx_name(self, network): return utils.get_name_and_uuid(network['name'] or 'network', network['id']) def _create_network_on_backend(self, context, net_data, transparent_vlan, provider_data, az): net_data['id'] = net_data.get('id') or uuidutils.generate_uuid() # update the network name to indicate the neutron id too. net_name = self._net_nsx_name(net_data) tags = self.nsxpolicy.build_v3_tags_payload( net_data, resource_type='os-neutron-net-id', project_name=context.tenant_name) admin_state = net_data.get('admin_state_up', True) LOG.debug('create_network: %(net_name)s, %(physical_net)s, ' '%(tags)s, %(admin_state)s, %(vlan_id)s', {'net_name': net_name, 'physical_net': provider_data['physical_net'], 'tags': tags, 'admin_state': admin_state, 'vlan_id': provider_data['vlan_id']}) if transparent_vlan: # all vlan tags are allowed for guest vlan vlan_ids = ["0-%s" % const.MAX_VLAN_TAG] elif provider_data['vlan_id']: vlan_ids = [provider_data['vlan_id']] else: vlan_ids = None kwargs = { 'segment_id': net_data['id'], 'description': net_data.get('description'), 'vlan_ids': vlan_ids, 'transport_zone_id': provider_data['physical_net'], 'tags': tags} if (not admin_state and self.nsxpolicy.feature_supported( nsxlib_consts.FEATURE_NSX_POLICY_ADMIN_STATE)): kwargs['admin_state'] = admin_state if az.use_policy_md: kwargs['metadata_proxy_id'] = az._native_md_proxy_uuid self.nsxpolicy.segment.create_or_overwrite( net_name, **kwargs) if (not admin_state and cfg.CONF.nsx_p.allow_passthrough and not self.nsxpolicy.feature_supported( nsxlib_consts.FEATURE_NSX_POLICY_ADMIN_STATE)): # This api uses the passthrough api self.nsxpolicy.segment.set_admin_state( net_data['id'], admin_state) def _tier0_validator(self, tier0_uuid): # Fail if the tier0 uuid was not found on the NSX self.nsxpolicy.tier0.get(tier0_uuid) def _get_nsx_net_tz_id(self, nsx_net): return 
nsx_net['transport_zone_path'].split('/')[-1] def _allow_ens_networks(self): return True def _ens_psec_supported(self): """ENS security features are always enabled on NSX versions which the policy plugin supports. """ return True def _ens_qos_supported(self): return self.nsxpolicy.feature_supported( nsxlib_consts.FEATURE_ENS_WITH_QOS) def _validate_ens_net_portsecurity(self, net_data): """ENS security features are always enabled on NSX versions which the policy plugin supports. So no validation is needed """ pass def _assert_on_resource_admin_state_down(self, resource_data): """Network & port admin state is only supported with passthrough api""" if (not cfg.CONF.nsx_p.allow_passthrough and resource_data.get("admin_state_up") is False): err_msg = (_("admin_state_up=False is not supported when " "passthrough is disabled")) LOG.warning(err_msg) raise n_exc.InvalidInput(error_message=err_msg) def create_network(self, context, network): net_data = network['network'] external = net_data.get(external_net.EXTERNAL) is_external_net = validators.is_attr_set(external) and external tenant_id = net_data['tenant_id'] # validate the availability zone, and get the AZ object az = self._validate_obj_az_on_creation(context, net_data, 'network') self._ensure_default_security_group(context, tenant_id) vlt = False if extensions.is_extension_supported(self, 'vlan-transparent'): vlt = vlan_apidef.get_vlan_transparent(net_data) self._validate_create_network(context, net_data) self._assert_on_resource_admin_state_down(net_data) if is_external_net: is_provider_net, net_type, physical_net, vlan_id = ( self._validate_external_net_create( net_data, az._default_tier0_router, self._tier0_validator)) provider_data = {'is_provider_net': is_provider_net, 'net_type': net_type, 'physical_net': physical_net, 'vlan_id': vlan_id} is_backend_network = False else: provider_data = self._validate_provider_create( context, net_data, az, self.nsxpolicy.transport_zone, self.nsxpolicy.segment, 
transparent_vlan=vlt) if (provider_data['is_provider_net'] and provider_data['net_type'] == utils.NsxV3NetworkTypes.NSX_NETWORK): is_backend_network = False else: is_backend_network = True # Create the neutron network with db_api.CONTEXT_WRITER.using(context): # Create network in Neutron created_net = super(NsxPolicyPlugin, self).create_network( context, network) net_id = created_net['id'] if extensions.is_extension_supported(self, 'vlan-transparent'): super(NsxPolicyPlugin, self).update_network( context, net_id, {'network': {'vlan_transparent': vlt}}) self._extension_manager.process_create_network( context, net_data, created_net) if psec.PORTSECURITY not in net_data: net_data[psec.PORTSECURITY] = True self._process_network_port_security_create( context, net_data, created_net) self._process_l3_create(context, created_net, net_data) self._add_az_to_net(context, net_id, net_data) if provider_data['is_provider_net']: # Save provider network fields, needed by get_network() net_bindings = [nsx_db.add_network_binding( context.session, net_id, provider_data['net_type'], provider_data['physical_net'], provider_data['vlan_id'])] self._extend_network_dict_provider(context, created_net, bindings=net_bindings) # Create the backend NSX network if is_backend_network: try: self._create_network_on_backend( context, created_net, vlt, provider_data, az) except Exception as e: LOG.exception("Failed to create NSX network network: %s", e) with excutils.save_and_reraise_exception(): super(NsxPolicyPlugin, self).delete_network( context, net_id) # this extra lookup is necessary to get the # latest db model for the extension functions net_model = self._get_network(context, net_id) resource_extend.apply_funcs('networks', created_net, net_model) # MD Proxy is currently supported by the passthrough api only if (is_backend_network and not az.use_policy_md and cfg.CONF.nsx_p.allow_passthrough): # The new segment was not realized yet. Waiting for a bit. 
time.sleep(cfg.CONF.nsx_p.realization_wait_sec) nsx_net_id = self._get_network_nsx_id(context, net_id) if not nsx_net_id: msg = ("Unable to obtain backend network id for metadata " "proxy creation for network %s" % net_id) LOG.error(msg) raise nsx_exc.NsxPluginException(err_msg=msg) try: self._create_net_mp_mdproxy_port( context, created_net, az, nsx_net_id) except Exception as e: msg = ("Failed to complete network creation. error: %s" % e) LOG.exception(msg) self.delete_network(context, net_id) raise nsx_exc.NsxPluginException(err_msg=msg) # Update the QoS policy (will affect only future compute ports) qos_com_utils.set_qos_policy_on_new_net( context, net_data, created_net) if net_data.get(qos_consts.QOS_POLICY_ID): LOG.info("QoS Policy %(qos)s will be applied to future compute " "ports of network %(net)s", {'qos': net_data[qos_consts.QOS_POLICY_ID], 'net': created_net['id']}) return created_net def delete_network(self, context, network_id): is_external_net = self._network_is_external(context, network_id) if not is_external_net: # First disable DHCP & delete its port if self.use_policy_dhcp: lock = 'nsxp_network_' + network_id with locking.LockManager.get_lock(lock): network = self._get_network(context, network_id) if not self._has_active_port(context, network_id): self._disable_network_dhcp(context, network) elif cfg.CONF.nsx_p.allow_passthrough: self._delete_network_disable_dhcp(context, network_id) is_nsx_net = self._network_is_nsx_net(context, network_id) # Call DB operation for delete network as it will perform # checks on active ports self._retry_delete_network(context, network_id) # Delete MD proxy port. This is relevant only if the plugin used # MP MD proxy when this network is created. # If not - the port will not be found, and it is ok. # Note(asarfaty): In the future this code can be removed. 
if not is_external_net and cfg.CONF.nsx_p.allow_passthrough: self._delete_nsx_port_by_network(network_id) # Delete the network segment from the backend if not is_external_net and not is_nsx_net: try: self.nsxpolicy.segment.delete(network_id) except nsx_lib_exc.ResourceNotFound: # If the resource was not found on the backend do not worry # about it. The conditions has already been logged, so there # is no need to do further logging pass except nsx_lib_exc.ManagerError as e: # If there is a failure in deleting the resource, fail the # neutron operation even though the neutron object was already # deleted. This way the user will be aware of zombie resources # that may fail future actions. msg = (_("Backend segment deletion for neutron network %(id)s " "failed. The object was however removed from the " "Neutron database: %(e)s") % {'id': network_id, 'e': e}) raise nsx_exc.NsxPluginException(err_msg=msg) # Remove from caches if network_id in NET_NEUTRON_2_NSX_ID_CACHE: nsx_id = NET_NEUTRON_2_NSX_ID_CACHE[network_id] del NET_NEUTRON_2_NSX_ID_CACHE[network_id] if nsx_id in NET_NSX_2_NEUTRON_ID_CACHE: del NET_NSX_2_NEUTRON_ID_CACHE[nsx_id] def update_network(self, context, network_id, network): original_net = super(NsxPolicyPlugin, self).get_network( context, network_id) net_data = network['network'] # Validate the updated parameters self._validate_update_network(context, network_id, original_net, net_data) self._assert_on_resource_admin_state_down(net_data) # Neutron does not support changing provider network values utils.raise_if_updates_provider_attributes(net_data) extern_net = self._network_is_external(context, network_id) is_nsx_net = self._network_is_nsx_net(context, network_id) # Update the neutron network updated_net = super(NsxPolicyPlugin, self).update_network( context, network_id, network) self._extension_manager.process_update_network(context, net_data, updated_net) self._process_l3_update(context, updated_net, network['network']) 
self._extend_network_dict_provider(context, updated_net) if qos_consts.QOS_POLICY_ID in net_data: # attach the policy to the network in neutron DB #(will affect only future compute ports) qos_com_utils.update_network_policy_binding( context, network_id, net_data[qos_consts.QOS_POLICY_ID]) updated_net[qos_consts.QOS_POLICY_ID] = net_data[ qos_consts.QOS_POLICY_ID] if net_data[qos_consts.QOS_POLICY_ID]: LOG.info("QoS Policy %(qos)s will be applied to future " "compute ports of network %(net)s", {'qos': net_data[qos_consts.QOS_POLICY_ID], 'net': network_id}) # Update the backend segment if (not extern_net and not is_nsx_net and ('name' in net_data or 'description' in net_data or 'admin_state_up' in net_data)): net_name = utils.get_name_and_uuid( updated_net['name'] or 'network', network_id) kwargs = {'name': net_name, 'description': updated_net.get('description', '')} if 'admin_state_up' in net_data: if (self.nsxpolicy.feature_supported( nsxlib_consts.FEATURE_NSX_POLICY_ADMIN_STATE)): kwargs['admin_state'] = net_data['admin_state_up'] elif cfg.CONF.nsx_p.allow_passthrough: # Update admin state using the passthrough api self.nsxpolicy.segment.set_admin_state( network_id, net_data['admin_state_up']) try: self.nsxpolicy.segment.update(network_id, **kwargs) except nsx_lib_exc.ManagerError: LOG.exception("Unable to update NSX backend, rolling " "back changes on neutron") with excutils.save_and_reraise_exception(): # remove the AZ from the network before rollback because # it is read only, and breaks the rollback if 'availability_zone_hints' in original_net: del original_net['availability_zone_hints'] super(NsxPolicyPlugin, self).update_network( context, network_id, {'network': original_net}) return updated_net def _get_subnets_nd_profile(self, subnets, additional_profile=None): profiles = [] if additional_profile: profiles.append(additional_profile) for sub in subnets: profiles.append(self._get_subnet_ndra_profile(sub)) # If there is 1 stateful/stateless DHCP subnet 
(cannot have both) # use this profile if STATEFUL_DHCP_NDRA_PROFILE_ID in profiles: return STATEFUL_DHCP_NDRA_PROFILE_ID elif STATELESS_DHCP_NDRA_PROFILE_ID in profiles: return STATELESS_DHCP_NDRA_PROFILE_ID elif SLAAC_NDRA_PROFILE_ID in profiles: # if there is slaac subnet and no DHCP subnet use SLAAC return SLAAC_NDRA_PROFILE_ID return NO_SLAAC_NDRA_PROFILE_ID def _update_slaac_on_router(self, context, router_id, subnet, router_subnets, delete=False): # TODO(annak): redesign when policy supports downlink-level # ndra profile attachment # This code is optimised to deal with concurrency challenges # (which can not be always solved by lock because the plugin # can run on different hosts). # We prefer to make another backend call for attaching the # profile even if it is already attached, than rely on DB # to have an accurate picture of existing subnets. # This method assumes that all the v6 subnets have the same # ipv6_address_mode. # Otherwise, earlier validation would already fail. if subnet.get('ip_version') == 4: # This subnet will not affect the ND profile return # Fetch other overlay interface networks # (VLAN advertising is attached on interface level) ipv6_overlay_subnets = [s for s in router_subnets if s['id'] != subnet['id'] and s.get('ip_version') == 6 and s.get('enable_dhcp') and self._is_overlay_network(context, s['network_id'])] if delete: # 'subnet' was already removed from the router_subnets list before # calling this method if ipv6_overlay_subnets: # If there is another ipv6 overlay - select the profile by its # address mode profile_id = self._get_subnets_nd_profile(ipv6_overlay_subnets) else: # this was the last ipv6 subnet connected - # need to disable slaac on router profile_id = NO_SLAAC_NDRA_PROFILE_ID else: profile_id = self._get_subnet_ndra_profile(subnet) # Check the other subnets too if (ipv6_overlay_subnets and profile_id in [NO_SLAAC_NDRA_PROFILE_ID, SLAAC_NDRA_PROFILE_ID]): profile_id = self._get_subnets_nd_profile( ipv6_overlay_subnets, 
additional_profile=profile_id) self.nsxpolicy.tier1.update(router_id, ipv6_ndra_profile_id=profile_id) def _validate_net_dhcp_edge_cluster(self, context, network, az): """Validate that the dhcp server edge cluster match the one of the network TZ """ if not self.nsxlib: # Cannot validate the TZ because the fabric apis are available # only via the nsxlib return net_tz = self._get_net_tz(context, network['id']) dhcp_ec_path = self.nsxpolicy.dhcp_server_config.get( az._policy_dhcp_server_config).get('edge_cluster_path') ec_id = p_utils.path_to_id(dhcp_ec_path) ec_nodes = self.nsxlib.edge_cluster.get_transport_nodes(ec_id) ec_tzs = [] for tn_uuid in ec_nodes: ec_tzs.extend(self.nsxlib.transport_node.get_transport_zones( tn_uuid)) if net_tz not in ec_tzs: msg = (_('Network TZ %(tz)s does not match DHCP server ' 'edge cluster %(ec)s') % {'tz': net_tz, 'ec': ec_id}) LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) def _create_subnet_dhcp_port(self, context, az, network, subnet): port = self._get_net_dhcp_port(context, network['id']) if port: # If the port already exist (with another subnet) - update it with # the additional ip port['fixed_ips'].append({'subnet_id': subnet['id']}) super(NsxPolicyPlugin, self).update_port( context, port['id'], {'port': {'fixed_ips': port['fixed_ips']}}) return port_data = { "name": "", "admin_state_up": True, "device_id": network['id'], "device_owner": const.DEVICE_OWNER_DHCP, "network_id": network['id'], "tenant_id": network['tenant_id'], "mac_address": const.ATTR_NOT_SPECIFIED, "fixed_ips": [{'subnet_id': subnet['id']}], psec.PORTSECURITY: False } # Create the DHCP port (on neutron only) and update its port security port = {'port': port_data} neutron_port = super(NsxPolicyPlugin, self).create_port(context, port) is_ens_tz_port = self._is_ens_tz_port(context, port_data) self._create_port_preprocess_security(context, port, port_data, neutron_port, is_ens_tz_port) self._process_portbindings_create_and_update( context, port_data, 
neutron_port) def _delete_subnet_dhcp_port(self, context, net_id, subnet_id=None): dhcp_port = self._get_net_dhcp_port(context, net_id) if dhcp_port: if subnet_id: # deleting just this subnets dhcp if len(dhcp_port['fixed_ips']) > 1: new_fixed_ips = [ip for ip in dhcp_port['fixed_ips'] if ip['subnet_id'] != subnet_id] super(NsxPolicyPlugin, self).update_port( context, dhcp_port['id'], {'port': {'fixed_ips': new_fixed_ips}}) return # Delete the port itself self.delete_port(context, dhcp_port['id'], force_delete_dhcp=True) def _get_net_dhcp_port(self, context, net_id): filters = { 'network_id': [net_id], 'device_owner': [const.DEVICE_OWNER_DHCP] } dhcp_ports = self.get_ports(context, filters=filters) return dhcp_ports[0] if dhcp_ports else None def _get_sunbet_dhcp_server_ip(self, context, net_id, dhcp_subnet_id): dhcp_port = self._get_net_dhcp_port(context, net_id) if dhcp_port: dhcp_server_ips = [fip['ip_address'] for fip in dhcp_port['fixed_ips'] if fip['subnet_id'] == dhcp_subnet_id] if dhcp_server_ips: return dhcp_server_ips[0] def _is_dhcp_network(self, context, net_id): dhcp_port = self._get_net_dhcp_port(context, net_id) return True if dhcp_port else False def _get_segment_subnets(self, context, net_id, net_az=None, interface_subnets=None, deleted_dhcp_subnets=None): """Get an updated list of segmentSubnet objects to put on the segment Including router interface subnets (for overlay networks) & DHCP subnets (if using policy v4/v6 DHCP) """ dhcp_subnets = [] if self.use_policy_dhcp: # Find networks DHCP enabled subnets with db_api.CONTEXT_READER.using(context): network = self._get_network(context, net_id) for subnet in network.subnets: if(subnet.enable_dhcp and (subnet.ip_version == 4 or subnet.ipv6_address_mode != const.IPV6_SLAAC)): if (deleted_dhcp_subnets and subnet.id in deleted_dhcp_subnets): # Skip this one as it is being deleted continue dhcp_subnets.append(self.get_subnet(context, subnet.id)) if len(dhcp_subnets) == 2: # A network an have at most 2 
DHCP subnets break router_subnets = [] if interface_subnets: router_subnets = interface_subnets else: # Get networks overlay router interfaces if self._is_overlay_network(context, net_id): router_ids = self._get_network_router_ids( context.elevated(), net_id) if router_ids: router_id = router_ids[0] router_subnets = self._load_router_subnet_cidrs_from_db( context.elevated(), router_id) seg_subnets = [] dhcp_subnet_ids = [] for dhcp_subnet in dhcp_subnets: dhcp_subnet_id = dhcp_subnet['id'] dhcp_subnet_ids.append(dhcp_subnet_id) gw_addr = self._get_gateway_addr_from_subnet(dhcp_subnet) cidr_prefix = int(dhcp_subnet['cidr'].split('/')[1]) dhcp_server_ip = self._get_sunbet_dhcp_server_ip( context, net_id, dhcp_subnet_id) dns_nameservers = dhcp_subnet['dns_nameservers'] if not net_az: net_az = self.get_network_az_by_net_id(context, net_id) if (not dns_nameservers or not validators.is_attr_set(dns_nameservers)): # Use pre-configured dns server dns_nameservers = net_az.nameservers is_ipv6 = True if dhcp_subnet.get('ip_version') == 6 else False server_ip = "%s/%s" % (dhcp_server_ip, cidr_prefix) kwargs = {'server_address': server_ip, 'dns_servers': dns_nameservers} if is_ipv6: network = self._get_network(context, net_id) kwargs['domain_names'] = [ self._get_network_dns_domain(net_az, network)] dhcp_config = policy_defs.SegmentDhcpConfigV6(**kwargs) else: dhcp_config = policy_defs.SegmentDhcpConfigV4(**kwargs) seg_subnet = policy_defs.Subnet(gateway_address=gw_addr, dhcp_config=dhcp_config) seg_subnets.append(seg_subnet) for rtr_subnet in router_subnets: if rtr_subnet['id'] in dhcp_subnet_ids: # Do not add the same subnet twice continue if rtr_subnet['network_id'] == net_id: gw_addr = self._get_gateway_addr_from_subnet(rtr_subnet) seg_subnets.append( policy_defs.Subnet(gateway_address=gw_addr, dhcp_config=None)) return seg_subnets def _enable_subnet_dhcp(self, context, network, subnet, az): # Allocate a neutron port for the DHCP server 
self._create_subnet_dhcp_port(context, az, network, subnet) # Update the DHCP server on the segment net_id = network['id'] segment_id = self._get_network_nsx_segment_id(context, net_id) seg_subnets = self._get_segment_subnets(context, net_id, net_az=az) # Update dhcp server config on the segment self.nsxpolicy.segment.update( segment_id=segment_id, dhcp_server_config_id=az._policy_dhcp_server_config, subnets=seg_subnets) def _get_net_dhcp_subnets(self, context, net_id): net_dhcp_subnets = [] net_obj = self._get_network(context, net_id) for subnet in net_obj.subnets: if(subnet.enable_dhcp and (subnet.ip_version == 4 or subnet.ipv6_address_mode != const.IPV6_SLAAC)): # This is a DHCP subnet net_dhcp_subnets.append(subnet.id) return net_dhcp_subnets def _disable_network_dhcp(self, context, network, subnet_id=None): net_id = network['id'] net_dhcp_subnets = self._get_net_dhcp_subnets(context, net_id) segment_id = self._get_network_nsx_segment_id(context, net_id) if subnet_id and len(net_dhcp_subnets) > 1: # remove dhcp only from this subnet seg_subnets = self._get_segment_subnets( context, net_id, deleted_dhcp_subnets=[subnet_id]) self.nsxpolicy.segment.update( segment_id, subnets=seg_subnets) self._delete_subnet_dhcp_port(context, net_id, subnet_id=subnet_id) else: # Remove dhcp server config completly from the segment seg_subnets = self._get_segment_subnets( context, net_id, deleted_dhcp_subnets=net_dhcp_subnets) self.nsxpolicy.segment.update( segment_id=segment_id, subnets=seg_subnets, dhcp_server_config_id=None) # Delete the neutron DHCP port (and its bindings) self._delete_subnet_dhcp_port(context, net_id) def _update_nsx_net_dhcp(self, context, network, az, subnet=None): """Update the DHCP config on a network Update the segment DHCP config, as well as the dhcp bindings on the ports. If just a specific subnet was modified, update only its ports. 
""" net_id = network['id'] segment_id = self._get_network_nsx_segment_id(context, net_id) seg_subnets = self._get_segment_subnets(context, net_id, net_az=az) filters = {'network_id': [net_id]} ports = self.get_ports(context, filters=filters) self.nsxpolicy.segment.update( segment_id=segment_id, dhcp_server_config_id=az._policy_dhcp_server_config, subnets=seg_subnets) # Update DHCP bindings for all the ports. for port in ports: self._add_or_overwrite_port_policy_dhcp_binding( context, port, segment_id, subnet) def _validate_net_type_with_dhcp(self, context, network): ddi_support, ddi_type = self._is_ddi_supported_on_net_with_type( context, network['id'], network=network) if not ddi_support: msg = _("Native DHCP is not supported for %(type)s " "network %(id)s") % {'id': network['id'], 'type': ddi_type} LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) def _validate_segment_subnets_num(self, context, net_id, subnet_data): """Validate no multiple segment subnets on the NSX The NSX cannot support more than 1 segment subnet of the same ip version. This include dhcp subnets and overlay router interfaces """ if ('enable_dhcp' not in subnet_data or not subnet_data.get('enable_dhcp')): # NO DHCP so no new segment subnet return ip_ver = subnet_data.get('ip_version', 4) if ip_ver == 6: # Since the plugin does not allow multiple ipv6 subnets, # this can be ignored. return overlay_net = self._is_overlay_network(context, net_id) if not overlay_net: # Since the plugin allows only 1 DHCP subnet, if this is not an # overlay network, no problem. 
return interface_ports = self._get_network_interface_ports( context, net_id) if interface_ports: # Should have max 1 router interface per network if_port = interface_ports[0] if if_port['fixed_ips']: if_subnet = interface_ports[0]['fixed_ips'][0]['subnet_id'] if subnet_data.get('id') != if_subnet: msg = (_("Can not create a DHCP subnet on network %(net)s " "as another %(ver)s subnet is attached to a " "router") % {'net': net_id, 'ver': ip_ver}) LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) def _init_ipv6_gateway(self, subnet): # Override neutron decision to verify that also for ipv6 the first # ip in the cidr is not used, as the NSX does not support xxxx::0 as a # segment subnet gateway. if (subnet.get('gateway_ip') is const.ATTR_NOT_SPECIFIED and subnet.get('ip_version') == const.IP_VERSION_6 and subnet.get('cidr') and subnet['cidr'] != const.ATTR_NOT_SPECIFIED): net = netaddr.IPNetwork(subnet['cidr']) subnet['gateway_ip'] = str(net.network + 1) def _validate_subnet_host_routes(self, subnet, orig_subnet=None): self._validate_number_of_subnet_static_routes(subnet) if orig_subnet: self._validate_host_routes_input( subnet, orig_enable_dhcp=orig_subnet['enable_dhcp'], orig_host_routes=orig_subnet['host_routes']) else: self._validate_host_routes_input(subnet) # IPv6 subnets cannot support host routes if (subnet['subnet'].get('ip_version') == 6 or (orig_subnet and orig_subnet.get('ip_version') == 6)): if (validators.is_attr_set(subnet['subnet'].get('host_routes')) and subnet['subnet']['host_routes']): err_msg = _("Host routes can only be supported with IPv4 " "subnets") raise n_exc.InvalidInput(error_message=err_msg) def _has_dhcp_enabled_subnet(self, context, network, ip_version=4): for subnet in network.subnets: if subnet.enable_dhcp and subnet.ip_version == ip_version: if ip_version == 4: return True elif subnet.ipv6_address_mode != const.IPV6_SLAAC: return True return False @nsx_plugin_common.api_replay_mode_wrapper def create_subnet(self, context, 
                      subnet):
        if not self.use_policy_dhcp:
            # Subnet with MP DHCP
            return self._create_subnet_with_mp_dhcp(context, subnet)
        self._validate_subnet_host_routes(subnet)
        net_id = subnet['subnet']['network_id']
        network = self._get_network(context, net_id)
        self._validate_single_ipv6_subnet(context, network, subnet['subnet'])
        self._init_ipv6_gateway(subnet['subnet'])
        net_az = self.get_network_az_by_net_id(context, net_id)

        # Allow manipulation of only 1 subnet of the same network at once
        lock = 'nsxp_network_' + net_id
        with locking.LockManager.get_lock(lock):
            # DHCP validations (before creating the neutron subnet)
            with_dhcp = False
            if self._subnet_with_native_dhcp(subnet['subnet']):
                with_dhcp = True
                self._validate_external_subnet(context, net_id)
                self._validate_net_dhcp_edge_cluster(context, network, net_az)
                self._validate_net_type_with_dhcp(context, network)

                # Only one DHCP-enabled subnet per ip version is allowed
                ip_version = subnet['subnet'].get('ip_version', 4)
                if self._has_dhcp_enabled_subnet(context, network, ip_version):
                    msg = (_("Can not create more than one DHCP-enabled "
                             "subnet for IPv%(ver)s in network %(net)s") %
                           {'ver': ip_version, 'net': net_id})
                    LOG.error(msg)
                    raise n_exc.InvalidInput(error_message=msg)
                self._validate_segment_subnets_num(
                    context, net_id, subnet['subnet'])

            # Create the neutron subnet.
            # Any failure from here and on will require rollback.
            created_subnet = super(NsxPolicyPlugin, self).create_subnet(
                context, subnet)
            try:
                # This can be called only after the super create
                # since we need the subnet pool to be translated
                # to allocation pools
                self._validate_address_space(context, created_subnet)
            except n_exc.InvalidInput:
                # revert the subnet creation
                with excutils.save_and_reraise_exception():
                    super(NsxPolicyPlugin, self).delete_subnet(
                        context, created_subnet['id'])

            self._extension_manager.process_create_subnet(context,
                subnet['subnet'], created_subnet)

            if with_dhcp:
                try:
                    # Enable the network DHCP on the NSX
                    self._enable_subnet_dhcp(
                        context, network, created_subnet, net_az)
                except (nsx_lib_exc.ManagerError, nsx_exc.NsxPluginException):
                    # revert the subnet creation
                    with excutils.save_and_reraise_exception():
                        # Try to delete the DHCP port, and the neutron subnet
                        self._delete_subnet_dhcp_port(
                            context, net_id, subnet_id=created_subnet['id'])
                        super(NsxPolicyPlugin, self).delete_subnet(
                            context, created_subnet['id'])

        return created_subnet

    def delete_subnet(self, context, subnet_id):
        """Delete a neutron subnet, disabling its NSX DHCP config first."""
        if not self.use_policy_dhcp:
            # Subnet with MP DHCP
            return self.delete_subnet_with_mp_dhcp(context, subnet_id)
        if self._has_native_dhcp_metadata():
            # Ensure that subnet is not deleted if attached to router.
            self._subnet_check_ip_allocations_internal_router_ports(
                context, subnet_id)
            subnet = self.get_subnet(context, subnet_id)
            if self._subnet_with_native_dhcp(subnet):
                lock = 'nsxp_network_' + subnet['network_id']
                with locking.LockManager.get_lock(lock):
                    # Remove this subnet DHCP config
                    network = self._get_network(context, subnet['network_id'])
                    try:
                        self._disable_network_dhcp(context, network,
                                                   subnet_id=subnet_id)
                    except Exception as e:
                        # Best effort: log and proceed with neutron deletion
                        LOG.error("Failed to disable DHCP for "
                                  "network %(id)s. Exception: %(e)s",
                                  {'id': network['id'], 'e': e})
                        # Continue for the neutron subnet deletion
        # Delete neutron subnet
        super(NsxPolicyPlugin, self).delete_subnet(context, subnet_id)

    def update_subnet(self, context, subnet_id, subnet):
        if not self.use_policy_dhcp:
            # Subnet with MP DHCP
            return self.update_subnet_with_mp_dhcp(context, subnet_id, subnet)

        subnet_data = subnet['subnet']
        updated_subnet = None
        orig_subnet = self.get_subnet(context, subnet_id)
        self._validate_subnet_host_routes(subnet, orig_subnet=orig_subnet)
        net_id = orig_subnet['network_id']
        network = self._get_network(context, net_id)
        net_az = self.get_network_az_by_net_id(context, net_id)

        enable_dhcp = self._subnet_with_native_dhcp(
            subnet_data, orig_subnet=orig_subnet)
        orig_enable_dhcp = self._subnet_with_native_dhcp(orig_subnet)

        if enable_dhcp != orig_enable_dhcp:
            # Update subnet with DHCP status change
            self._validate_external_subnet(context, net_id)
            lock = 'nsxp_network_' + net_id
            with locking.LockManager.get_lock(lock):
                if enable_dhcp:
                    self._validate_net_type_with_dhcp(context, network)

                    # Same single-DHCP-subnet-per-ip-version rule as create
                    ip_version = orig_subnet.get('ip_version', 4)
                    if self._has_dhcp_enabled_subnet(context, network,
                                                     ip_version):
                        msg = (_("Can not create more than one DHCP-enabled "
                                 "subnet for IPv%(ver)s in network %(net)s") %
                               {'net': net_id, 'ver': ip_version})
                        LOG.error(msg)
                        raise n_exc.InvalidInput(error_message=msg)
                    self._validate_segment_subnets_num(
                        context, net_id, subnet_data)

                updated_subnet = super(NsxPolicyPlugin, self).update_subnet(
                    context, subnet_id, subnet)
                self._extension_manager.process_update_subnet(
                    context, subnet_data, updated_subnet)

                try:
                    if enable_dhcp:
                        self._enable_subnet_dhcp(context, network,
                                                 updated_subnet, net_az)
                    else:
                        self._disable_network_dhcp(context, network,
                                                   subnet_id=subnet_id)
                except (nsx_lib_exc.ManagerError, nsx_exc.NsxPluginException):
                    # revert the subnet update
                    with excutils.save_and_reraise_exception():
                        super(NsxPolicyPlugin, self).update_subnet(
                            context, subnet_id, {'subnet': orig_subnet})
        else:
            # No dhcp changes - just call super update
            updated_subnet = super(NsxPolicyPlugin, self).update_subnet(
                context, subnet_id, subnet)
            self._extension_manager.process_update_subnet(
                context, subnet_data, updated_subnet)

            # Check if needs to update DHCP related NSX resources
            # (only if the subnet changed, but dhcp was already enabled)
            if (enable_dhcp and orig_enable_dhcp and
                ('dns_nameservers' in subnet_data or
                 'gateway_ip' in subnet_data or
                 'host_routes' in subnet_data)):
                self._update_nsx_net_dhcp(context, network, net_az,
                                          updated_subnet)

        return updated_subnet

    def _build_port_address_bindings(self, context, port_data):
        """Build the NSX address bindings for a port, or None if port
        security is disabled (no spoofguard enforcement needed).
        """
        psec_on, has_ip = self._determine_port_security_and_has_ip(context,
                                                                   port_data)
        if not psec_on:
            return None

        address_bindings = []
        for fixed_ip in port_data['fixed_ips']:
            ip_addr = fixed_ip['ip_address']
            mac_addr = port_data['mac_address']
            binding = self.nsxpolicy.segment_port.build_address_binding(
                ip_addr, mac_addr)
            address_bindings.append(binding)

            # add address binding for link local ipv6 address, otherwise
            # neighbor discovery will be blocked by spoofguard.
            # for now only one ipv6 address is allowed
            if netaddr.IPAddress(ip_addr).version == 6:
                lladdr = netaddr.EUI(mac_addr).ipv6_link_local()
                binding = self.nsxpolicy.segment_port.build_address_binding(
                    lladdr, mac_addr)
                address_bindings.append(binding)

        # Allowed-address-pairs get bindings too
        for pair in port_data.get(addr_apidef.ADDRESS_PAIRS):
            binding = self.nsxpolicy.segment_port.build_address_binding(
                pair['ip_address'], pair['mac_address'])
            address_bindings.append(binding)

        return address_bindings

    def _get_network_nsx_id(self, context, network_id):
        """Return the id of this logical switch in the nsx manager

        This api waits for the segment to really be realized, and return the
        ID of the NSX logical switch.
        If it was not realized or timed out retrying, it will return None
        The nova api will use this to attach to the instance.
        """
        # Module-level cache avoids repeated realization queries
        if network_id in NET_NEUTRON_2_NSX_ID_CACHE:
            return NET_NEUTRON_2_NSX_ID_CACHE[network_id]
        if not self._network_is_external(context, network_id):
            segment_id = self._get_network_nsx_segment_id(context, network_id)
            try:
                nsx_id = self.nsxpolicy.segment.get_realized_logical_switch_id(
                    segment_id)
                # Add result to caches
                NET_NEUTRON_2_NSX_ID_CACHE[network_id] = nsx_id
                NET_NSX_2_NEUTRON_ID_CACHE[nsx_id] = network_id
                return nsx_id
            except nsx_lib_exc.ManagerError:
                # Falls through and returns None (not cached, so a later
                # call can retry once the segment is realized)
                LOG.error("Network %s was not realized", network_id)
                # Do not cache this result
        else:
            # Add empty result to cache
            NET_NEUTRON_2_NSX_ID_CACHE[network_id] = None

    def _get_network_nsx_segment_id(self, context, network_id):
        """Return the NSX segment ID matching the neutron network id

        Usually the NSX ID is the same as the neutron ID. The exception is
        when this is a provider NSX_NETWORK, which means the network already
        existed on the NSX backend, and it is being consumed by the plugin.
        """
        bindings = nsx_db.get_network_bindings(context.session, network_id)
        if (bindings and
            bindings[0].binding_type ==
                utils.NsxV3NetworkTypes.NSX_NETWORK):
            # return the ID of the NSX network
            return bindings[0].phy_uuid
        return network_id

    def _build_port_tags(self, port_data):
        """Build NSX tags for the port's security groups (regular and
        provider security groups).
        """
        sec_groups = []
        sec_groups.extend(port_data.get(ext_sg.SECURITYGROUPS, []))
        sec_groups.extend(port_data.get(provider_sg.PROVIDER_SECURITYGROUPS,
                                        []))
        tags = []
        for sg in sec_groups:
            tags = nsxlib_utils.add_v3_tag(tags,
                                           NSX_P_SECURITY_GROUP_TAG, sg)
        return tags

    def _do_port_backend_calls(self, name, segment_id, spoofguard_profile,
                               seg_sec_profile, mac_discovery_profile,
                               qos_policy_id, **kwargs):
        """Create/overwrite the segment port and attach its profiles.

        Called either directly or inside an NsxPolicyTransaction by
        _create_or_update_port_on_backend.
        """
        self.nsxpolicy.segment_port.create_or_overwrite(
            name, segment_id, **kwargs)
        # add the security profiles to the port
        self.nsxpolicy.segment_port_security_profiles.create_or_overwrite(
            name, segment_id,
            port_id=kwargs['port_id'],
            spoofguard_profile_id=spoofguard_profile,
            segment_security_profile_id=seg_sec_profile)
        # add the mac discovery profile to the port
self.nsxpolicy.segment_port_discovery_profiles.create_or_overwrite( name, segment_id, kwargs['port_id'], mac_discovery_profile_id=mac_discovery_profile) # Add QoS segment profile (only if QoS is enabled) if directory.get_plugin(plugin_const.QOS): self.nsxpolicy.segment_port_qos_profiles.create_or_overwrite( name, segment_id, kwargs['port_id'], qos_profile_id=qos_policy_id) def _create_or_update_port_on_backend(self, context, port_data, is_psec_on, qos_policy_id, original_port=None): is_create = original_port is None is_update = not is_create name = self._build_port_name(context, port_data) address_bindings = self._build_port_address_bindings( context, port_data) device_owner = port_data.get('device_owner') vif_id = None if device_owner and device_owner != l3_db.DEVICE_OWNER_ROUTER_INTF: vif_id = port_data['id'] tags = self._build_port_tags(port_data) if device_owner == l3_db.DEVICE_OWNER_ROUTER_INTF: tag_resource_type = 'os-neutron-rport-id' else: tag_resource_type = NSX_P_PORT_RESOURCE_TYPE tags.extend(self.nsxpolicy.build_v3_tags_payload( port_data, resource_type=tag_resource_type, project_name=context.tenant_name)) if self._is_excluded_port(device_owner, is_psec_on): tags.append({'scope': security.PORT_SG_SCOPE, 'tag': NSX_P_EXCLUDE_LIST_TAG}) if self.support_external_port_tagging: external_tags = self.get_external_tags_for_port( context, port_data['id']) if external_tags: total_len = len(external_tags) + len(tags) if total_len > nsxlib_utils.MAX_TAGS: LOG.warning("Cannot add external tags to port %s: " "too many tags", port_data['id']) else: tags.extend(external_tags) segment_id = self._get_network_nsx_segment_id( context, port_data['network_id']) # Calculate the port security profiles if is_psec_on: spoofguard_profile = SPOOFGUARD_PROFILE_ID seg_sec_profile = SEG_SECURITY_PROFILE_ID else: spoofguard_profile = NO_SPOOFGUARD_PROFILE_ID seg_sec_profile = NO_SEG_SECURITY_PROFILE_ID mac_disc_profile_must = False if is_psec_on: address_pairs = 
port_data.get(addr_apidef.ADDRESS_PAIRS) if validators.is_attr_set(address_pairs) and address_pairs: mac_disc_profile_must = True mac_learning_enabled = ( validators.is_attr_set(port_data.get(mac_ext.MAC_LEARNING)) and port_data.get(mac_ext.MAC_LEARNING) is True) if mac_disc_profile_must or mac_learning_enabled: mac_discovery_profile = MAC_DISCOVERY_PROFILE_ID else: mac_discovery_profile = NO_MAC_DISCOVERY_PROFILE_ID # Prepare the args for the segment port creation kwargs = {'port_id': port_data['id'], 'description': port_data.get('description', ''), 'address_bindings': address_bindings, 'tags': tags} if vif_id: kwargs['vif_id'] = vif_id if (self.nsxpolicy.feature_supported( nsxlib_consts.FEATURE_NSX_POLICY_ADMIN_STATE) and 'admin_state_up' in port_data): kwargs['admin_state'] = port_data['admin_state_up'] if not self.nsxpolicy.feature_supported( nsxlib_consts.FEATURE_PARTIAL_UPDATES): # If partial updates are not supported, using transactions will # reset the backend segment name self._do_port_backend_calls( name, segment_id, spoofguard_profile, seg_sec_profile, mac_discovery_profile, qos_policy_id, **kwargs) else: # Create/ update the backend port in a single transaction with policy_trans.NsxPolicyTransaction(): self._do_port_backend_calls( name, segment_id, spoofguard_profile, seg_sec_profile, mac_discovery_profile, qos_policy_id, **kwargs) # Update port admin status using passthrough api, only if it changed # or new port with disabled admin state if (not self.nsxpolicy.feature_supported( nsxlib_consts.FEATURE_NSX_POLICY_ADMIN_STATE) and cfg.CONF.nsx_p.allow_passthrough and 'admin_state_up' in port_data): new_state = port_data['admin_state_up'] if ((is_create and new_state is False) or (is_update and original_port.get('admin_state_up') != new_state)): # This api uses the passthrough api self.nsxpolicy.segment_port.set_admin_state( segment_id, port_data['id'], new_state) def base_create_port(self, context, port): neutron_db = super(NsxPolicyPlugin, 
                                                 # NOTE(review): continuation
                                                 # of base_create_port
                                                 self).create_port(context,
                                                                   port)
        self._extension_manager.process_create_port(
            context, port['port'], neutron_db)
        return neutron_db

    def _is_backend_port(self, context, port_data, delete=False):
        """Return True if this port should exist on the NSX backend.

        External-net, router-interface and DHCP ports are never backend
        ports; Octavia VIP ports are skipped on create but considered on
        delete (leftovers from older deployments may exist).
        """
        is_external_net = self._network_is_external(
            context, port_data['network_id'])
        device_owner = port_data.get('device_owner')
        is_router_interface = (device_owner ==
                               l3_db.DEVICE_OWNER_ROUTER_INTF)
        is_dhcp_port = (device_owner == const.DEVICE_OWNER_DHCP)
        is_octavia_port = (device_owner == oct_const.DEVICE_OWNER_OCTAVIA)
        if is_external_net or is_router_interface or is_dhcp_port:
            # DHCP is handled on NSX level
            # Router is connected automatically in policy
            return False
        if not delete and is_octavia_port:
            # Octavia vip port should not be created on the NSX.
            # Since octavia backend ports from older deployments may exist,
            # need to try and delete those.
            return False
        return True

    def _filter_ipv6_dhcp_fixed_ips(self, context, fixed_ips):
        """Return the fixed-ips that need an IPv6 DHCP binding:
        v6 addresses on DHCP-enabled, non-SLAAC subnets.
        """
        ips = []
        for fixed_ip in fixed_ips:
            if netaddr.IPNetwork(fixed_ip['ip_address']).version != 6:
                continue
            with db_api.CONTEXT_READER.using(context):
                subnet = self.get_subnet(context, fixed_ip['subnet_id'])
            if (subnet['enable_dhcp'] and
                subnet.get('ipv6_address_mode') != 'slaac'):
                ips.append(fixed_ip)
        return ips

    def _add_or_overwrite_port_policy_dhcp_binding(
        self, context, port, segment_id, dhcp_subnet=None):
        """Create/overwrite the NSX DHCP static bindings for a port.

        When dhcp_subnet is given, only fixed-ips on that subnet are bound
        (avoids a get_subnet DB call per fixed-ip).
        Binding ids are '<port-id>-ipv4' / '<port-id>-ipv6'.
        """
        if not utils.is_port_dhcp_configurable(port):
            return
        net_id = port['network_id']
        for fixed_ip in self._filter_ipv4_dhcp_fixed_ips(
            context, port['fixed_ips']):
            # There will be only one ipv4 ip here
            binding_id = port['id'] + '-ipv4'
            name = 'IPv4 binding for port %s' % port['id']
            ip = fixed_ip['ip_address']
            hostname = 'host-%s' % ip.replace('.', '-')
            if dhcp_subnet:
                if fixed_ip['subnet_id'] != dhcp_subnet['id']:
                    continue
                subnet = dhcp_subnet
            else:
                subnet = self.get_subnet(context, fixed_ip['subnet_id'])
            gateway_ip = subnet.get('gateway_ip')
            options = self._get_dhcp_options(
                context, ip, port.get(ext_edo.EXTRADHCPOPTS),
                net_id, subnet)
            self.nsxpolicy.segment_dhcp_static_bindings.create_or_overwrite_v4(
                name, segment_id,
                binding_id=binding_id,
                gateway_address=gateway_ip,
                host_name=hostname,
                ip_address=ip,
                lease_time=cfg.CONF.nsx_p.dhcp_lease_time,
                mac_address=port['mac_address'],
                options=options)

        for fixed_ip in self._filter_ipv6_dhcp_fixed_ips(
            context, port['fixed_ips']):
            # There will be only one ipv6 ip here
            binding_id = port['id'] + '-ipv6'
            name = 'IPv6 binding for port %s' % port['id']
            ip = fixed_ip['ip_address']
            if dhcp_subnet:
                if fixed_ip['subnet_id'] != dhcp_subnet['id']:
                    continue
                subnet = dhcp_subnet
            else:
                subnet = self.get_subnet(context, fixed_ip['subnet_id'])
            self.nsxpolicy.segment_dhcp_static_bindings.create_or_overwrite_v6(
                name, segment_id,
                binding_id=binding_id,
                ip_addresses=[ip],
                lease_time=cfg.CONF.nsx_p.dhcp_lease_time,
                mac_address=port['mac_address'])

    def _add_port_policy_dhcp_binding(self, context, port):
        """Add DHCP bindings for a port if its network uses NSX DHCP."""
        net_id = port['network_id']
        if not self._is_dhcp_network(context, net_id):
            return
        segment_id = self._get_network_nsx_segment_id(context, net_id)
        self._add_or_overwrite_port_policy_dhcp_binding(
            context, port, segment_id)

    def _delete_port_policy_dhcp_binding(self, context, port):
        """Delete the port's v4/v6 DHCP bindings (missing ones ignored)."""
        # Do not check device_owner here because Nova may have already
        # deleted that before Neutron's port deletion.
        net_id = port['network_id']
        if not self._is_dhcp_network(context, net_id):
            return
        segment_id = self._get_network_nsx_segment_id(context, net_id)
        v4_dhcp = v6_dhcp = False
        for fixed_ip in port['fixed_ips']:
            ip_addr = fixed_ip['ip_address']
            if netaddr.IPAddress(ip_addr).version == 6:
                v6_dhcp = True
            else:
                v4_dhcp = True
        if v4_dhcp:
            try:
                bindingv4_id = port['id'] + '-ipv4'
                self.nsxpolicy.segment_dhcp_static_bindings.delete(
                    segment_id, bindingv4_id)
            except nsx_lib_exc.ResourceNotFound:
                # Already gone on the backend - nothing to do
                pass
        if v6_dhcp:
            try:
                bindingv6_id = port['id'] + '-ipv6'
                self.nsxpolicy.segment_dhcp_static_bindings.delete(
                    segment_id, bindingv6_id)
            except nsx_lib_exc.ResourceNotFound:
                pass

    def _update_port_policy_dhcp_binding(self, context, old_port, new_port):
        # First check if any address in fixed_ips changed.
        # Then update DHCP server setting or DHCP static binding
        # depending on the port type.
        # Note that Neutron allows a port with multiple IPs in the
        # same subnet. But backend DHCP server may not support that.
        if (utils.is_port_dhcp_configurable(old_port) !=
            utils.is_port_dhcp_configurable(new_port)):
            # Note that the device_owner could be changed,
            # but still needs DHCP binding.
            if utils.is_port_dhcp_configurable(old_port):
                self._delete_port_policy_dhcp_binding(context, old_port)
            else:
                self._add_port_policy_dhcp_binding(context, new_port)
            return

        # Collect IPv4 DHCP addresses from original and updated fixed_ips
        # in the form of [(subnet_id, ip_address)].
        old_fixed_v4 = set([(fixed_ip['subnet_id'], fixed_ip['ip_address'])
                            for fixed_ip in self._filter_ipv4_dhcp_fixed_ips(
                                context, old_port['fixed_ips'])])
        new_fixed_v4 = set([(fixed_ip['subnet_id'], fixed_ip['ip_address'])
                            for fixed_ip in self._filter_ipv4_dhcp_fixed_ips(
                                context, new_port['fixed_ips'])])
        old_fixed_v6 = set([(fixed_ip['subnet_id'], fixed_ip['ip_address'])
                            for fixed_ip in self._filter_ipv6_dhcp_fixed_ips(
                                context, old_port['fixed_ips'])])
        new_fixed_v6 = set([(fixed_ip['subnet_id'], fixed_ip['ip_address'])
                            for fixed_ip in self._filter_ipv6_dhcp_fixed_ips(
                                context, new_port['fixed_ips'])])
        # Find out the subnet/IP differences before and after the update.
        v4_to_add = list(new_fixed_v4 - old_fixed_v4)
        v4_to_delete = list(old_fixed_v4 - new_fixed_v4)
        v6_to_add = list(new_fixed_v6 - old_fixed_v6)
        v6_to_delete = list(old_fixed_v6 - new_fixed_v6)
        ip_change = (v4_to_add or v4_to_delete or
                     v6_to_add or v6_to_delete)

        if (old_port["device_owner"] == const.DEVICE_OWNER_DHCP and
            ip_change):
            # Update backend DHCP server address if the IP address of a DHCP
            # port is changed.
if len(new_fixed_v4) > 1 or len(new_fixed_v6) > 1: msg = _("Can only configure one IP address on a DHCP server") LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) net_id = old_port['network_id'] network = self.get_network(context, net_id) net_az = self.get_network_az_by_net_id(context, net_id) self._update_nsx_net_dhcp(context, network, net_az) elif utils.is_port_dhcp_configurable(new_port): dhcp_opts_changed = (old_port[ext_edo.EXTRADHCPOPTS] != new_port[ext_edo.EXTRADHCPOPTS]) if (ip_change or dhcp_opts_changed or old_port['mac_address'] != new_port['mac_address']): if new_fixed_v4 or new_fixed_v6: # Recreate the bindings of this port self._add_port_policy_dhcp_binding(context, new_port) else: self._delete_port_policy_dhcp_binding(context, old_port) def _assert_on_ipv6_port_with_dhcpopts(self, context, port_data, orig_port=None): """IPv6 port only port cannot support EXTRADHCPOPTS""" # Get the updated EXTRADHCPOPTS extradhcpopts = None if ext_edo.EXTRADHCPOPTS in port_data: extradhcpopts = port_data[ext_edo.EXTRADHCPOPTS] elif orig_port: extradhcpopts = orig_port.get(ext_edo.EXTRADHCPOPTS) if not extradhcpopts: return # Get the updated list of fixed ips fixed_ips = [] if (port_data.get('fixed_ips') and validators.is_attr_set(port_data['fixed_ips'])): fixed_ips = port_data['fixed_ips'] elif (orig_port and orig_port.get('fixed_ips') and validators.is_attr_set(orig_port['fixed_ips'])): fixed_ips = orig_port['fixed_ips'] # Check if any of the ips belongs to an ipv6 subnet with DHCP # And no ipv4 subnets for fixed_ip in fixed_ips: if netaddr.IPNetwork(fixed_ip['ip_address']).version == 4: # If there are ipv4 addresses - it is allowed return with db_api.CONTEXT_READER.using(context): subnet = self.get_subnet(context, fixed_ip['subnet_id']) if (subnet['enable_dhcp'] and subnet['ipv6_address_mode'] != const.IPV6_SLAAC): err_msg = (_("%s are not supported for IPv6 ports with " "DHCP v6") % ext_edo.EXTRADHCPOPTS) LOG.error(err_msg) raise 
n_exc.InvalidInput(error_message=err_msg) def create_port(self, context, port, l2gw_port_check=False): port_data = port['port'] # validate the new port parameters self._validate_create_port(context, port_data) self._assert_on_resource_admin_state_down(port_data) # Validate the vnic type (the same types as for the NSX-T plugin) direct_vnic_type = self._validate_port_vnic_type( context, port_data, port_data['network_id'], projectpluginmap.NsxPlugins.NSX_T) is_external_net = self._network_is_external( context, port_data['network_id']) if is_external_net: self._assert_on_external_net_with_compute(port_data) with db_api.CONTEXT_WRITER.using(context): neutron_db = self.base_create_port(context, port) port["port"].update(neutron_db) try: # Validate ipv6 only after fixed_ips are allocated self._assert_on_ipv6_port_with_dhcpopts(context, port["port"]) except Exception: with excutils.save_and_reraise_exception(): # rollback super(NsxPolicyPlugin, self).delete_port( context, neutron_db['id']) self.fix_direct_vnic_port_sec(direct_vnic_type, port_data) (is_psec_on, has_ip, sgids, psgids) = ( self._create_port_preprocess_security(context, port, port_data, neutron_db, False)) self._process_portbindings_create_and_update( context, port['port'], port_data, vif_type=self._vif_type_by_vnic_type(direct_vnic_type)) self._process_port_create_extra_dhcp_opts( context, port_data, port_data.get(ext_edo.EXTRADHCPOPTS)) self._process_port_create_security_group(context, port_data, sgids) self._process_port_create_provider_security_group( context, port_data, psgids) # Handle port mac learning if validators.is_attr_set(port_data.get(mac_ext.MAC_LEARNING)): # Make sure mac_learning and port sec are not both enabled if port_data.get(mac_ext.MAC_LEARNING) and is_psec_on: msg = _('Mac learning requires that port security be ' 'disabled') LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) # save the mac learning value in the DB self._create_mac_learning_state(context, port_data) elif 
mac_ext.MAC_LEARNING in port_data: # This is due to the fact that the default is # ATTR_NOT_SPECIFIED port_data.pop(mac_ext.MAC_LEARNING) qos_policy_id = self._get_port_qos_policy_id( context, None, port_data) if self._is_backend_port(context, port_data): # router interface port is created automatically by policy try: self._create_or_update_port_on_backend( context, port_data, is_psec_on, qos_policy_id) except Exception as e: with excutils.save_and_reraise_exception(): LOG.error('Failed to create port %(id)s on NSX ' 'backend. Exception: %(e)s', {'id': neutron_db['id'], 'e': e}) super(NsxPolicyPlugin, self).delete_port( context, neutron_db['id']) # Attach the QoS policy to the port in the neutron DB if qos_policy_id: qos_com_utils.update_port_policy_binding(context, neutron_db['id'], qos_policy_id) # this extra lookup is necessary to get the # latest db model for the extension functions port_model = self._get_port(context, port_data['id']) resource_extend.apply_funcs('ports', port_data, port_model) self._extend_nsx_port_dict_binding(context, port_data) self._remove_provider_security_groups_from_list(port_data) # Add Mac/IP binding to native DHCP server and neutron DB. try: if self.use_policy_dhcp: self._add_port_policy_dhcp_binding(context, port_data) elif cfg.CONF.nsx_p.allow_passthrough: self._add_port_mp_dhcp_binding(context, port_data) except nsx_lib_exc.ManagerError: # Rollback create port self.delete_port(context, port_data['id'], force_delete_dhcp=True) msg = _('Unable to create port. 
Please contact admin') LOG.exception(msg) raise nsx_exc.NsxPluginException(err_msg=msg) kwargs = {'context': context, 'port': neutron_db} registry.notify(resources.PORT, events.AFTER_CREATE, self, **kwargs) return port_data def _delete_port_on_backend(self, context, net_id, port_id): try: segment_id = self._get_network_nsx_segment_id(context, net_id) self.nsxpolicy.segment_port_security_profiles.delete( segment_id, port_id) self.nsxpolicy.segment_port_discovery_profiles.delete( segment_id, port_id) if directory.get_plugin(plugin_const.QOS): self.nsxpolicy.segment_port_qos_profiles.delete( segment_id, port_id) self.nsxpolicy.segment_port.delete(segment_id, port_id) except nsx_lib_exc.ResourceNotFound: # If the resource was not found on the backend do not worry about # it. The conditions has already been logged, so there is no need # to do further logging pass except nsx_lib_exc.ManagerError as e: # If there is a failure in deleting the resource. # In this case the neutron port was not deleted yet. msg = (_("Backend port deletion for neutron port %(id)s " "failed: %(e)s") % {'id': port_id, 'e': e}) raise nsx_exc.NsxPluginException(err_msg=msg) def delete_port(self, context, port_id, l3_port_check=True, l2gw_port_check=True, force_delete_dhcp=False, force_delete_vpn=False): # first update neutron (this will perform all types of validations) port_data = self.get_port(context, port_id) net_id = port_data['network_id'] # if needed, check to see if this is a port owned by # a l3 router. 
If so, we should prevent deletion here if l3_port_check: self.prevent_l3_port_deletion(context, port_id) # Prevent DHCP port deletion if native support is enabled if (cfg.CONF.nsx_p.allow_passthrough and not force_delete_dhcp and port_data['device_owner'] in [const.DEVICE_OWNER_DHCP]): msg = (_('Can not delete DHCP port %s') % port_id) raise n_exc.BadRequest(resource='port', msg=msg) if not force_delete_vpn: self._assert_on_vpn_port_change(port_data) self.disassociate_floatingips(context, port_id) # Remove Mac/IP binding from native DHCP server and neutron DB. if self.use_policy_dhcp: self._delete_port_policy_dhcp_binding(context, port_data) elif cfg.CONF.nsx_p.allow_passthrough: self._delete_port_mp_dhcp_binding(context, port_data) super(NsxPolicyPlugin, self).delete_port(context, port_id) # Delete the backend port last to prevent recreation by another process if self._is_backend_port(context, port_data, delete=True): try: self._delete_port_on_backend(context, net_id, port_id) except nsx_lib_exc.ResourceNotFound: # If the resource was not found on the backend do not worry # about it. The conditions has already been logged, so there # is no need to do further logging pass except nsx_lib_exc.ManagerError as e: # If there is a failure in deleting the resource, fail the # neutron operation even though the neutron object was already # deleted. This way the user will be aware of zombie resources # that may fail future actions. msg = (_("Backend segment port deletion for neutron port " "%(id)s failed. 
The object was however removed from " "the Neutron database: %(e)s") % {'id': port_id, 'e': e}) raise nsx_exc.NsxPluginException(err_msg=msg) def _update_port_on_backend(self, context, lport_id, original_port, updated_port, is_psec_on, qos_policy_id): # For now port create and update are the same # Update might evolve with more features return self._create_or_update_port_on_backend( context, updated_port, is_psec_on, qos_policy_id, original_port=original_port) def update_port(self, context, port_id, port): with db_api.CONTEXT_WRITER.using(context): # get the original port, and keep it honest as it is later used # for notifications original_port = super(NsxPolicyPlugin, self).get_port( context, port_id) self._remove_provider_security_groups_from_list(original_port) port_data = port['port'] self._validate_update_port(context, port_id, original_port, port_data) self._assert_on_resource_admin_state_down(port_data) validate_port_sec = self._should_validate_port_sec_on_update_port( port_data) is_external_net = self._network_is_external( context, original_port['network_id']) if is_external_net: self._assert_on_external_net_with_compute(port_data) device_owner = (port_data['device_owner'] if 'device_owner' in port_data else original_port.get('device_owner')) self._validate_max_ips_per_port(context, port_data.get('fixed_ips', []), device_owner) direct_vnic_type = self._validate_port_vnic_type( context, port_data, original_port['network_id']) self._assert_on_ipv6_port_with_dhcpopts( context, port_data, orig_port=original_port) updated_port = super(NsxPolicyPlugin, self).update_port( context, port_id, port) self._extension_manager.process_update_port(context, port_data, updated_port) # copy values over - except fixed_ips as # they've already been processed port_data.pop('fixed_ips', None) updated_port.update(port_data) updated_port = self._update_port_preprocess_security( context, port, port_id, updated_port, False, validate_port_sec=validate_port_sec, 
direct_vnic_type=direct_vnic_type) self._update_extra_dhcp_opts_on_port(context, port_id, port, updated_port) sec_grp_updated = self.update_security_group_on_port( context, port_id, port, original_port, updated_port) self._process_port_update_provider_security_group( context, port, original_port, updated_port) (port_security, has_ip) = self._determine_port_security_and_has_ip( context, updated_port) self._process_portbindings_create_and_update( context, port_data, updated_port, vif_type=self._vif_type_by_vnic_type(direct_vnic_type)) self._extend_nsx_port_dict_binding(context, updated_port) mac_learning_state = updated_port.get(mac_ext.MAC_LEARNING) if mac_learning_state is not None: if port_security and mac_learning_state: msg = _('Mac learning requires that port security be ' 'disabled') LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) self._update_mac_learning_state(context, port_id, mac_learning_state) self._remove_provider_security_groups_from_list(updated_port) # Update the QoS policy qos_policy_id = self._get_port_qos_policy_id( context, original_port, updated_port) qos_com_utils.update_port_policy_binding(context, port_id, qos_policy_id) # update the port in the backend, only if it exists in the DB # (i.e not external net) and is not router interface if self._is_backend_port(context, updated_port): try: self._update_port_on_backend(context, port_id, original_port, updated_port, port_security, qos_policy_id) except Exception as e: LOG.error('Failed to update port %(id)s on NSX ' 'backend. 
Exception: %(e)s', {'id': port_id, 'e': e}) # Rollback the change with excutils.save_and_reraise_exception(): with db_api.CONTEXT_WRITER.using(context): self._revert_neutron_port_update( context, port_id, original_port, updated_port, port_security, sec_grp_updated) else: # if this port changed ownership to router interface, it should # be deleted from policy, since policy handles router connectivity original_owner = original_port.get('device_owner') new_owner = port_data.get('device_owner') if (original_owner != new_owner and new_owner == const.DEVICE_OWNER_ROUTER_INTF): self._delete_port_on_backend(context, original_port['network_id'], port_id) # Update DHCP bindings. if self.use_policy_dhcp: self._update_port_policy_dhcp_binding( context, original_port, updated_port) elif cfg.CONF.nsx_p.allow_passthrough: self._update_port_mp_dhcp_binding( context, original_port, updated_port) # Make sure the port revision is updated if 'revision_number' in updated_port: port_model = self._get_port(context, port_id) updated_port['revision_number'] = port_model.revision_number # Notifications must be sent after the above transaction is complete kwargs = { 'context': context, 'port': updated_port, 'mac_address_updated': False, 'original_port': original_port, } registry.notify(resources.PORT, events.AFTER_UPDATE, self, **kwargs) return updated_port def get_port(self, context, id, fields=None): port = super(NsxPolicyPlugin, self).get_port( context, id, fields=None) self._extend_nsx_port_dict_binding(context, port) self._extend_qos_port_dict_binding(context, port) self._remove_provider_security_groups_from_list(port) return db_utils.resource_fields(port, fields) def get_ports(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): filters = filters or {} self._update_filters_with_sec_group(context, filters) with db_api.CONTEXT_READER.using(context): ports = ( super(NsxPolicyPlugin, self).get_ports( context, filters, fields, sorts, limit, 
marker, page_reverse)) self._log_get_ports(ports, filters) # Add port extensions for port in ports[:]: self._extend_nsx_port_dict_binding(context, port) self._extend_qos_port_dict_binding(context, port) self._remove_provider_security_groups_from_list(port) return (ports if not fields else [db_utils.resource_fields(port, fields) for port in ports]) def _add_subnet_snat_rule(self, context, router_id, subnet, gw_address_scope, gw_ip): if not self._need_router_snat_rules(context, router_id, subnet, gw_address_scope): return firewall_match = self._get_nat_firewall_match() self.nsxpolicy.tier1_nat_rule.create_or_overwrite( 'snat for subnet %s' % subnet['id'], router_id, nat_rule_id=self._get_snat_rule_id(subnet), action=policy_constants.NAT_ACTION_SNAT, sequence_number=NAT_RULE_PRIORITY_GW, translated_network=gw_ip, source_network=subnet['cidr'], firewall_match=firewall_match) def _get_snat_rule_id(self, subnet): return 'S-' + subnet['id'] def _get_no_dnat_rule_id(self, subnet): return 'ND-' + subnet['id'] def _add_subnet_no_dnat_rule(self, context, router_id, subnet): if not self._need_router_no_dnat_rules(subnet): return # Add NO-DNAT rule to allow internal traffic between VMs, even if # they have floating ips (Only for routers with snat enabled) self.nsxpolicy.tier1_nat_rule.create_or_overwrite( 'no-dnat for subnet %s' % subnet['id'], router_id, nat_rule_id=self._get_no_dnat_rule_id(subnet), action=policy_constants.NAT_ACTION_NO_DNAT, sequence_number=NAT_RULE_PRIORITY_GW, destination_network=subnet['cidr'], firewall_match=policy_constants.NAT_FIREWALL_MATCH_BYPASS) def _del_subnet_no_dnat_rule(self, router_id, subnet): # Delete the previously created NO-DNAT rules self.nsxpolicy.tier1_nat_rule.delete( router_id, nat_rule_id=self._get_no_dnat_rule_id(subnet)) def _del_subnet_snat_rule(self, router_id, subnet): # Delete the previously created SNAT rules self.nsxpolicy.tier1_nat_rule.delete( router_id, nat_rule_id=self._get_snat_rule_id(subnet)) def 
_get_router_edge_cluster_path(self, tier0_uuid, router): # Take the AZ edge cluster if configured az = self._get_router_az_obj(router) if az and az._edge_cluster_uuid: ec_id = az._edge_cluster_uuid # get the full path of the edge cluster (no backend call) return self.nsxpolicy.edge_cluster.get_path(ec_id) # Get the current tier0 edge cluster (cached call) return self.nsxpolicy.tier0.get_edge_cluster_path( tier0_uuid) def service_router_has_services(self, context, router_id, router=None): """Check if the neutron router has any services which require a backend service router currently those are: SNAT, Loadbalancer, Edge firewall """ if not router: router = self._get_router(context, router_id) snat_exist = router.enable_snat fw_exist = self._router_has_edge_fw_rules(context, router) vpn_exist = self.service_router_has_vpnaas(context, router_id) lb_exist = False if not (fw_exist or snat_exist or vpn_exist): lb_exist = self.service_router_has_loadbalancers( context, router_id) return snat_exist or lb_exist or fw_exist or vpn_exist def service_router_has_loadbalancers(self, context, router_id): tags_to_search = [{'scope': lb_const.LR_ROUTER_TYPE, 'tag': router_id}] router_lb_services = self.nsxpolicy.search_by_tags( tags_to_search, self.nsxpolicy.load_balancer.lb_service.entry_def.resource_type() )['results'] non_delete_services = [srv for srv in router_lb_services if not srv.get('marked_for_delete')] return True if non_delete_services else False def service_router_has_vpnaas(self, context, router_id): """Return True if there is a vpn service attached to this router""" vpn_plugin = directory.get_plugin(plugin_const.VPN) if vpn_plugin: filters = {'router_id': [router_id]} if vpn_plugin.get_vpnservices(context.elevated(), filters=filters): return True return False def verify_sr_at_backend(self, router_id): """Check if the backend Tier1 has a service router or not""" if self.nsxpolicy.tier1.get_edge_cluster_path(router_id): return True def 
_wait_until_edge_cluster_realized(self, router_id): """Wait until MP logical router has an edge-cluster Since currently the locale-services has no realization info, And some actions should be performed only after it was realized, this method checks the MP Lr for its edge-cluster id until it is set. """ if not cfg.CONF.nsx_p.allow_passthrough: return lr_id = self.nsxpolicy.tier1.get_realized_id( router_id, entity_type='RealizedLogicalRouter') if not lr_id: LOG.error("_wait_until_edge_cluster_realized Failed: No MP id " "found for Tier1 %s", router_id) return test_num = 0 max_attempts = cfg.CONF.nsx_p.realization_max_attempts sleep = cfg.CONF.nsx_p.realization_wait_sec while test_num < max_attempts: # get all the realized resources of the tier1 lr = self.nsxlib.logical_router.get(lr_id) if lr.get('edge_cluster_id'): break time.sleep(sleep) test_num += 1 if lr.get('edge_cluster_id'): LOG.debug("MP LR %s of Tier1 %s edge cluster %s was set after %s " "attempts", lr_id, router_id, lr.get('edge_cluster_id'), test_num + 1) else: LOG.error("MP LR %s if Tier1 %s edge cluster was not set after %s " "attempts", lr_id, router_id, test_num + 1) def create_service_router(self, context, router_id, router=None, update_firewall=True): """Create a service router and enable standby relocation""" if not router: router = self._get_router(context, router_id) tier0_uuid = self._get_tier0_uuid_by_router(context, router) if not tier0_uuid: err_msg = (_("Cannot create service router for %s without a " "gateway") % router_id) raise n_exc.InvalidInput(error_message=err_msg) edge_cluster_path = self._get_router_edge_cluster_path( tier0_uuid, router) if edge_cluster_path: self.nsxpolicy.tier1.set_edge_cluster_path( router_id, edge_cluster_path) else: LOG.error("Tier0 %s does not have an edge cluster", tier0_uuid) try: # Enable standby relocation & FW on this router self.nsxpolicy.tier1.update( router['id'], disable_firewall=False, enable_standby_relocation=True) except Exception as ex: 
LOG.warning("Failed to enable standby relocation for router " "%s: %s", router_id, ex) # Validate locale-services realization before additional tier1 config self._wait_until_edge_cluster_realized(router_id) # update firewall rules (there might be FW group waiting for a # service router) if update_firewall: self.update_router_firewall(context, router_id) def delete_service_router(self, router_id): """Delete the Tier1 service router by removing its edge cluster Before that - disable all the features that require the service router to exist. """ # remove the gateway firewall policy if self.fwaas_callbacks and self.fwaas_callbacks.fwaas_enabled: self.fwaas_callbacks.delete_router_gateway_policy(router_id) # Disable gateway firewall and standby relocation self.nsxpolicy.tier1.update( router_id, disable_firewall=True, enable_standby_relocation=False) # remove the edge cluster from the tier1 router self.nsxpolicy.tier1.remove_edge_cluster(router_id) def _run_under_transaction(self, method): if self.nsxpolicy.feature_supported( nsxlib_consts.FEATURE_PARTIAL_UPDATES): with policy_trans.NsxPolicyTransaction(): method() else: method() def _update_router_gw_info(self, context, router_id, info, called_from=None): # Get the original data of the router GW router = self._get_router(context, router_id) orig_info = self._get_router_gw_info(context, router_id) org_tier0_uuid = self._get_tier0_uuid_by_router(context, router) org_enable_snat = router.enable_snat orgaddr, orgmask, _orgnexthop = ( self._get_external_attachment_info( context, router)) router_subnets = self._load_router_subnet_cidrs_from_db( context.elevated(), router_id) self._validate_router_gw_and_tz(context, router_id, info, org_enable_snat, router_subnets) # Interface subnets cannot overlap with the GW external subnet if info and info.get('network_id'): self._validate_gw_overlap_interfaces( context, info['network_id'], [sub['network_id'] for sub in router_subnets]) # First update the neutron DB super(NsxPolicyPlugin, 
self)._update_router_gw_info( context, router_id, info, router=router) # Get the new tier0 of the updated router (or None if GW was removed) new_tier0_uuid = self._get_tier0_uuid_by_router(context, router) new_enable_snat = router.enable_snat newaddr, newmask, _newnexthop = self._get_external_attachment_info( context, router) sr_currently_exists = self.verify_sr_at_backend(router_id) fw_exist = self._router_has_edge_fw_rules(context, router) vpn_exist = self.service_router_has_vpnaas(context, router_id) lb_exist = False if not (fw_exist or vpn_exist): # This is a backend call, so do it only if must lb_exist = self.service_router_has_loadbalancers( context, router_id) tier1_services_exist = fw_exist or vpn_exist or lb_exist actions = self._get_update_router_gw_actions( org_tier0_uuid, orgaddr, org_enable_snat, new_tier0_uuid, newaddr, new_enable_snat, tier1_services_exist, sr_currently_exists) try: if actions['add_service_router']: self.create_service_router(context, router_id, router=router) def _do_remove_nat(): if actions['remove_snat_rules']: for subnet in router_subnets: self._del_subnet_snat_rule(router_id, subnet) if actions['remove_no_dnat_rules']: for subnet in router_subnets: self._del_subnet_no_dnat_rule(router_id, subnet) self._run_under_transaction(_do_remove_nat) if (actions['remove_router_link_port'] or actions['add_router_link_port']): # GW was changed. 
update GW and route advertisement self.nsxpolicy.tier1.update_route_advertisement( router_id, static_routes=not new_enable_snat, nat=actions['advertise_route_nat_flag'], subnets=actions['advertise_route_connected_flag'], tier0=new_tier0_uuid) else: # Only update route advertisement self.nsxpolicy.tier1.update_route_advertisement( router_id, static_routes=not new_enable_snat, nat=actions['advertise_route_nat_flag'], subnets=actions['advertise_route_connected_flag']) def _do_add_nat(): if actions['add_snat_rules']: # Add SNAT rules for all the subnets which are in different # scope than the GW gw_address_scope = self._get_network_address_scope( context, router.gw_port.network_id) for subnet in router_subnets: self._add_subnet_snat_rule(context, router_id, subnet, gw_address_scope, newaddr) if actions['add_no_dnat_rules']: for subnet in router_subnets: self._add_subnet_no_dnat_rule( context, router_id, subnet) self._run_under_transaction(_do_add_nat) # always advertise ipv6 subnets if gateway is set advertise_ipv6_subnets = True if info else False self._update_router_advertisement_rules(router_id, router_subnets, advertise_ipv6_subnets) if actions['remove_service_router']: self.delete_service_router(router_id) except nsx_lib_exc.NsxLibException as e: # GW updates failed on the NSX. 
Rollback the change, # unless it is during create or delete of a router with excutils.save_and_reraise_exception(): if not called_from: LOG.error("Rolling back router %s GW info update because " "of NSX failure %s", router_id, e) super(NsxPolicyPlugin, self)._update_router_gw_info( context, router_id, orig_info, router=router) def _update_router_advertisement_rules(self, router_id, subnets, advertise_ipv6): # There is no NAT for ipv6 - all connected ipv6 segments should be # advertised ipv6_cidrs = [s['cidr'] for s in subnets if s.get('ip_version') == 6] if ipv6_cidrs and advertise_ipv6: self.nsxpolicy.tier1.add_advertisement_rule( router_id, IPV6_ROUTER_ADV_RULE_NAME, policy_constants.ADV_RULE_PERMIT, policy_constants.ADV_RULE_OPERATOR_EQ, [policy_constants.ADV_RULE_TIER1_CONNECTED], ipv6_cidrs) else: self.nsxpolicy.tier1.remove_advertisement_rule( router_id, IPV6_ROUTER_ADV_RULE_NAME) def create_router(self, context, router): r = router['router'] gw_info = self._extract_external_gw(context, router, is_extract=True) # validate the availability zone, and get the AZ object self._validate_obj_az_on_creation(context, r, 'router') with db_api.CONTEXT_WRITER.using(context): router = super(NsxPolicyPlugin, self).create_router( context, router) router_db = self._get_router(context, router['id']) self._process_extra_attr_router_create(context, router_db, r) router_name = utils.get_name_and_uuid(router['name'] or 'router', router['id']) tags = self.nsxpolicy.build_v3_tags_payload( r, resource_type='os-neutron-router-id', project_name=context.tenant_name) try: def _do_create_router(): self.nsxpolicy.tier1.create_or_overwrite( router_name, router['id'], tier0=None, ipv6_ndra_profile_id=NO_SLAAC_NDRA_PROFILE_ID, tags=tags) # Also create the empty locale-service as it must always exist self.nsxpolicy.tier1.create_locale_service(router['id']) self._run_under_transaction(_do_create_router) except Exception as ex: with excutils.save_and_reraise_exception(): LOG.error('Failed to 
create router %(id)s ' 'on NSX backend. Exception: %(e)s', {'id': router['id'], 'e': ex}) self.delete_router(context, router['id']) if gw_info and gw_info != const.ATTR_NOT_SPECIFIED: try: self._update_router_gw_info(context, router['id'], gw_info, called_from="create") except (db_exc.DBError, nsx_lib_exc.NsxLibException): with excutils.save_and_reraise_exception(): LOG.error("Failed to set gateway info for router " "being created: %s - removing router", router['id']) self.delete_router(context, router['id']) LOG.info("Create router failed while setting external " "gateway. Router:%s has been removed from " "DB and backend", router['id']) return self.get_router(context, router['id']) def delete_router(self, context, router_id): gw_info = self._get_router_gw_info(context, router_id) if gw_info: try: self._update_router_gw_info(context, router_id, {}, called_from="delete") except nsx_lib_exc.NsxLibException as e: LOG.error("Failed to remove router %s gw info before " "deletion, but going on with the deletion anyway: " "%s", router_id, e) ret_val = super(NsxPolicyPlugin, self).delete_router( context, router_id) try: self.nsxpolicy.tier1.delete_locale_service(router_id) self.nsxpolicy.tier1.delete(router_id) except nsx_lib_exc.ResourceNotFound: # If the resource was not found on the backend do not worry about # it. The conditions has already been logged, so there is no need # to do further logging pass except nsx_lib_exc.ManagerError as e: # If there is a failure in deleting the resource, fail the neutron # operation even though the neutron object was already deleted. # This way the user will be aware of zombie resources that may fail # future actions. msg = (_("Backend Tier1 deletion for neutron router %(id)s " "failed. 
The object was however removed from the " "Neutron database: %(e)s") % {'id': router_id, 'e': e}) nsx_exc.NsxPluginException(err_msg=msg) return ret_val def _get_static_route_id(self, route): return "%s-%s" % (route['destination'].replace('/', '_'), route['nexthop']) def _add_static_routes(self, router_id, routes): def _do_add_routes(): for route in routes: dest = route['destination'] self.nsxpolicy.tier1_static_route.create_or_overwrite( 'Static route for %s' % dest, router_id, static_route_id=self._get_static_route_id(route), network=dest, next_hop=route['nexthop']) self._run_under_transaction(_do_add_routes) def _delete_static_routes(self, router_id, routes): for route in routes: self.nsxpolicy.tier1_static_route.delete( router_id, static_route_id=self._get_static_route_id(route)) @nsx_plugin_common.api_replay_mode_wrapper def update_router(self, context, router_id, router): gw_info = self._extract_external_gw(context, router, is_extract=False) router_data = router['router'] self._assert_on_router_admin_state(router_data) vpn_driver = None if validators.is_attr_set(gw_info): self._validate_update_router_gw(context, router_id, gw_info) # VPNaaS need to be notified on router GW changes (there is # currently no matching upstream registration for this) vpn_plugin = directory.get_plugin(plugin_const.VPN) if vpn_plugin: vpn_driver = vpn_plugin.drivers[vpn_plugin.default_provider] vpn_driver.validate_router_gw_info(context, router_id, gw_info) routes_added = [] routes_removed = [] if 'routes' in router_data: routes_added, routes_removed = self._get_static_routes_diff( context, router_id, gw_info, router_data) # Update the neutron router updated_router = super(NsxPolicyPlugin, self).update_router( context, router_id, router) # Update the policy backend try: added_routes = removed_routes = False # Updating name & description if 'name' in router_data or 'description' in router_data: router_name = utils.get_name_and_uuid( updated_router.get('name') or 'router', router_id) 
self.nsxpolicy.tier1.update( router_id, name=router_name, description=updated_router.get('description', '')) # Updating static routes self._delete_static_routes(router_id, routes_removed) removed_routes = True self._add_static_routes(router_id, routes_added) added_routes = True except (nsx_lib_exc.ResourceNotFound, nsx_lib_exc.ManagerError): with excutils.save_and_reraise_exception(): with db_api.CONTEXT_WRITER.using(context): router_db = self._get_router(context, router_id) router_db['status'] = const.NET_STATUS_ERROR # return the static routes to the old state if added_routes: try: self._delete_static_routes(router_id, routes_added) except Exception as e: LOG.error("Rollback router %s changes failed to " "delete static routes: %s", router_id, e) if removed_routes: try: self._add_static_routes(router_id, routes_removed) except Exception as e: LOG.error("Rollback router %s changes failed to add " "static routes: %s", router_id, e) if vpn_driver: # Update vpn advertisement if GW was updated vpn_driver.update_router_advertisement(context, router_id) return updated_router def _get_gateway_addr_from_subnet(self, subnet): if subnet['gateway_ip'] and subnet['cidr']: cidr_prefix = int(subnet['cidr'].split('/')[1]) return "%s/%s" % (subnet['gateway_ip'], cidr_prefix) def _validate_router_segment_subnets(self, context, network_id, overlay_net, subnet): """Validate that adding an interface to a router will not cause multiple segments subnets which is not allowed """ if not overlay_net: # Only interfaces for overlay networks create segment subnets return if subnet.get('ip_version', 4) != 4: # IPv6 is not relevant here since plugin allow only 1 ipv6 subnet # per network return if subnet['enable_dhcp']: # This subnet is with dhcp, so there cannot be any other with dhcp return if not self.use_policy_dhcp: # Only policy DHCP creates segment subnets return # Look for another subnet with DHCP network = self._get_network(context.elevated(), network_id) for subnet in network.subnets: 
if subnet.enable_dhcp and subnet.ip_version == 4: msg = (_("Can not add router interface on network %(net)s " "as another %(ver)s subnet has enabled DHCP") % {'net': network_id, 'ver': subnet.ip_version}) LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) def _get_subnet_ndra_profile(self, subnet): ndra_profile_id = NO_SLAAC_NDRA_PROFILE_ID if subnet.get('ip_version') == 6 and subnet.get('enable_dhcp', False): # Subnet with DHCP v6 of some kind addr_mode = subnet.get('ipv6_address_mode') if addr_mode == const.IPV6_SLAAC: ndra_profile_id = SLAAC_NDRA_PROFILE_ID elif addr_mode == const.DHCPV6_STATELESS: ndra_profile_id = STATELESS_DHCP_NDRA_PROFILE_ID else: # Stateful DHCP v6 is the default ndra_profile_id = STATEFUL_DHCP_NDRA_PROFILE_ID return ndra_profile_id def _validate_interfaces_address_mode(self, context, router_id, router_subnets, subnet): """Validate that all the overlay ipv6 interfaces of the router have the same ipv6_address_mode, when a new subnet is added """ if subnet['enable_dhcp']: subnet_address_mode = subnet.get('ipv6_address_mode', const.DHCPV6_STATEFUL) else: # Slaac and non-dhcp can coexist subnet_address_mode = const.IPV6_SLAAC ipv6_overlay_subnets = [s for s in router_subnets if s['id'] != subnet['id'] and s.get('ip_version') == 6 and s.get('enable_dhcp') and self._is_overlay_network(context, s['network_id'])] for rtr_subnet in ipv6_overlay_subnets: address_mode = rtr_subnet.get('ipv6_address_mode', const.DHCPV6_STATEFUL) if address_mode != subnet_address_mode: msg = (_("Interface network %(net_id)s with address mode " "%(am)s conflicts with other interfaces of router " "%(rtr_id)s") % {'net_id': subnet['network_id'], 'am': subnet_address_mode, 'rtr_id': router_id}) LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) @nsx_plugin_common.api_replay_mode_wrapper def add_router_interface(self, context, router_id, interface_info): # NOTE: In dual stack case, neutron would create a separate interface # for each IP version # We only 
allow one subnet per IP version subnet = self._get_interface_subnet(context, interface_info) network_id = self._get_interface_network_id(context, interface_info, subnet=subnet) extern_net = self._network_is_external(context, network_id) overlay_net = self._is_overlay_network(context, network_id) router_db = self._get_router(context, router_id) gw_network_id = (router_db.gw_port.network_id if router_db.gw_port else None) with locking.LockManager.get_lock(str(network_id)): # disallow more than one subnets belong to same network being # attached to routers self._validate_multiple_subnets_routers( context, router_id, network_id, subnet) # A router interface cannot be an external network if extern_net: msg = _("An external network cannot be attached as " "an interface to a router") raise n_exc.InvalidInput(error_message=msg) # Non overlay networks should be configured with a centralized # router, which is allowed only if GW network is attached if not overlay_net and not gw_network_id: msg = _("A router attached to a VLAN backed network " "must have an external network assigned") raise n_exc.InvalidInput(error_message=msg) # Interface subnets cannot overlap with the GW external subnet self._validate_gw_overlap_interfaces(context, gw_network_id, [network_id]) if subnet: self._validate_router_segment_subnets(context, network_id, overlay_net, subnet) if subnet and subnet.get('ip_version') == 6 and overlay_net: orig_rtr_subnets = self._load_router_subnet_cidrs_from_db( context.elevated(), router_id) self._validate_interfaces_address_mode( context, router_id, orig_rtr_subnets, subnet) # Update the interface of the neutron router info = super(NsxPolicyPlugin, self).add_router_interface( context, router_id, interface_info) try: # If it is a no-snat router, interface address scope must be the # same as the gateways self._validate_interface_address_scope(context, router_db, subnet) # Check GW & subnets TZ tier0_uuid = self._get_tier0_uuid_by_router( context.elevated(), router_db) 
# Validate the TZ of the new subnet match the one of the router self._validate_router_tz(context.elevated(), tier0_uuid, [subnet]) segment_id = self._get_network_nsx_segment_id(context, network_id) rtr_subnets = self._load_router_subnet_cidrs_from_db( context.elevated(), router_id) if overlay_net: # overlay interface pol_subnets = self._get_segment_subnets( context, network_id, interface_subnets=rtr_subnets) self.nsxpolicy.segment.update(segment_id, tier1_id=router_id, subnets=pol_subnets) # will update the router only if needed self._update_slaac_on_router(context, router_id, subnet, rtr_subnets) else: # Vlan interface pol_subnets = [] for rtr_subnet in rtr_subnets: if rtr_subnet['network_id'] == network_id: prefix_len = int(rtr_subnet['cidr'].split('/')[1]) pol_subnets.append(policy_defs.InterfaceSubnet( ip_addresses=[rtr_subnet['gateway_ip']], prefix_len=prefix_len)) ndra_profile_id = self._get_subnet_ndra_profile(subnet) self.nsxpolicy.tier1.add_segment_interface( router_id, segment_id, segment_id, pol_subnets, ndra_profile_id) # add the SNAT/NO_DNAT rules for this interface if router_db.enable_snat and gw_network_id: if router_db.gw_port.get('fixed_ips'): gw_ip = router_db.gw_port['fixed_ips'][0]['ip_address'] gw_address_scope = self._get_network_address_scope( context, gw_network_id) self._add_subnet_snat_rule( context, router_id, subnet, gw_address_scope, gw_ip) self._add_subnet_no_dnat_rule(context, router_id, subnet) if subnet.get('ip_version') == 6 and gw_network_id: # if this is an ipv6 subnet and router has GW, # we need to add advertisement rule self._update_router_advertisement_rules( router_id, rtr_subnets, True) # update firewall rules self.update_router_firewall(context, router_id, router_db) except Exception as ex: with excutils.save_and_reraise_exception(): LOG.error('Failed to create router interface for network ' '%(id)s on NSX backend. 
Exception: %(e)s', {'id': network_id, 'e': ex}) self.remove_router_interface( context, router_id, interface_info) return info def remove_router_interface(self, context, router_id, interface_info): # First get the subnet, which is needed for removing the SNAT rule subnet = subnet_id = None if 'port_id' in interface_info: port_id = interface_info['port_id'] port = self._get_port(context, port_id) if port.get('fixed_ips'): subnet_id = port['fixed_ips'][0]['subnet_id'] elif 'subnet_id' in interface_info: subnet_id = interface_info['subnet_id'] if subnet_id: subnet = self.get_subnet(context, subnet_id) # Update the neutron router first info = super(NsxPolicyPlugin, self).remove_router_interface( context, router_id, interface_info) network_id = info['network_id'] overlay_net = self._is_overlay_network(context, network_id) segment_id = self._get_network_nsx_segment_id(context, network_id) rtr_subnets = self._load_router_subnet_cidrs_from_db( context.elevated(), router_id) net_rtr_subnets = [sub for sub in rtr_subnets if sub['network_id'] == network_id] try: if overlay_net: # Update the segment subnets, and Remove the tier1 router from # this segment it its the last subnet of this network # (it is possible to have both IPv4 & 6 subnets) seg_subnets = self._get_segment_subnets( context, network_id, interface_subnets=net_rtr_subnets) if not net_rtr_subnets: # Remove the tier1 connectivity of this segment # This must be done is a separate call as it uses PUT self.nsxpolicy.segment.remove_connectivity_and_subnets( segment_id) # update remaining (DHCP/ipv4/6) subnets if seg_subnets: self.nsxpolicy.segment.update(segment_id, subnets=seg_subnets) # will update the router only if needed self._update_slaac_on_router(context, router_id, subnet, rtr_subnets, delete=True) else: # VLAN interface pol_subnets = [] for rtr_subnet in net_rtr_subnets: prefix_len = int(rtr_subnet['cidr'].split('/')[1]) pol_subnets.append(policy_defs.InterfaceSubnet( ip_addresses=[rtr_subnet['gateway_ip']], 
prefix_len=prefix_len)) if pol_subnets: # This will update segment interface self.nsxpolicy.tier1.add_segment_interface( router_id, segment_id, segment_id, pol_subnets) else: self.nsxpolicy.tier1.remove_segment_interface( router_id, segment_id) # try to delete the SNAT/NO_DNAT rules of this subnet router_db = self._get_router(context, router_id) if (subnet and router_db.gw_port and router_db.enable_snat and subnet['ip_version'] == 4): self._del_subnet_snat_rule(router_id, subnet) self._del_subnet_no_dnat_rule(router_id, subnet) if subnet and subnet.get('ip_version') == 6 and router_db.gw_port: # if this is an ipv6 subnet and router has GW, # we need to remove advertisement rule self._update_router_advertisement_rules( router_id, rtr_subnets, True) # update firewall rules self.update_router_firewall(context, router_id, router_db) except nsx_lib_exc.ManagerError as e: # If there is a failure in deleting the resource, fail the neutron # operation even though the neutron object was already deleted. # This way the user will be aware of zombie resources that may fail # future actions. # TODO(asarfaty): Handle specific errors msg = (_('Failed to remove router interface for network ' '%(id)s on NSX backend. 
Exception: %(e)s') % {'id': network_id, 'e': e}) raise nsx_exc.NsxPluginException(err_msg=msg) return info def _get_fip_snat_rule_id(self, fip_id): return 'S-' + fip_id def _get_fip_dnat_rule_id(self, fip_id): return 'D-' + fip_id def _get_nat_firewall_match(self): if cfg.CONF.nsx_p.firewall_match_internal_addr: return policy_constants.NAT_FIREWALL_MATCH_INTERNAL return policy_constants.NAT_FIREWALL_MATCH_EXTERNAL def _add_fip_nat_rules(self, tier1_id, fip_id, ext_ip, int_ip): def _do_add_fip_nat(): firewall_match = self._get_nat_firewall_match() self.nsxpolicy.tier1_nat_rule.create_or_overwrite( 'snat for fip %s' % fip_id, tier1_id, nat_rule_id=self._get_fip_snat_rule_id(fip_id), action=policy_constants.NAT_ACTION_SNAT, translated_network=ext_ip, source_network=int_ip, sequence_number=NAT_RULE_PRIORITY_FIP, firewall_match=firewall_match) self.nsxpolicy.tier1_nat_rule.create_or_overwrite( 'dnat for fip %s' % fip_id, tier1_id, nat_rule_id=self._get_fip_dnat_rule_id(fip_id), action=policy_constants.NAT_ACTION_DNAT, translated_network=int_ip, destination_network=ext_ip, sequence_number=NAT_RULE_PRIORITY_FIP, firewall_match=firewall_match) self._run_under_transaction(_do_add_fip_nat) def _delete_fip_nat_rules(self, tier1_id, fip_id): def _do_delete_fip_nat(): self.nsxpolicy.tier1_nat_rule.delete( tier1_id, nat_rule_id=self._get_fip_snat_rule_id(fip_id)) self.nsxpolicy.tier1_nat_rule.delete( tier1_id, nat_rule_id=self._get_fip_dnat_rule_id(fip_id)) self._run_under_transaction(_do_delete_fip_nat) def _update_lb_vip(self, port, vip_address): # update the load balancer virtual server's VIP with # floating ip, but don't add NAT rules device_id = port['device_id'] if device_id.startswith(oct_const.DEVICE_ID_PREFIX): device_id = device_id[len(oct_const.DEVICE_ID_PREFIX):] tags_to_search = [{'scope': 'os-lbaas-lb-id', 'tag': device_id}] vs_client = self.nsxpolicy.load_balancer.virtual_server vs_list = self.nsxpolicy.search_by_tags( tags_to_search, 
vs_client.entry_def.resource_type() )['results'] for vs in vs_list: vs_client.update(vs['id'], ip_address=vip_address) def create_floatingip(self, context, floatingip): # First do some validations fip_data = floatingip['floatingip'] port_id = fip_data.get('port_id') if port_id: port_data = self.get_port(context, port_id) self._assert_on_assoc_floatingip_to_special_ports( fip_data, port_data) new_fip = self._create_floating_ip_wrapper(context, floatingip) router_id = new_fip['router_id'] if not router_id: return new_fip if port_id: device_owner = port_data.get('device_owner') fip_address = new_fip['floating_ip_address'] if (device_owner == const.DEVICE_OWNER_LOADBALANCERV2 or device_owner == oct_const.DEVICE_OWNER_OCTAVIA or device_owner == lb_const.VMWARE_LB_VIP_OWNER): try: self._update_lb_vip(port_data, fip_address) except nsx_lib_exc.ManagerError: with excutils.save_and_reraise_exception(): super(NsxPolicyPlugin, self).delete_floatingip( context, new_fip['id']) return new_fip try: self._add_fip_nat_rules( router_id, new_fip['id'], new_fip['floating_ip_address'], new_fip['fixed_ip_address']) except nsx_lib_exc.ManagerError: with excutils.save_and_reraise_exception(): self.delete_floatingip(context, new_fip['id']) return new_fip def delete_floatingip(self, context, fip_id): fip = self.get_floatingip(context, fip_id) router_id = fip['router_id'] port_id = fip['port_id'] is_lb_port = False if port_id: port_data = self.get_port(context, port_id) device_owner = port_data.get('device_owner') fixed_ip_address = fip['fixed_ip_address'] if (device_owner == const.DEVICE_OWNER_LOADBALANCERV2 or device_owner == oct_const.DEVICE_OWNER_OCTAVIA or device_owner == lb_const.VMWARE_LB_VIP_OWNER): # If the port is LB VIP port, after deleting the FIP, # update the virtual server VIP back to fixed IP. 
is_lb_port = True try: self._update_lb_vip(port_data, fixed_ip_address) except nsx_lib_exc.ManagerError as e: LOG.error("Exception when updating vip ip_address" "on vip_port %(port)s: %(err)s", {'port': port_id, 'err': e}) if router_id and not is_lb_port: self._delete_fip_nat_rules(router_id, fip_id) super(NsxPolicyPlugin, self).delete_floatingip(context, fip_id) def update_floatingip(self, context, fip_id, floatingip): fip_data = floatingip['floatingip'] old_fip = self.get_floatingip(context, fip_id) old_port_id = old_fip['port_id'] new_status = (const.FLOATINGIP_STATUS_ACTIVE if fip_data.get('port_id') else const.FLOATINGIP_STATUS_DOWN) updated_port_id = fip_data.get('port_id') if updated_port_id: updated_port_data = self.get_port(context, updated_port_id) self._assert_on_assoc_floatingip_to_special_ports( fip_data, updated_port_data) new_fip = super(NsxPolicyPlugin, self).update_floatingip( context, fip_id, floatingip) router_id = new_fip['router_id'] new_port_id = new_fip['port_id'] # Delete old configuration NAT / vip is_lb_port = False if old_port_id: old_port_data = self.get_port(context, old_port_id) old_device_owner = old_port_data['device_owner'] old_fixed_ip = old_fip['fixed_ip_address'] if (old_device_owner == const.DEVICE_OWNER_LOADBALANCERV2 or old_device_owner == oct_const.DEVICE_OWNER_OCTAVIA or old_device_owner == lb_const.VMWARE_LB_VIP_OWNER): # If the port is LB VIP port, after deleting the FIP, # update the virtual server VIP back to fixed IP. 
is_lb_port = True self._update_lb_vip(old_port_data, old_fixed_ip) if (not is_lb_port and old_fip['router_id'] and (not router_id or old_fip['router_id'] != router_id)): # Delete the old rules (if the router did not change - rewriting # the rules with _add_fip_nat_rules is enough) self._delete_fip_nat_rules(old_fip['router_id'], fip_id) # Update LB VIP if the new port is LB port is_lb_port = False if new_port_id: new_port_data = self.get_port(context, new_port_id) new_dev_own = new_port_data['device_owner'] new_fip_address = new_fip['floating_ip_address'] if (new_dev_own == const.DEVICE_OWNER_LOADBALANCERV2 or new_dev_own == oct_const.DEVICE_OWNER_OCTAVIA or new_dev_own == lb_const.VMWARE_LB_VIP_OWNER): is_lb_port = True self._update_lb_vip(new_port_data, new_fip_address) if router_id and not is_lb_port: self._add_fip_nat_rules( router_id, new_fip['id'], new_fip['floating_ip_address'], new_fip['fixed_ip_address']) if new_fip['status'] != new_status: new_fip['status'] = new_status self.update_floatingip_status(context, fip_id, new_status) return new_fip def disassociate_floatingips(self, context, port_id): fip_qry = context.session.query(l3_db_models.FloatingIP) fip_dbs = fip_qry.filter_by(fixed_port_id=port_id) for fip_db in fip_dbs: if not fip_db.router_id: continue if fip_db.router_id: # Delete the old rules self._delete_fip_nat_rules(fip_db.router_id, fip_db.id) self.update_floatingip_status(context, fip_db.id, const.FLOATINGIP_STATUS_DOWN) super(NsxPolicyPlugin, self).disassociate_floatingips( context, port_id, do_notify=False) def _prepare_default_rules(self): """Create a default group & communication map in the default domain""" # Run this code only on one worker at the time with locking.LockManager.get_lock('nsx_p_prepare_default_rules'): # Return if the objects were already created try: self.nsxpolicy.comm_map.get(NSX_P_GLOBAL_DOMAIN_ID, NSX_P_DEFAULT_SECTION) self.nsxpolicy.group.get(NSX_P_GLOBAL_DOMAIN_ID, NSX_P_DEFAULT_GROUP) except 
nsx_lib_exc.ResourceNotFound: LOG.info("Going to create default group & " "communication map") exists = False else: LOG.info("Going to update default group & " "communication map") exists = True # Create the group only if not exists - no need to update it if not exists: # Create the default group membership criteria to match all # neutron ports by scope (and no tag) scope_and_tag = "%s|" % (NSX_P_PORT_RESOURCE_TYPE) conditions = [self.nsxpolicy.group.build_condition( cond_val=scope_and_tag, cond_key=policy_constants.CONDITION_KEY_TAG, cond_member_type=policy_constants.CONDITION_MEMBER_PORT)] # Create the default OpenStack group # (This will not fail if the group already exists) try: self.nsxpolicy.group.create_or_overwrite_with_conditions( name=NSX_P_DEFAULT_GROUP, domain_id=NSX_P_GLOBAL_DOMAIN_ID, group_id=NSX_P_DEFAULT_GROUP, description=NSX_P_DEFAULT_GROUP_DESC, conditions=conditions) except Exception as e: msg = (_("Failed to create NSX default group: %(e)s") % { 'e': e}) raise nsx_exc.NsxPluginException(err_msg=msg) # create default section and rules # (even if already exists - may need to update rules) logged = cfg.CONF.nsx_p.log_security_groups_blocked_traffic scope = [self.nsxpolicy.group.get_path( NSX_P_GLOBAL_DOMAIN_ID, NSX_P_DEFAULT_GROUP)] rule_id = 1 dhcp_client_rule = self.nsxpolicy.comm_map.build_entry( 'DHCP Reply', NSX_P_GLOBAL_DOMAIN_ID, NSX_P_DEFAULT_SECTION, rule_id, sequence_number=rule_id, service_ids=['DHCP-Client'], action=policy_constants.ACTION_ALLOW, scope=scope, direction=nsxlib_consts.IN, logged=logged) rule_id += 1 dhcp_server_rule = self.nsxpolicy.comm_map.build_entry( 'DHCP Request', NSX_P_GLOBAL_DOMAIN_ID, NSX_P_DEFAULT_SECTION, rule_id, sequence_number=rule_id, service_ids=['DHCP-Server'], action=policy_constants.ACTION_ALLOW, scope=scope, direction=nsxlib_consts.OUT, logged=logged) rule_id += 1 nd_rule = self.nsxpolicy.comm_map.build_entry( 'IPv6 Neighbor Discovery', NSX_P_GLOBAL_DOMAIN_ID, NSX_P_DEFAULT_SECTION, rule_id, 
sequence_number=rule_id, service_ids=['IPv6-ICMP_Neighbor_Solicitation', 'IPv6-ICMP_Neighbor_Advertisement', 'IPv6-ICMP_Version_2_Multicast_Listener', 'IPv6-ICMP_Multicast_Listener_Query', 'IPv6-ICMP_Multicast_Listener_Done', 'IPv6-ICMP_Multicast_Listener_Report', IPV6_RA_SERVICE], action=policy_constants.ACTION_ALLOW, ip_protocol=nsxlib_consts.IPV6, scope=scope, direction=nsxlib_consts.IN_OUT, logged=logged) rule_id += 1 dhcpv6_server_rule = self.nsxpolicy.comm_map.build_entry( 'DHCPv6 Request', NSX_P_GLOBAL_DOMAIN_ID, NSX_P_DEFAULT_SECTION, rule_id, sequence_number=rule_id, service_ids=['DHCPv6_Server'], action=policy_constants.ACTION_ALLOW, ip_protocol=nsxlib_consts.IPV6, scope=scope, direction=nsxlib_consts.OUT, logged=logged) rule_id += 1 dhcpv6_client_rule = self.nsxpolicy.comm_map.build_entry( 'DHCPv6 Reply', NSX_P_GLOBAL_DOMAIN_ID, NSX_P_DEFAULT_SECTION, rule_id, sequence_number=rule_id, service_ids=['DHCPv6_Client'], action=policy_constants.ACTION_ALLOW, ip_protocol=nsxlib_consts.IPV6, scope=scope, direction=nsxlib_consts.IN, logged=logged) rule_id += 1 block_rule = self.nsxpolicy.comm_map.build_entry( 'Block All', NSX_P_GLOBAL_DOMAIN_ID, NSX_P_DEFAULT_SECTION, rule_id, sequence_number=rule_id, service_ids=None, action=policy_constants.ACTION_DENY, scope=scope, direction=nsxlib_consts.IN_OUT, logged=logged) rules = [dhcp_client_rule, dhcp_server_rule, dhcpv6_client_rule, dhcpv6_server_rule, nd_rule, block_rule] try: # This will not fail if the map already exists self.nsxpolicy.comm_map.create_with_entries( name=NSX_P_DEFAULT_SECTION, domain_id=NSX_P_GLOBAL_DOMAIN_ID, map_id=NSX_P_DEFAULT_SECTION, description=NSX_P_DEFAULT_SECTION_DESC, category=NSX_P_DEFAULT_SECTION_CATEGORY, entries=rules) except Exception as e: msg = (_("Failed to create NSX default communication map: " "%(e)s") % {'e': e}) raise nsx_exc.NsxPluginException(err_msg=msg) def _prepare_exclude_list_group(self): try: self.nsxpolicy.group.get(NSX_P_GLOBAL_DOMAIN_ID, NSX_P_EXCLUDE_LIST_GROUP) 
except nsx_lib_exc.ResourceNotFound: LOG.info("Going to create exclude list group") else: LOG.debug("Verified exclude list group already exists") return # Create the group membership criteria to match excluded neutron # ports by scope and tag scope_and_tag = "%s|%s" % (security.PORT_SG_SCOPE, NSX_P_EXCLUDE_LIST_TAG) conditions = [self.nsxpolicy.group.build_condition( cond_val=scope_and_tag, cond_key=policy_constants.CONDITION_KEY_TAG, cond_member_type=policy_constants.CONDITION_MEMBER_PORT)] # Create the exclude list group # (This will not fail if the group already exists) try: self.nsxpolicy.group.create_or_overwrite_with_conditions( name=NSX_P_EXCLUDE_LIST_GROUP, domain_id=NSX_P_GLOBAL_DOMAIN_ID, group_id=NSX_P_EXCLUDE_LIST_GROUP, conditions=conditions) except Exception as e: msg = (_("Failed to create NSX exclude list group: %(e)s") % { 'e': e}) raise nsx_exc.NsxPluginException(err_msg=msg) def _add_exclude_list_group(self): member = self.nsxpolicy.group.get_path( domain_id=NSX_P_GLOBAL_DOMAIN_ID, group_id=NSX_P_EXCLUDE_LIST_GROUP) exclude_list = self.nsxpolicy.exclude_list.get() if member in exclude_list['members']: LOG.debug("Verified that group %s was already added to the " "NSX exclude list", member) return LOG.info("Going to add group %s to the NSX exclude list", member) members = exclude_list['members'] members.append(member) try: self.nsxpolicy.exclude_list.create_or_overwrite(members=members) except Exception as e: msg = (_("Failed to add group to the NSX exclude list: %(e)s") % { 'e': e}) raise nsx_exc.NsxPluginException(err_msg=msg) def _prepare_exclude_list(self): """Create exclude list for ports without port security Create a group for excluded ports and add it to the NSX exclude list """ # Run this code only on one worker at the time with locking.LockManager.get_lock('nsx_p_prepare_exclude_list'): self._prepare_exclude_list_group() self._add_exclude_list_group() def _create_security_group_backend_resources(self, context, secgroup, entries): 
"""Create communication map (=section) and group (=NS group) Both will have the security group id as their NSX id. """ sg_id = secgroup['id'] tags = self.nsxpolicy.build_v3_tags_payload( secgroup, resource_type='os-neutron-secg-id', project_name=secgroup.get('tenant_id')) nsx_name = utils.get_name_and_uuid(secgroup['name'] or 'securitygroup', sg_id) # Create the groups membership criteria for ports by scope & tag scope_and_tag = "%s|%s" % (NSX_P_SECURITY_GROUP_TAG, sg_id) condition = self.nsxpolicy.group.build_condition( cond_val=scope_and_tag, cond_key=policy_constants.CONDITION_KEY_TAG, cond_member_type=policy_constants.CONDITION_MEMBER_PORT) category = NSX_P_REGULAR_SECTION_CATEGORY if secgroup.get(provider_sg.PROVIDER) is True: category = NSX_P_PROVIDER_SECTION_CATEGORY try: def _do_create_sg(): # Create the group self.nsxpolicy.group.create_or_overwrite_with_conditions( nsx_name, NSX_P_GLOBAL_DOMAIN_ID, group_id=sg_id, description=secgroup.get('description'), conditions=[condition], tags=tags) # create the communication map (=section) and entries (=rules) self.nsxpolicy.comm_map.create_or_overwrite_map_only( nsx_name, NSX_P_GLOBAL_DOMAIN_ID, map_id=sg_id, description=secgroup.get('description'), tags=tags, category=category) for entry in entries: self.nsxpolicy.comm_map.create_entry_from_def(entry) self._run_under_transaction(_do_create_sg) except Exception as e: msg = (_("Failed to create NSX resources for SG %(sg)s: " "%(e)s") % {'sg': sg_id, 'e': e}) raise nsx_exc.NsxPluginException(err_msg=msg) def _get_rule_ip_protocol(self, sg_rule): ethertype = sg_rule.get('ethertype') if ethertype == const.IPv4: return nsxlib_consts.IPV4 if ethertype == const.IPv6: return nsxlib_consts.IPV6 return nsxlib_consts.IPV4_IPV6 def _get_rule_service_id(self, context, sg_rule, tags): """Return the NSX Policy service id matching the SG rule""" srv_id = None l4_protocol = nsxlib_utils.get_l4_protocol_name(sg_rule['protocol']) srv_name = 'Service for OS rule %s' % sg_rule['id'] 
if l4_protocol in [nsxlib_consts.TCP, nsxlib_consts.UDP]: # If port_range_min is not specified then we assume all ports are # matched, relying on neutron to perform validation. if sg_rule['port_range_min'] is None: destination_ports = [] elif sg_rule['port_range_min'] != sg_rule['port_range_max']: # NSX API requires a non-empty range (e.g - '22-23') destination_ports = ['%(port_range_min)s-%(port_range_max)s' % sg_rule] else: destination_ports = ['%(port_range_min)s' % sg_rule] srv_id = self.nsxpolicy.service.create_or_overwrite( srv_name, service_id=sg_rule['id'], description=sg_rule.get('description'), protocol=l4_protocol, dest_ports=destination_ports, tags=tags) elif l4_protocol in [nsxlib_consts.ICMPV4, nsxlib_consts.ICMPV6]: # Validate the icmp type & code version = 4 if l4_protocol == nsxlib_consts.ICMPV4 else 6 icmp_type = sg_rule['port_range_min'] icmp_code = sg_rule['port_range_max'] nsxlib_utils.validate_icmp_params( icmp_type, icmp_code, icmp_version=version, strict=True) srv_id = self.nsxpolicy.icmp_service.create_or_overwrite( srv_name, service_id=sg_rule['id'], description=sg_rule.get('description'), version=version, icmp_type=icmp_type, icmp_code=icmp_code, tags=tags) elif l4_protocol: srv_id = self.nsxpolicy.ip_protocol_service.create_or_overwrite( srv_name, service_id=sg_rule['id'], description=sg_rule.get('description'), protocol_number=l4_protocol, tags=tags) return srv_id def _get_sg_rule_remote_ip_group_id(self, sg_rule): return '%s_remote_group' % sg_rule['id'] def _get_sg_rule_local_ip_group_id(self, sg_rule): return '%s_local_group' % sg_rule['id'] def _create_security_group_backend_rule(self, context, map_id, sg_rule, secgroup_logging, is_provider_sg=False, create_related_resource=True): """Create backend resources for a DFW rule All rule resources (service, groups) will be created The rule itself will be created if create_rule=True. Else this method will return the rule entry structure for future use. 
""" # The id of the map and group is the same as the security group id this_group_id = map_id # There is no rule name in neutron. Using ID instead nsx_name = sg_rule['id'] direction = (nsxlib_consts.IN if sg_rule.get('direction') == 'ingress' else nsxlib_consts.OUT) self._fix_sg_rule_dict_ips(sg_rule) source = None destination = this_group_id tags = self.nsxpolicy.build_v3_tags_payload( sg_rule, resource_type='os-neutron-secgr-id', project_name=sg_rule.get('tenant_id')) if sg_rule.get('remote_group_id'): # This is the ID of a security group that already exists, # so it should be known to the policy manager source = sg_rule.get('remote_group_id') elif sg_rule.get('remote_ip_prefix'): # Create a group for the remote IPs remote_ip = sg_rule['remote_ip_prefix'] remote_group_id = self._get_sg_rule_remote_ip_group_id(sg_rule) if create_related_resource: expr = self.nsxpolicy.group.build_ip_address_expression( [remote_ip]) self.nsxpolicy.group.create_or_overwrite_with_conditions( remote_group_id, NSX_P_GLOBAL_DOMAIN_ID, group_id=remote_group_id, description='%s for OS rule %s' % (remote_ip, sg_rule['id']), conditions=[expr], tags=tags) source = remote_group_id if sg_rule.get(sg_prefix.LOCAL_IP_PREFIX): # Create a group for the local ips local_ip = sg_rule[sg_prefix.LOCAL_IP_PREFIX] local_group_id = self._get_sg_rule_local_ip_group_id(sg_rule) if create_related_resource: expr = self.nsxpolicy.group.build_ip_address_expression( [local_ip]) self.nsxpolicy.group.create_or_overwrite_with_conditions( local_group_id, NSX_P_GLOBAL_DOMAIN_ID, group_id=local_group_id, description='%s for OS rule %s' % (local_ip, sg_rule['id']), conditions=[expr], tags=tags) destination = local_group_id if direction == nsxlib_consts.OUT: # Swap source and destination source, destination = destination, source if create_related_resource: service = self._get_rule_service_id(context, sg_rule, tags) else: if nsxlib_utils.get_l4_protocol_name(sg_rule['protocol']): service = sg_rule['id'] else: service = 
None ip_protocol = self._get_rule_ip_protocol(sg_rule) logging = (cfg.CONF.nsx_p.log_security_groups_allowed_traffic or secgroup_logging) scope = [self.nsxpolicy.group.get_path(NSX_P_GLOBAL_DOMAIN_ID, this_group_id)] action = (policy_constants.ACTION_DENY if is_provider_sg else policy_constants.ACTION_ALLOW) # Just return the rule entry without creating it rule_entry = self.nsxpolicy.comm_map.build_entry( nsx_name, NSX_P_GLOBAL_DOMAIN_ID, map_id, entry_id=sg_rule['id'], description=sg_rule.get('description'), service_ids=[service] if service else None, ip_protocol=ip_protocol, action=action, source_groups=[source] if source else None, dest_groups=[destination] if destination else None, scope=scope, tag=sg_rule.get('project_id'), direction=direction, logged=logging) return rule_entry def create_security_group(self, context, security_group, default_sg=False): secgroup = security_group['security_group'] # Make sure the ID is initialized, as it is used for the backend # objects too secgroup['id'] = secgroup.get('id') or uuidutils.generate_uuid() project_id = secgroup['tenant_id'] if not default_sg: self._ensure_default_security_group(context, project_id) # create the Neutron SG with db_api.CONTEXT_WRITER.using(context): if secgroup.get(provider_sg.PROVIDER) is True: secgroup_db = self.create_provider_security_group( context, security_group) else: secgroup_db = ( super(NsxPolicyPlugin, self).create_security_group( context, security_group, default_sg)) self._process_security_group_properties_create(context, secgroup_db, secgroup, default_sg) if cfg.CONF.api_replay_mode: self._handle_api_replay_default_sg(context, secgroup_db) try: # create all the rule entries sg_rules = secgroup_db['security_group_rules'] secgroup_logging = secgroup.get(sg_logging.LOGGING, False) backend_rules = [] # Create all the rules resources in a single transaction for sg_rule in sg_rules: rule_entry = self._create_security_group_backend_rule( context, secgroup_db['id'], sg_rule, secgroup_logging) 
backend_rules.append(rule_entry) # Create Group & communication map on the NSX self._create_security_group_backend_resources( context, secgroup, backend_rules) except Exception as e: with excutils.save_and_reraise_exception(): LOG.exception("Failed to create backend SG rules " "for security-group %(name)s (%(id)s), " "rolling back changes. Error: %(e)s", {'name': secgroup_db['name'], 'id': secgroup_db['id'], 'e': e}) # rollback SG creation (which will also delete the backend # objects) super(NsxPolicyPlugin, self).delete_security_group( context, secgroup['id']) return secgroup_db def update_security_group(self, context, sg_id, security_group): self._prevent_non_admin_edit_provider_sg(context, sg_id) sg_data = security_group['security_group'] # update the neutron security group with db_api.CONTEXT_WRITER.using(context): secgroup_res = super(NsxPolicyPlugin, self).update_security_group( context, sg_id, security_group) self._process_security_group_properties_update( context, secgroup_res, sg_data) domain_id = NSX_P_GLOBAL_DOMAIN_ID # Update the name and description on NSX backend if 'name' in sg_data or 'description' in sg_data: nsx_name = utils.get_name_and_uuid( secgroup_res['name'] or 'securitygroup', sg_id) try: self.nsxpolicy.group.update( domain_id, sg_id, name=nsx_name, description=secgroup_res.get('description', '')) self.nsxpolicy.comm_map.update( domain_id, sg_id, name=nsx_name, description=secgroup_res.get('description', '')) except Exception as e: LOG.warning("Failed to update SG %s NSX resources: %s", sg_id, e) # Go on with the update anyway (it's just the name & desc) # If the logging of the SG changed - update the backend rules if sg_logging.LOGGING in sg_data: logged = (sg_data[sg_logging.LOGGING] or cfg.CONF.nsx_p.log_security_groups_allowed_traffic) self.nsxpolicy.comm_map.update_entries_logged(domain_id, sg_id, logged) return secgroup_res def delete_security_group(self, context, sg_id): self._prevent_non_admin_edit_provider_sg(context, sg_id) sg = 
self.get_security_group(context, sg_id) super(NsxPolicyPlugin, self).delete_security_group(context, sg_id) domain_id = NSX_P_GLOBAL_DOMAIN_ID try: self.nsxpolicy.comm_map.delete(domain_id, sg_id) self.nsxpolicy.group.delete(domain_id, sg_id) for rule in sg['security_group_rules']: self._delete_security_group_rule_backend_resources( context, rule) except nsx_lib_exc.ResourceNotFound: # If the resource was not found on the backend do not worry about # it. The conditions has already been logged, so there is no need # to do further logging pass except nsx_lib_exc.ManagerError as e: # If there is a failure in deleting the resource, fail the neutron # operation even though the neutron object was already deleted. # This way the user will be aware of zombie resources that may fail # future actions. msg = (_("Backend security group objects deletion for neutron " "security group %(id)s failed. The object was however " "removed from the Neutron database: %(e)s") % {'id': sg_id, 'e': e}) raise nsx_exc.NsxPluginException(err_msg=msg) def create_security_group_rule(self, context, security_group_rule): bulk_rule = {'security_group_rules': [security_group_rule]} return self.create_security_group_rule_bulk(context, bulk_rule)[0] def create_security_group_rule_bulk(self, context, security_group_rules): sg_rules = security_group_rules['security_group_rules'] for r in sg_rules: self._check_local_ip_prefix(context, r['security_group_rule']) # Tenant & security group are the same for all rules in the bulk example_rule = sg_rules[0]['security_group_rule'] sg_id = example_rule['security_group_id'] sg = self.get_security_group(context, sg_id) self._prevent_non_admin_edit_provider_sg(context, sg_id) with db_api.CONTEXT_WRITER.using(context): rules_db = (super(NsxPolicyPlugin, self).create_security_group_rule_bulk_native( context, security_group_rules)) for i, r in enumerate(sg_rules): self._process_security_group_rule_properties( context, rules_db[i], r['security_group_rule']) 
is_provider_sg = sg.get(provider_sg.PROVIDER) secgroup_logging = self._is_security_group_logged(context, sg_id) category = (NSX_P_PROVIDER_SECTION_CATEGORY if is_provider_sg else NSX_P_REGULAR_SECTION_CATEGORY) # Create the NSX backend rules in a single transaction def _do_update_rules(): # Build new rules and relevant objects backend_rules = [] for rule_data in rules_db: rule_entry = self._create_security_group_backend_rule( context, sg_id, rule_data, secgroup_logging, is_provider_sg=is_provider_sg) backend_rules.append(rule_entry) # Add the old rules for rule in sg['security_group_rules']: rule_entry = self._create_security_group_backend_rule( context, sg_id, rule, secgroup_logging, is_provider_sg=is_provider_sg, create_related_resource=False) backend_rules.append(rule_entry) # Update the policy with all the rules. self.nsxpolicy.comm_map.update_with_entries( NSX_P_GLOBAL_DOMAIN_ID, sg_id, entries=backend_rules, category=category) self._run_under_transaction(_do_update_rules) return rules_db def _delete_security_group_rule_backend_resources( self, context, rule_db): rule_id = rule_db['id'] # try to delete the service of this rule, if exists if rule_db['protocol']: try: self.nsxpolicy.service.delete(rule_id) except nsx_lib_exc.ResourceNotFound: pass # Try to delete the remote ip prefix group, if exists if rule_db['remote_ip_prefix']: try: remote_group_id = self._get_sg_rule_remote_ip_group_id(rule_db) self.nsxpolicy.group.delete(NSX_P_GLOBAL_DOMAIN_ID, remote_group_id) except nsx_lib_exc.ResourceNotFound: pass # Try to delete the local ip prefix group, if exists if self._get_security_group_rule_local_ip(context, rule_id): try: local_group_id = self._get_sg_rule_local_ip_group_id(rule_db) self.nsxpolicy.group.delete(NSX_P_GLOBAL_DOMAIN_ID, local_group_id) except nsx_lib_exc.ResourceNotFound: pass def delete_security_group_rule(self, context, rule_id): rule_db = self._get_security_group_rule(context, rule_id) sg_id = rule_db['security_group_id'] 
self._prevent_non_admin_edit_provider_sg(context, sg_id) # Delete the rule itself try: self.nsxpolicy.comm_map.delete_entry( policy_constants.DEFAULT_DOMAIN, sg_id, rule_id) self._delete_security_group_rule_backend_resources( context, rule_db) except nsx_lib_exc.ResourceNotFound: # Go on with the deletion anyway pass except nsx_lib_exc.ManagerError as e: msg = (_("Backend security group rule deletion for neutron " "rule %(id)s failed: %(e)s") % {'id': rule_id, 'e': e}) nsx_exc.NsxPluginException(err_msg=msg) super(NsxPolicyPlugin, self).delete_security_group_rule( context, rule_id) def _is_overlay_network(self, context, network_id): """Return True if this is an overlay network 1. No binding ("normal" overlay networks will have no binding) 2. Geneve network 3. nsx network where the backend network is connected to an overlay TZ """ bindings = nsx_db.get_network_bindings(context.session, network_id) # With NSX plugin, "normal" overlay networks will have no binding if not bindings: # using the default/AZ overlay_tz return True binding = bindings[0] if binding.binding_type == utils.NsxV3NetworkTypes.GENEVE: return True if binding.binding_type == utils.NsxV3NetworkTypes.NSX_NETWORK: # check the backend network segment = self.nsxpolicy.segment.get(binding.phy_uuid) tz = self._get_nsx_net_tz_id(segment) if tz: # This call is cached on the nsxlib side type = self.nsxpolicy.transport_zone.get_transport_type( tz) return type == nsxlib_consts.TRANSPORT_TYPE_OVERLAY return False def _is_ens_tz(self, tz_id): # This call is cached on the nsxlib side mode = self.nsxpolicy.transport_zone.get_host_switch_mode(tz_id) return mode == nsxlib_consts.HOST_SWITCH_MODE_ENS def _has_native_dhcp_metadata(self): return True def _get_tier0_uplink_cidrs(self, tier0_id): # return a list of tier0 uplink ip/prefix addresses return self.nsxpolicy.tier0.get_uplink_cidrs(tier0_id) def _get_neutron_net_ids_by_nsx_id(self, context, lswitch_id): """Translate nsx ls IDs given by Nova to neutron network 
ids. Since there is no DB mapping for this, the plugin will query the NSX for this, and cache the results. """ if lswitch_id not in NET_NSX_2_NEUTRON_ID_CACHE: segments_path = self.nsxpolicy.search_resource_by_realized_id( lswitch_id, "RealizedLogicalSwitch") if not segments_path or len(segments_path) != 1: LOG.warning("Could not find policy segment with realized id " "%s", lswitch_id) return [] neutron_id = p_utils.path_to_id(segments_path[0]) if neutron_id: # Cache the result NET_NSX_2_NEUTRON_ID_CACHE[lswitch_id] = neutron_id NET_NEUTRON_2_NSX_ID_CACHE[neutron_id] = lswitch_id if NET_NSX_2_NEUTRON_ID_CACHE.get(lswitch_id): return [NET_NSX_2_NEUTRON_ID_CACHE[lswitch_id]] return [] def _get_net_tz(self, context, net_id): bindings = nsx_db.get_network_bindings(context.session, net_id) if bindings: bind_type = bindings[0].binding_type if bind_type == utils.NsxV3NetworkTypes.NSX_NETWORK: # If it is an NSX network, return the TZ of the backend segment segment_id = bindings[0].phy_uuid return self.nsxpolicy.segment.get_transport_zone_id(segment_id) elif bind_type == utils.NetworkTypes.L3_EXT: # External network has tier0 as phy_uuid return else: return bindings[0].phy_uuid else: # Get the default one for the network AZ az = self.get_network_az_by_net_id(context, net_id) return az._default_overlay_tz_uuid def _validate_router_tz(self, context, tier0_uuid, subnets): # make sure the related GW (Tier0 router) belongs to the same TZ # as the subnets attached to the Tier1 router if not subnets or not tier0_uuid: return tier0_tzs = self.nsxpolicy.tier0.get_transport_zones(tier0_uuid) if not tier0_tzs: return for sub in subnets: tz_uuid = self._get_net_tz(context, sub['network_id']) if tz_uuid not in tier0_tzs: msg = (_("Tier0 router %(rtr)s transport zone should match " "transport zone %(tz)s of the network %(net)s") % { 'rtr': tier0_uuid, 'tz': tz_uuid, 'net': sub['network_id']}) raise n_exc.InvalidInput(error_message=msg) def _get_net_dhcp_relay(self, context, net_id): # No 
dhcp relay support yet return None def update_router_firewall(self, context, router_id, router_db=None, from_fw=False): """Rewrite all the rules in the router edge firewall This method should be called on FWaaS v2 updates, and on router interfaces changes. When FWaaS is disabled, there is no need to update the NSX router FW, as the default rule is allow-all. """ if (self.fwaas_callbacks and self.fwaas_callbacks.fwaas_enabled): if not router_db: router_db = self._get_router(context, router_id) # find all the relevant ports of the router for FWaaS v2 # TODO(asarfaty): Add vm ports as well ports = self._get_router_interfaces(context, router_id) # let the fwaas callbacks update the router FW return self.fwaas_callbacks.update_router_firewall( context, router_id, router_db, ports, called_from_fw=from_fw) def update_port_nsx_tags(self, context, port_id, tags, is_delete=False): """Update backend NSX segment port with tags from the tagging plugin""" # Make sure it is a backend port ctx = n_context.get_admin_context() port_data = self.get_port(ctx, port_id) if not self._is_backend_port(ctx, port_data): LOG.info("Ignoring tags on port %s: this port has no backend " "NSX logical port", port_id) return # Get the current tags on this port segment_id = self._get_network_nsx_segment_id( ctx, port_data['network_id']) lport = self.nsxpolicy.segment_port.get(segment_id, port_id) port_tags = lport.get('tags') orig_len = len(port_tags) # Update and validate the list of tags extra_tags = self._translate_external_tags(tags, port_id) if is_delete: port_tags = [tag for tag in port_tags if tag not in extra_tags] else: port_tags.extend( [tag for tag in extra_tags if tag not in port_tags]) if len(port_tags) > nsxlib_utils.MAX_TAGS: LOG.warning("Cannot add external tags to port %s: " "too many tags", port_id) # Update the NSX port if len(port_tags) != orig_len: self.nsxpolicy.segment_port.update( segment_id, port_id, tags=port_tags) def get_extra_fw_rules(self, context, router_id, port_id): 
"""Return firewall rules that should be added to the router firewall This method should return a list of allow firewall rules that are required in order to enable different plugin features with north/south traffic. The returned rules will be added after the FWaaS rules, and before the default drop rule. Only rules relevant for port_id router interface port should be returned, and the rules should be ingress/egress (but not both) and include the source/dest nsx logical port. """ extra_rules = [] # VPN rules: vpn_plugin = directory.get_plugin(plugin_const.VPN) if vpn_plugin: vpn_driver = vpn_plugin.drivers[vpn_plugin.default_provider] vpn_rules = ( vpn_driver._generate_ipsecvpn_firewall_rules( self.plugin_type(), context, router_id=router_id)) if vpn_rules: extra_rules.extend(vpn_rules) return extra_rules def _validate_net_mdproxy_tz(self, az, tz_uuid, mdproxy_uuid): """Validate that the network TZ matches the mdproxy edge cluster""" if not self.nsxlib: # No passthrough api support return True if az.use_policy_md: # Policy obj md_ec_path = self.nsxpolicy.md_proxy.get( mdproxy_uuid).get('edge_cluster_path') md_ec = p_utils.path_to_id(md_ec_path) else: # MP obj md_ec = self.nsxlib.native_md_proxy.get( mdproxy_uuid).get('edge_cluster_id') ec_nodes = self.nsxpolicy.edge_cluster.get_edge_node_ids(md_ec) ec_tzs = [] for tn_uuid in ec_nodes: ec_tzs.extend(self.nsxlib.transport_node.get_transport_zones( tn_uuid)) if tz_uuid not in ec_tzs: return False return True ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2022538 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v/0000755000175000017500000000000000000000000022206 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v/__init__.py0000644000175000017500000000000000000000000024305 
0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v/availability_zones.py0000644000175000017500000003742400000000000026462 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging from vmware_nsx._i18n import _ from vmware_nsx.common import availability_zones as common_az from vmware_nsx.common import config from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import utils as c_utils DEFAULT_NAME = common_az.DEFAULT_NAME LOG = logging.getLogger(__name__) class NsxVAvailabilityZone(common_az.ConfiguredAvailabilityZone): def init_from_config_line(self, config_line): values = config_line.split(':') if len(values) < 4 or len(values) > 5: raise nsx_exc.NsxInvalidConfiguration( opt_name="availability_zones", opt_value=config_line, reason=_("Expected 4 or 5 values per zone")) self.resource_pool = values[1] self.datastore_id = values[2] # validate the edge_ha if values[3].lower() == "true": self.edge_ha = True elif values[3].lower() == "false": self.edge_ha = False else: raise nsx_exc.NsxInvalidConfiguration( opt_name="availability_zones", opt_value=config_line, reason=_("Expected the 4th value to be true/false")) # HA datastore id is relevant only with edge_ha if not self.edge_ha and 
len(values) == 5: raise nsx_exc.NsxInvalidConfiguration( opt_name="availability_zones", opt_value=config_line, reason=_("Expected HA datastore ID only when edge_ha is " "enabled")) self.ha_datastore_id = values[4] if len(values) == 5 else None # Some parameters are not supported in this format. # using the global ones instead. self.ha_placement_random = cfg.CONF.nsxv.ha_placement_random self.datacenter_moid = cfg.CONF.nsxv.datacenter_moid self.backup_edge_pool = cfg.CONF.nsxv.backup_edge_pool self.external_network = cfg.CONF.nsxv.external_network self.vdn_scope_id = cfg.CONF.nsxv.vdn_scope_id self.dvs_id = cfg.CONF.nsxv.dvs_id self.edge_host_groups = cfg.CONF.nsxv.edge_host_groups self.exclusive_dhcp_edge = cfg.CONF.nsxv.exclusive_dhcp_edge self.bind_floatingip_to_all_interfaces = ( cfg.CONF.nsxv.bind_floatingip_to_all_interfaces) # No support for metadata per az self.az_metadata_support = False self.mgt_net_moid = None self.mgt_net_proxy_ips = [] self.mgt_net_proxy_netmask = None self.mgt_net_default_gateway = None def init_from_config_section(self, az_name): az_info = config.get_nsxv_az_opts(az_name) self.resource_pool = az_info.get('resource_pool_id') if not self.resource_pool: raise nsx_exc.NsxInvalidConfiguration( opt_name="resource_pool_id", opt_value='None', reason=(_("resource_pool_id for availability zone %s " "must be defined") % az_name)) self.datastore_id = az_info.get('datastore_id') if not self.datastore_id: raise nsx_exc.NsxInvalidConfiguration( opt_name="datastore_id", opt_value='None', reason=(_("datastore_id for availability zone %s " "must be defined") % az_name)) self.edge_ha = az_info.get('edge_ha', False) # The HA datastore can be empty self.ha_datastore_id = (az_info.get('ha_datastore_id') if self.edge_ha else None) if self.ha_datastore_id and not self.edge_ha: raise nsx_exc.NsxInvalidConfiguration( opt_name="ha_datastore_id", opt_value=self.ha_datastore_id, reason=_("Expected HA datastore ID only when edge_ha is " "enabled for availability 
zone %s") % az_name) # The optional parameters will get the global values if not # defined for this AZ self.ha_placement_random = az_info.get('ha_placement_random') if self.ha_placement_random is None: self.ha_placement_random = ( cfg.CONF.nsxv.ha_placement_random) self.datacenter_moid = az_info.get('datacenter_moid') if not self.datacenter_moid: self.datacenter_moid = cfg.CONF.nsxv.datacenter_moid self.backup_edge_pool = az_info.get('backup_edge_pool', []) if not self.backup_edge_pool: self.backup_edge_pool = cfg.CONF.nsxv.backup_edge_pool self.external_network = az_info.get('external_network') if not self.external_network: self.external_network = cfg.CONF.nsxv.external_network self.vdn_scope_id = az_info.get('vdn_scope_id') if not self.vdn_scope_id: self.vdn_scope_id = cfg.CONF.nsxv.vdn_scope_id self.dvs_id = az_info.get('dvs_id') if not self.dvs_id: self.dvs_id = cfg.CONF.nsxv.dvs_id self.edge_host_groups = az_info.get('edge_host_groups', []) if not self.edge_host_groups: self.edge_host_groups = cfg.CONF.nsxv.edge_host_groups self.exclusive_dhcp_edge = az_info.get('exclusive_dhcp_edge', False) self.bind_floatingip_to_all_interfaces = az_info.get( 'bind_floatingip_to_all_interfaces', False) # Support for metadata per az only if configured, and different # from the global one self.mgt_net_proxy_ips = az_info.get('mgt_net_proxy_ips') if self.mgt_net_proxy_ips: # make sure there are no over lapping ips with the # global configuration if (set(self.mgt_net_proxy_ips) & set(cfg.CONF.nsxv.mgt_net_proxy_ips)): raise nsx_exc.NsxInvalidConfiguration( opt_name="mgt_net_proxy_ips", opt_value='None', reason=(_("mgt_net_proxy_ips for availability zone " "%s must be different from global one") % az_name)) self.az_metadata_support = True self.mgt_net_moid = az_info.get('mgt_net_moid') if not self.mgt_net_moid: self.mgt_net_moid = cfg.CONF.nsxv.mgt_net_moid self.mgt_net_proxy_netmask = az_info.get( 'mgt_net_proxy_netmask') if not self.mgt_net_proxy_netmask: 
self.mgt_net_proxy_netmask = ( cfg.CONF.nsxv.mgt_net_proxy_netmask) self.mgt_net_default_gateway = az_info.get( 'mgt_net_default_gateway') if not self.mgt_net_default_gateway: self.mgt_net_default_gateway = ( cfg.CONF.nsxv.mgt_net_default_gateway) else: self.az_metadata_support = False self.mgt_net_moid = None self.mgt_net_proxy_ips = [] self.mgt_net_proxy_netmask = None self.mgt_net_default_gateway = None def init_defaults(self): # use the default configuration self.resource_pool = cfg.CONF.nsxv.resource_pool_id self.datastore_id = cfg.CONF.nsxv.datastore_id self.edge_ha = cfg.CONF.nsxv.edge_ha self.ha_datastore_id = cfg.CONF.nsxv.ha_datastore_id self.ha_placement_random = cfg.CONF.nsxv.ha_placement_random self.datacenter_moid = cfg.CONF.nsxv.datacenter_moid self.backup_edge_pool = cfg.CONF.nsxv.backup_edge_pool self.az_metadata_support = True self.mgt_net_moid = cfg.CONF.nsxv.mgt_net_moid self.mgt_net_proxy_ips = cfg.CONF.nsxv.mgt_net_proxy_ips self.mgt_net_proxy_netmask = cfg.CONF.nsxv.mgt_net_proxy_netmask self.mgt_net_default_gateway = ( cfg.CONF.nsxv.mgt_net_default_gateway) self.external_network = cfg.CONF.nsxv.external_network self.vdn_scope_id = cfg.CONF.nsxv.vdn_scope_id self.dvs_id = cfg.CONF.nsxv.dvs_id self.edge_host_groups = cfg.CONF.nsxv.edge_host_groups self.exclusive_dhcp_edge = cfg.CONF.nsxv.exclusive_dhcp_edge self.bind_floatingip_to_all_interfaces = ( cfg.CONF.nsxv.bind_floatingip_to_all_interfaces) def supports_metadata(self): # Return True if this az has it's own metadata configuration # If False - it uses the global metadata (if defined) return self.az_metadata_support def _validate_opt_connectivity(self, cluster_info, cluster_field, az_value): for obj in cluster_info.get(cluster_field, []): if obj['id'] == az_value: return True return False def validate_az_connectivity(self, vcns): info = vcns.get_tz_connectivity_info(self.vdn_scope_id) if not info or not info.get('clustersInfo'): LOG.warning("Couldn't get TZ %s connectivity information to " 
"validate the configuration", self.vdn_scope_id) return LOG.info("Validating connectivity of availability zone %s With TZ %s, " "clusters %s, DVS %s external net %s and mdproxy net %s", self.name, self.vdn_scope_id, cfg.CONF.nsxv.cluster_moid, self.dvs_id, self.external_network, self.mgt_net_moid) # Look for each configured cluster for configured_cluster in cfg.CONF.nsxv.cluster_moid: found_cluster = False for cluster_info in info['clustersInfo']: if cluster_info.get('clusterId') == configured_cluster: found_cluster = True # Validate the external network: external_net_standard = self._validate_opt_connectivity( cluster_info, 'standardNetworks', self.external_network) external_net_portgroup = self._validate_opt_connectivity( cluster_info, 'distributedVirtualPortGroups', self.external_network) if (not external_net_standard and not external_net_portgroup): raise nsx_exc.NsxInvalidConfiguration( opt_name='external_network', opt_value=self.external_network, reason=(_("Edge cluster %(ec)s in not connected " "to external network %(val)s in AZ " "%(az)s") % { 'ec': configured_cluster, 'val': self.external_network, 'az': self.name})) # Validate mgt_net_moid if self.mgt_net_moid: mgt_net_standard = self._validate_opt_connectivity( cluster_info, 'standardNetworks', self.mgt_net_moid) mgt_net_portgroup = self._validate_opt_connectivity( cluster_info, 'distributedVirtualPortGroups', self.mgt_net_moid) if not mgt_net_standard and not mgt_net_portgroup: raise nsx_exc.NsxInvalidConfiguration( opt_name='mgt_net_moid', opt_value=self.mgt_net_moid, reason=(_("Edge cluster %(ec)s in not " "connected to mgt_net_moid %(val)s " "in AZ %(az)s") % { 'ec': configured_cluster, 'val': self.mgt_net_moid, 'az': self.name})) # Validate DVS if self.dvs_id and not self._validate_opt_connectivity( cluster_info, 'distributedVirtualSwitches', self.dvs_id): raise nsx_exc.NsxInvalidConfiguration( opt_name='dvs_id', opt_value=self.dvs_id, reason=(_("Edge cluster %(ec)s in not connected " "to dvs_id 
%(val)s in AZ %(az)s") % { 'ec': configured_cluster, 'val': self.dvs_id, 'az': self.name})) break # Didn't find the edge cluster if not found_cluster: raise nsx_exc.NsxInvalidConfiguration( opt_name='vdn_scope_id', opt_value=self.vdn_scope_id, reason=(_("Edge cluster %(ec)s in not connected " "to vdn_scope_id %(val)s in AZ %(az)s") % { 'ec': configured_cluster, 'val': self.vdn_scope_id, 'az': self.name})) class NsxVAvailabilityZones(common_az.ConfiguredAvailabilityZones): def __init__(self, use_tvd_config=False): if use_tvd_config: default_azs = cfg.CONF.nsx_tvd.nsx_v_default_availability_zones else: default_azs = cfg.CONF.default_availability_zones super(NsxVAvailabilityZones, self).__init__( cfg.CONF.nsxv.availability_zones, NsxVAvailabilityZone, default_availability_zones=default_azs) def get_inventory(self): """Return a set of relevant resources in all the availability zones """ resources = set() for az in self.list_availability_zones_objects(): if az.resource_pool: resources.add(az.resource_pool) if az.datastore_id: resources.add(az.datastore_id) if az.ha_datastore_id: resources.add(az.ha_datastore_id) return resources def get_unique_non_default_param(self, param_name): """Return a set of all configured values of one of az params Ignore the value of the default AZ """ resources = set() default_val = None for az in self.list_availability_zones_objects(): az_val = getattr(az, param_name) if az.is_default(): default_val = az_val elif az_val: resources.add(az_val) # remove the default value if default_val: resources.discard(default_val) return resources def get_additional_vdn_scope(self): return self.get_unique_non_default_param("vdn_scope_id") def get_additional_mgt_net(self): return self.get_unique_non_default_param("mgt_net_moid") def get_additional_ext_net(self): return self.get_unique_non_default_param("external_network") def get_additional_datacenter(self): return self.get_unique_non_default_param("datacenter_moid") def get_additional_dvs_ids(self): return 
self.get_unique_non_default_param("dvs_id") def validate_connectivity(self, vcns): if (not c_utils.is_nsxv_version_6_4_6(vcns.get_version()) or not cfg.CONF.nsxv.cluster_moid): return for az in self.list_availability_zones_objects(): az.validate_az_connectivity(vcns) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2022538 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v/drivers/0000755000175000017500000000000000000000000023664 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v/drivers/__init__.py0000644000175000017500000000000000000000000025763 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v/drivers/abstract_router_driver.py0000644000175000017500000001301700000000000031016 0ustar00coreycorey00000000000000# Copyright 2014 VMware, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc import six from neutron.db import l3_db from neutron.db import models_v2 from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsxv_exc from vmware_nsx.common import nsxv_constants from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az from vmware_nsx.plugins.nsx_v.vshield import edge_utils @six.add_metaclass(abc.ABCMeta) class RouterAbstractDriver(object): """Abstract router driver that expose API for nsxv plugin.""" @abc.abstractmethod def get_type(self): pass @abc.abstractmethod def create_router(self, context, lrouter, appliance_size=None, allow_metadata=True): pass @abc.abstractmethod def update_router(self, context, router_id, router): pass @abc.abstractmethod def delete_router(self, context, router_id): pass @abc.abstractmethod def update_routes(self, context, router_id, nexthop): pass @abc.abstractmethod def _update_router_gw_info(self, context, router_id, info): pass @abc.abstractmethod def add_router_interface(self, context, router_id, interface_info): pass @abc.abstractmethod def remove_router_interface(self, context, router_id, interface_info): pass @abc.abstractmethod def _update_edge_router(self, context, router_id): pass class RouterBaseDriver(RouterAbstractDriver): def __init__(self, plugin): self.plugin = plugin self.nsx_v = plugin.nsx_v self.edge_manager = plugin.edge_manager self.vcns = self.nsx_v.vcns self._availability_zones = nsx_az.NsxVAvailabilityZones() def _notify_after_router_edge_association(self, context, router): registry.publish(nsxv_constants.SERVICE_EDGE, events.AFTER_CREATE, self, payload=events.DBEventPayload( context, states=(router,))) def _notify_before_router_edge_association(self, context, router, edge_id=None): registry.publish(nsxv_constants.SERVICE_EDGE, events.BEFORE_DELETE, self, payload=events.DBEventPayload( context, states=(router,), resource_id=edge_id)) def 
_get_external_network_id_by_router(self, context, router_id): """Get router's external network id if it has.""" router = self.plugin.get_router(context, router_id) ports_qry = context.session.query(models_v2.Port) gw_ports = ports_qry.filter_by( device_id=router_id, device_owner=l3_db.DEVICE_OWNER_ROUTER_GW, id=router['gw_port_id']).all() if gw_ports: return gw_ports[0]['network_id'] def _get_edge_id_or_raise(self, context, router_id): edge_id = edge_utils.get_router_edge_id(context, router_id) if not edge_id: error = (_("Failed to get router %(rid)s edge Id") % {'rid': router_id}) raise nsxv_exc.NsxPluginException(err_msg=error) return edge_id def update_nat_rules(self, context, router, router_id): self.plugin._update_nat_rules(context, router, router_id) def update_router_interface_ip(self, context, router_id, port_id, int_net_id, old_ip, new_ip, subnet_mask): """Update the fixed ip of a router interface. This implementation will not work for distributed routers, and there is a different implementation in that driver class """ # get the edge-id of this router edge_id = edge_utils.get_router_edge_id(context, router_id) if not edge_id: # This may be a shared router that was not attached to an edge yet return # find out if the port is uplink or internal router = self.plugin._get_router(context, router_id) is_uplink = (port_id == router.gw_port_id) # update the edge interface configuration self.edge_manager.update_interface_addr( context, edge_id, old_ip, new_ip, subnet_mask, is_uplink=is_uplink) # Also update the nat rules if is_uplink: self.update_nat_rules(context, router, router_id) def get_router_az(self, lrouter): return self.plugin.get_router_az(lrouter) def get_router_az_and_flavor_by_id(self, context, router_id): lrouter = self.plugin.get_router(context, router_id) return (self.get_router_az(lrouter), lrouter.get('flavor_id')) def get_router_az_by_id(self, context, router_id): lrouter = self.plugin.get_router(context, router_id) return 
self.get_router_az(lrouter) def _update_nexthop(self, context, router_id, newnexthop): """Update the router edge on gateway subnet default gateway change.""" self.plugin._update_routes(context, router_id, newnexthop) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v/drivers/distributed_router_driver.py0000644000175000017500000004105700000000000031542 0ustar00coreycorey00000000000000# Copyright 2014 VMware, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import netaddr from oslo_log import log as logging from neutron_lib.api import validators from neutron_lib import constants from neutron_lib.db import api as db_api from neutron_lib import exceptions as n_exc from neutron_lib.exceptions import l3 as l3_exc from vmware_nsx.common import locking from vmware_nsx.db import nsxv_db from vmware_nsx.extensions import routersize from vmware_nsx.plugins.nsx_v.drivers import ( abstract_router_driver as router_driver) from vmware_nsx.plugins.nsx_v import plugin as nsx_v from vmware_nsx.plugins.nsx_v.vshield import edge_utils LOG = logging.getLogger(__name__) class RouterDistributedDriver(router_driver.RouterBaseDriver): def get_type(self): return "distributed" def _get_edge_id(self, context, router_id): binding = nsxv_db.get_nsxv_router_binding(context.session, router_id) return binding.get('edge_id') def _update_routes_on_plr(self, context, router_id, plr_id, newnexthop): lswitch_id = edge_utils.get_internal_lswitch_id_of_plr_tlr( context, router_id) subnets = self.plugin._find_router_subnets_cidrs( context.elevated(), router_id) routes = [] for subnet in subnets: routes.append({ 'destination': subnet, 'nexthop': (edge_utils.get_vdr_transit_network_tlr_address()), 'network_id': lswitch_id }) # Add extra routes referring to external network on plr extra_routes = self.plugin._prepare_edge_extra_routes( context, router_id) routes.extend([route for route in extra_routes if route.get('external')]) edge_utils.update_routes(self.nsx_v, context, plr_id, routes, newnexthop) def _update_routes_on_tlr( self, context, router_id, newnexthop=edge_utils.get_vdr_transit_network_plr_address()): routes = [] # Add extra routes referring to internal network on tlr extra_routes = self.plugin._prepare_edge_extra_routes( context, router_id) routes.extend([route for route in extra_routes if not route.get('external')]) edge_utils.update_routes(self.nsx_v, context, router_id, routes, newnexthop) def create_router(self, context, lrouter, 
appliance_size=None, allow_metadata=True): az = self.get_router_az(lrouter) self.edge_manager.create_lrouter(context, lrouter, dist=True, availability_zone=az) def _validate_no_size(self, router): if validators.is_attr_set(router.get(routersize.ROUTER_SIZE)): msg = _("Cannot specify router-size for distributed router") raise n_exc.InvalidInput(error_message=msg) def update_router(self, context, router_id, router): r = router['router'] self._validate_no_size(r) is_routes_update = True if 'routes' in r else False gw_info = self.plugin._extract_external_gw(context, router, is_extract=True) super(nsx_v.NsxVPluginV2, self.plugin).update_router( context, router_id, router) if gw_info != constants.ATTR_NOT_SPECIFIED: self.plugin._update_router_gw_info(context, router_id, gw_info, is_routes_update) elif is_routes_update: # here is used to handle routes which tenant updates. router_db = self.plugin._get_router(context, router_id) nexthop = self.plugin._get_external_attachment_info( context, router_db)[2] with locking.LockManager.get_lock(self._get_edge_id(context, router_id)): self.plugin._update_subnets_and_dnat_firewall(context, router_db) self._update_routes(context, router_id, nexthop) if 'admin_state_up' in r: self.plugin._update_router_admin_state( context, router_id, self.get_type(), r['admin_state_up']) if 'name' in r: self.edge_manager.rename_lrouter(context, router_id, r['name']) # if we have a plr router - rename it too plr_id = self.edge_manager.get_plr_by_tlr_id(context, router_id) if plr_id: self.edge_manager.rename_lrouter(context, plr_id, r['name']) return self.plugin.get_router(context, router_id) def delete_router(self, context, router_id): self.edge_manager.delete_lrouter(context, router_id, dist=True) def update_routes(self, context, router_id, newnexthop): with locking.LockManager.get_lock(self._get_edge_id(context, router_id)): self._update_routes(context, router_id, newnexthop) def _update_routes(self, context, router_id, newnexthop): plr_id = 
self.edge_manager.get_plr_by_tlr_id(context, router_id) if plr_id: self._update_routes_on_plr(context, router_id, plr_id, newnexthop) self._update_routes_on_tlr(context, router_id) else: self._update_routes_on_tlr(context, router_id, newnexthop=None) def _update_nexthop(self, context, router_id, newnexthop): plr_id = self.edge_manager.get_plr_by_tlr_id(context, router_id) if plr_id: self._update_routes_on_plr(context, router_id, plr_id, newnexthop) @db_api.retry_db_errors def _update_router_gw_info(self, context, router_id, info, is_routes_update=False, force_update=False): router = self.plugin._get_router(context, router_id) org_ext_net_id = router.gw_port_id and router.gw_port.network_id org_enable_snat = router.enable_snat orgaddr, orgmask, orgnexthop = ( self.plugin._get_external_attachment_info( context, router)) # verify the edge was deployed before calling super code. tlr_edge_id = self._get_edge_id_or_raise(context, router_id) super(nsx_v.NsxVPluginV2, self.plugin)._update_router_gw_info( context, router_id, info, router=router) new_ext_net_id = router.gw_port_id and router.gw_port.network_id new_enable_snat = router.enable_snat newaddr, newmask, newnexthop = ( self.plugin._get_external_attachment_info( context, router)) plr_id = self.edge_manager.get_plr_by_tlr_id(context, router_id) if not new_ext_net_id: if plr_id: # delete all plr relative conf with locking.LockManager.get_lock(tlr_edge_id): self.edge_manager.delete_plr_by_tlr_id( context, plr_id, router_id) else: # Connecting plr to the tlr if new_ext_net_id is not None. 
if not plr_id: # Get the availability zone by ID because the router dict # retrieved by +get_router does not contain this information availability_zone = self.get_router_az_by_id( context, router['id']) with locking.LockManager.get_lock(tlr_edge_id): plr_id = self.edge_manager.create_plr_with_tlr_id( context, router_id, router.get('name'), availability_zone) if new_ext_net_id != org_ext_net_id and orgnexthop: # network changed, so need to remove default gateway # and all static routes before vnic can be configured with locking.LockManager.get_lock(tlr_edge_id): edge_utils.clear_gateway(self.nsx_v, context, plr_id) # Update external vnic if addr or mask is changed if orgaddr != newaddr or orgmask != newmask: with locking.LockManager.get_lock(tlr_edge_id): self.edge_manager.update_external_interface( self.nsx_v, context, plr_id, new_ext_net_id, newaddr, newmask) # Update SNAT rules if ext net changed # or ext net not changed but snat is changed. if (new_ext_net_id != org_ext_net_id or (new_ext_net_id == org_ext_net_id and new_enable_snat != org_enable_snat)): self.plugin._update_nat_rules(context, router, plr_id) if (new_ext_net_id != org_ext_net_id or new_enable_snat != org_enable_snat or is_routes_update): # Open firewall flows on plr self.plugin._update_subnets_and_dnat_firewall( context, router, router_id=plr_id) # update static routes in all with locking.LockManager.get_lock(tlr_edge_id): self._update_routes(context, router_id, newnexthop) if new_ext_net_id: self._notify_after_router_edge_association(context, router) def _validate_subnets_routers(self, context, router_id, interface_info): # Validate that multiple subnets are not connected to the router _nsxv_plugin = self.plugin net_id, subnet_id = _nsxv_plugin._get_interface_info(context, interface_info) router_ids = _nsxv_plugin._get_network_router_ids( context.elevated(), net_id) all_routers = _nsxv_plugin.get_routers(context, filters={'id': router_ids}) dist_routers = [router['id'] for router in all_routers 
if router.get('distributed') is True] if len(dist_routers) > 0: err_msg = _("network can only be attached to just one distributed " "router, the network is already attached to router " "%(router_id)s") % {'router_id': dist_routers[0]} if router_id in dist_routers: # attach to the same router again raise n_exc.InvalidInput(error_message=err_msg) else: # attach to multiple routers raise l3_exc.RouterInterfaceAttachmentConflict(reason=err_msg) # Validate that the subnet is not a v6 one subnet = self.plugin.get_subnet(context.elevated(), subnet_id) if (subnet.get('ip_version') == 6 or (subnet['cidr'] not in (constants.ATTR_NOT_SPECIFIED, None) and netaddr.IPNetwork(subnet['cidr']).version == 6)): err_msg = _("No support for IPv6 interfaces") raise n_exc.InvalidInput(error_message=err_msg) def add_router_interface(self, context, router_id, interface_info): self._validate_subnets_routers(context, router_id, interface_info) info = super(nsx_v.NsxVPluginV2, self.plugin).add_router_interface( context, router_id, interface_info) router_db = self.plugin._get_router(context, router_id) subnet = self.plugin.get_subnet(context, info['subnet_id']) network_id = subnet['network_id'] address_groups = self.plugin._get_address_groups( context, router_id, network_id) edge_id = self._get_edge_id(context, router_id) with locking.LockManager.get_lock(str(edge_id)): edge_utils.add_vdr_internal_interface(self.nsx_v, context, router_id, network_id, address_groups, router_db.admin_state_up) # Update edge's firewall rules to accept subnets flows. 
self.plugin._update_subnets_and_dnat_firewall(context, router_db) if router_db.gw_port: plr_id = self.edge_manager.get_plr_by_tlr_id(context, router_id) if router_db.enable_snat: self.plugin._update_nat_rules(context, router_db, plr_id) # Open firewall flows on plr self.plugin._update_subnets_and_dnat_firewall( context, router_db, router_id=plr_id) # Update static routes of plr nexthop = self.plugin._get_external_attachment_info( context, router_db)[2] self._update_routes(context, router_id, nexthop) # In case of failure, rollback will be done in the plugin level return info def remove_router_interface(self, context, router_id, interface_info): info = super(nsx_v.NsxVPluginV2, self.plugin).remove_router_interface( context, router_id, interface_info) router_db = self.plugin._get_router(context, router_id) subnet = self.plugin.get_subnet(context, info['subnet_id']) network_id = subnet['network_id'] with locking.LockManager.get_lock(self._get_edge_id(context, router_id)): if router_db.gw_port and router_db.enable_snat: plr_id = self.edge_manager.get_plr_by_tlr_id( context, router_id) self.plugin._update_nat_rules(context, router_db, plr_id) # Open firewall flows on plr self.plugin._update_subnets_and_dnat_firewall( context, router_db, router_id=plr_id) # Update static routes of plr nexthop = self.plugin._get_external_attachment_info( context, router_db)[2] self._update_routes(context, router_id, nexthop) self.plugin._update_subnets_and_dnat_firewall(context, router_db) # Safely remove interface, VDR can have interface to only one # subnet in a given network. 
edge_utils.delete_interface( self.nsx_v, context, router_id, network_id, dist=True) return info def _update_edge_router(self, context, router_id): router = self.plugin._get_router(context.elevated(), router_id) plr_id = self.edge_manager.get_plr_by_tlr_id(context, router_id) self.plugin._update_external_interface( context, router, router_id=plr_id) self.plugin._update_nat_rules(context, router, router_id=plr_id) self.plugin._update_subnets_and_dnat_firewall(context, router, router_id=plr_id) def update_router_interface_ip(self, context, router_id, port_id, int_net_id, old_ip, new_ip, subnet_mask): """Update the fixed ip of a distributed router interface. """ router = self.plugin._get_router(context, router_id) if port_id == router.gw_port_id: # external port / Uplink plr_id = self.edge_manager.get_plr_by_tlr_id(context, router_id) edge_id = self._get_edge_id_or_raise(context, plr_id) self.edge_manager.update_interface_addr( context, edge_id, old_ip, new_ip, subnet_mask, is_uplink=True) # Also update the nat rules self.plugin._update_nat_rules(context, router, plr_id) else: # Internal port: # get the edge-id of this router edge_id = self._get_edge_id_or_raise(context, router_id) # Get the vnic index edge_vnic_binding = nsxv_db.get_edge_vnic_binding( context.session, edge_id, int_net_id) vnic_index = edge_vnic_binding.vnic_index self.edge_manager.update_vdr_interface_addr( context, edge_id, vnic_index, old_ip, new_ip, subnet_mask) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v/drivers/exclusive_router_driver.py0000644000175000017500000003461000000000000031224 0ustar00coreycorey00000000000000# Copyright 2014 VMware, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from neutron_lib import constants as n_consts from neutron_lib.db import api as db_api from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsxv_exc from vmware_nsx.common import locking from vmware_nsx.db import nsxv_db from vmware_nsx.plugins.nsx_v.drivers import ( abstract_router_driver as router_driver) from vmware_nsx.plugins.nsx_v import plugin as nsx_v from vmware_nsx.plugins.nsx_v.vshield import edge_utils from vmware_nsx.services.lbaas.octavia import constants as oct_const LOG = logging.getLogger(__name__) class RouterExclusiveDriver(router_driver.RouterBaseDriver): def get_type(self): return "exclusive" def create_router(self, context, lrouter, appliance_size=None, allow_metadata=True): availability_zone = self.get_router_az(lrouter) self.edge_manager.create_lrouter( context, lrouter, dist=False, appliance_size=appliance_size, availability_zone=availability_zone) if allow_metadata: self.plugin.get_metadata_proxy_handler( availability_zone.name).configure_router_edge( context, lrouter['id']) def update_router(self, context, router_id, router): r = router['router'] is_routes_update = True if 'routes' in r else False gw_info = self.plugin._extract_external_gw(context, router, is_extract=True) super(nsx_v.NsxVPluginV2, self.plugin).update_router( context, router_id, router) if gw_info != n_consts.ATTR_NOT_SPECIFIED: self.plugin._update_router_gw_info(context, router_id, gw_info, is_routes_update) elif is_routes_update: # here is used to handle routes which tenant updates. 
router_db = self.plugin._get_router(context, router_id) nexthop = self.plugin._get_external_attachment_info( context, router_db)[2] with locking.LockManager.get_lock( self._get_router_edge_id(context, router_id)): self.plugin._update_subnets_and_dnat_firewall(context, router_db) self.update_routes(context, router_id, nexthop) if 'admin_state_up' in r: self.plugin._update_router_admin_state( context, router_id, self.get_type(), r['admin_state_up']) if 'name' in r: self.edge_manager.rename_lrouter(context, router_id, r['name']) if r.get('router_size'): self.edge_manager.resize_lrouter(context, router_id, r['router_size']) return self.plugin.get_router(context, router_id) def detach_router(self, context, router_id, router): LOG.debug("Detach exclusive router id %s", router_id) router_db = self.plugin._get_router(context, router_id) self._notify_before_router_edge_association(context, router_db) self.edge_manager.unbind_router_on_edge(context, router_id) if self.plugin.metadata_proxy_handler: az = self.get_router_az_by_id(context, router_id) metadata_proxy_handler = self.plugin.get_metadata_proxy_handler( az.name) if metadata_proxy_handler: metadata_proxy_handler.cleanup_router_edge(context, router_id) def _build_router_data_from_db(self, router_db, router): """Return a new dictionary with all DB & requested router attributes """ router_attr = router['router'].copy() fields = ['status', 'name', 'admin_state_up', 'tenant_id', 'id'] for field in fields: if field not in router['router']: router_attr[field] = getattr(router_db, field) return router_attr def attach_router(self, context, router_id, router, appliance_size=None): router_db = self.plugin._get_router(context, router_id) # Add DB attributes to the router data structure # before creating it as an exclusive router router_attr = self._build_router_data_from_db(router_db, router) allow_metadata = True if self.plugin.metadata_proxy_handler else False self.create_router(context, router_attr, 
allow_metadata=allow_metadata, appliance_size=appliance_size) edge_id = edge_utils.get_router_edge_id(context, router_id) LOG.debug("Exclusive router %s attached to edge %s", router_id, edge_id) # add all internal interfaces of the router on edge intf_net_ids = ( self.plugin._get_internal_network_ids_by_router(context, router_id)) with locking.LockManager.get_lock(edge_id): for network_id in intf_net_ids: address_groups = self.plugin._get_address_groups( context, router_id, network_id) edge_utils.update_internal_interface( self.nsx_v, context, router_id, network_id, address_groups, router_db.admin_state_up) # Update external interface (which also update nat rules, routes, etc) external_net_id = self._get_external_network_id_by_router(context, router_id) gw_info = None if (external_net_id): gw_info = {'network_id': external_net_id, 'enable_snat': router_db.enable_snat} self.plugin._update_router_gw_info( context, router_id, gw_info, force_update=True) def delete_router(self, context, router_id): if self.plugin.metadata_proxy_handler: # The neutron router was already deleted, so we cannot get the AZ # from it. 
Get it from the router-bindings DB edge_id, az_name = self.plugin._get_edge_id_and_az_by_rtr_id( context, router_id) md_proxy = self.plugin.get_metadata_proxy_handler(az_name) if md_proxy: md_proxy.cleanup_router_edge(context, router_id) self.edge_manager.delete_lrouter(context, router_id, dist=False) def update_routes(self, context, router_id, nexthop): with locking.LockManager.get_lock( self._get_router_edge_id(context, router_id)): self.plugin._update_routes(context, router_id, nexthop) @db_api.retry_db_errors def _update_router_gw_info(self, context, router_id, info, is_routes_update=False, force_update=False): router = self.plugin._get_router(context, router_id) org_ext_net_id = router.gw_port_id and router.gw_port.network_id org_enable_snat = router.enable_snat orgaddr, orgmask, orgnexthop = ( self.plugin._get_external_attachment_info( context, router)) super(nsx_v.NsxVPluginV2, self.plugin)._update_router_gw_info( context, router_id, info, router=router) new_ext_net_id = router.gw_port_id and router.gw_port.network_id new_enable_snat = router.enable_snat newaddr, newmask, newnexthop = ( self.plugin._get_external_attachment_info( context, router)) edge_id = self._get_router_edge_id(context, router_id) with locking.LockManager.get_lock(edge_id): if ((new_ext_net_id != org_ext_net_id or force_update) and orgnexthop): # network changed, so need to remove default gateway before # vnic can be configured LOG.debug("Delete default gateway %s", orgnexthop) edge_utils.clear_gateway(self.nsx_v, context, router_id) secondary = self.plugin._get_floatingips_by_router( context, router_id) # Update external vnic if addr or mask is changed if orgaddr != newaddr or orgmask != newmask or force_update: self.edge_manager.update_external_interface( self.nsx_v, context, router_id, new_ext_net_id, newaddr, newmask, secondary=secondary) # Update SNAT rules if ext net changed # or ext net not changed but snat is changed. 
if (new_ext_net_id != org_ext_net_id or (new_ext_net_id == org_ext_net_id and new_enable_snat != org_enable_snat) or force_update): self.plugin._update_nat_rules(context, router) if (new_ext_net_id != org_ext_net_id or new_enable_snat != org_enable_snat or is_routes_update or force_update): self.plugin._update_subnets_and_dnat_firewall(context, router) # Update static routes in all. self.plugin._update_routes(context, router_id, newnexthop) if new_ext_net_id or force_update: self._notify_after_router_edge_association(context, router) def add_router_interface(self, context, router_id, interface_info): self.plugin._check_intf_number_of_router(context, router_id) info = super(nsx_v.NsxVPluginV2, self.plugin).add_router_interface( context, router_id, interface_info) router_db = self.plugin._get_router(context, router_id) subnet = self.plugin.get_subnet(context, info['subnet_id']) network_id = subnet['network_id'] address_groups = self.plugin._get_address_groups( context, router_id, network_id) with locking.LockManager.get_lock( self._get_router_edge_id(context, router_id)): edge_utils.update_internal_interface( self.nsx_v, context, router_id, network_id, address_groups, router_db['admin_state_up']) # Update edge's firewall rules to accept subnets flows. self.plugin._update_subnets_and_dnat_firewall(context, router_db) if router_db.gw_port and router_db.enable_snat: # Update Nat rules on external edge vnic self.plugin._update_nat_rules(context, router_db) return info def remove_router_interface(self, context, router_id, interface_info): # If a loadbalancer is attached to this Edge appliance, we cannot # detach the subnet from the exclusive router. 
subnet = interface_info.get('subnet_id') if not subnet and interface_info.get('port_id'): port = self.plugin.get_port(context, interface_info['port_id']) port_subnets = [ fixed_ip['subnet_id'] for fixed_ip in port.get( 'fixed_ips', [])] subnet = port_subnets[0] if subnet and self._check_lb_on_subnet(context, subnet, router_id): error = _('Cannot delete router %(rtr)s interface while ' 'loadbalancers are provisioned on attached ' 'subnet %(subnet)s') % {'rtr': router_id, 'subnet': subnet} raise nsxv_exc.NsxPluginException(err_msg=error) info = super(nsx_v.NsxVPluginV2, self.plugin).remove_router_interface( context, router_id, interface_info) router_db = self.plugin._get_router(context, router_id) subnet = self.plugin.get_subnet(context, info['subnet_id']) network_id = subnet['network_id'] with locking.LockManager.get_lock( self._get_router_edge_id(context, router_id)): if router_db.gw_port and router_db.enable_snat: # First update nat rules self.plugin._update_nat_rules(context, router_db) ports = self.plugin._get_router_interface_ports_by_network( context, router_id, network_id) self.plugin._update_subnets_and_dnat_firewall(context, router_db) # No subnet on the network connects to the edge vnic if not ports: edge_utils.delete_interface(self.nsx_v, context, router_id, network_id, dist=False) else: address_groups = self.plugin._get_address_groups( context, router_id, network_id) edge_utils.update_internal_interface(self.nsx_v, context, router_id, network_id, address_groups) return info def _check_lb_on_subnet(self, context, subnet_id, router_id): # Check lbaas dev_owner_v1 = n_consts.DEVICE_OWNER_LOADBALANCER dev_owner_v2 = n_consts.DEVICE_OWNER_LOADBALANCERV2 dev_owner_oct = oct_const.DEVICE_OWNER_OCTAVIA filters = {'device_owner': [dev_owner_v1, dev_owner_v2, dev_owner_oct], 'fixed_ips': {'subnet_id': [subnet_id]}} ports = super(nsx_v.NsxVPluginV2, self.plugin).get_ports( context, filters=filters) edge_id = self._get_router_edge_id(context, router_id) lb_binding = 
nsxv_db.get_nsxv_lbaas_loadbalancer_binding_by_edge( context.session, edge_id) return (len(ports) >= 1) and lb_binding def _update_edge_router(self, context, router_id): router = self.plugin._get_router(context.elevated(), router_id) with locking.LockManager.get_lock( self._get_router_edge_id(context, router_id)): self.plugin._update_external_interface(context, router) self.plugin._update_nat_rules(context, router) self.plugin._update_subnets_and_dnat_firewall(context, router) def _get_router_edge_id(self, context, router_id): binding = nsxv_db.get_nsxv_router_binding(context.session, router_id) return binding['edge_id'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v/drivers/shared_router_driver.py0000644000175000017500000013707400000000000030473 0ustar00coreycorey00000000000000# Copyright 2014 VMware, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import netaddr from oslo_config import cfg from neutron.db import l3_db from neutron.db.models import l3 as l3_db_models from neutron.db import models_v2 from neutron_lib.api import validators from neutron_lib import constants from neutron_lib.db import api as db_api from neutron_lib.db import model_query from neutron_lib import exceptions as n_exc from oslo_log import log as logging from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import locking from vmware_nsx.db import nsxv_db from vmware_nsx.db import nsxv_models from vmware_nsx.extensions import routersize from vmware_nsx.plugins.nsx_v.drivers import ( abstract_router_driver as router_driver) from vmware_nsx.plugins.nsx_v import md_proxy as nsx_v_md_proxy from vmware_nsx.plugins.nsx_v import plugin as nsx_v from vmware_nsx.plugins.nsx_v.vshield.common import ( constants as vcns_const) from vmware_nsx.plugins.nsx_v.vshield import edge_utils LOG = logging.getLogger(__name__) class RouterSharedDriver(router_driver.RouterBaseDriver): def get_type(self): return "shared" def create_router(self, context, lrouter, appliance_size=None, allow_metadata=True): pass def _validate_no_routes(self, router): if (validators.is_attr_set(router.get('routes')) and len(router['routes']) > 0): msg = _("Cannot configure static routes on a shared router") raise n_exc.InvalidInput(error_message=msg) def _validate_no_size(self, router): if validators.is_attr_set(router.get(routersize.ROUTER_SIZE)): msg = _("Cannot specify router-size for shared router") raise n_exc.InvalidInput(error_message=msg) def update_router(self, context, router_id, router): r = router['router'] self._validate_no_routes(r) self._validate_no_size(r) # If only the name and or description are updated. We do not need to # update the backend. 
if set(['name', 'description']) >= set(r.keys()): return super(nsx_v.NsxVPluginV2, self.plugin).update_router( context, router_id, router) edge_id = edge_utils.get_router_edge_id(context, router_id) if not edge_id: return super(nsx_v.NsxVPluginV2, self.plugin).update_router( context, router_id, router) else: with locking.LockManager.get_lock(str(edge_id)): gw_info = self.plugin._extract_external_gw( context, router, is_extract=True) super(nsx_v.NsxVPluginV2, self.plugin).update_router( context, router_id, router) if gw_info != constants.ATTR_NOT_SPECIFIED: self.plugin._update_router_gw_info(context, router_id, gw_info) if 'admin_state_up' in r: # If router was deployed on a different edge then # admin-state-up is already updated on the new edge. current_edge_id = ( edge_utils.get_router_edge_id(context, router_id)) if current_edge_id == edge_id: self.plugin._update_router_admin_state(context, router_id, self.get_type(), r['admin_state_up']) return self.plugin.get_router(context, router_id) def detach_router(self, context, router_id, router): LOG.debug("Detach shared router id %s", router_id) # if it is the last shared router on this edge - add it to the pool edge_id = edge_utils.get_router_edge_id(context, router_id) if not edge_id: return router_db = self.plugin._get_router(context, router_id) self._notify_before_router_edge_association(context, router_db) with locking.LockManager.get_lock(str(edge_id)): self._remove_router_services_on_edge(context, router_id) with locking.LockManager.get_lock('nsx-shared-router-pool'): self._unbind_router_on_edge(context, router_id) def attach_router(self, context, router_id, router, appliance_size=None): # find the right place to add, and create a new one if necessary router_db = self.plugin._get_router(context, router_id) self._bind_router_on_available_edge( context, router_id, router_db.admin_state_up) edge_id = edge_utils.get_router_edge_id(context, router_id) LOG.debug("Shared router %s attached to edge %s", router_id, 
edge_id) with locking.LockManager.get_lock(str(edge_id)): self._add_router_services_on_available_edge(context, router_id) self._notify_after_router_edge_association(context, router_db) def delete_router(self, context, router_id): # make sure that the router binding is cleaned up try: nsxv_db.delete_nsxv_router_binding(context.session, router_id) except Exception as e: LOG.debug('Unable to delete router binding for %s. Error: ' '%s', router_id, e) def _get_router_routes(self, context, router_id): return self.plugin._get_extra_routes_by_router_id( context, router_id) def _get_router_next_hop(self, context, router_id): router_qry = context.session.query(l3_db_models.Router) router_db = router_qry.filter_by(id=router_id).one() return self.plugin._get_external_attachment_info( context, router_db)[2] def _update_routes_on_routers(self, context, target_router_id, router_ids, only_if_target_routes=False): if only_if_target_routes: # First check if the target router has any routes or next hop # If not - it means that nothing changes so we can skip this # backend call target_routes = self._get_router_routes(context, target_router_id) target_next_hop = self._get_router_next_hop( context, target_router_id) if not target_routes and not target_next_hop: LOG.debug("_update_routes_on_routers skipped since router %s " "has no routes", target_router_id) return nexthop = None all_routes = [] for router_id in router_ids: routes = self._get_router_routes(context, router_id) filters = {'device_id': [router_id]} ports = self.plugin.get_ports(context.elevated(), filters) self.plugin._add_network_info_for_routes(context, routes, ports) all_routes.extend(routes) if not nexthop: router_nexthop = self._get_router_next_hop(context, router_id) if router_nexthop: nexthop = router_nexthop # TODO(berlin) do rollback op. 
edge_utils.update_routes(self.nsx_v, context, target_router_id, all_routes, nexthop) # return a dict of each router -> list of vnics from the other routers def _get_all_routers_vnic_indices(self, context, router_ids): all_vnic_indices = {} if len(router_ids) < 1: # there are no routers return all_vnic_indices intf_ports = self.plugin.get_ports( context.elevated(), filters={'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF]}) edge_id = edge_utils.get_router_edge_id(context, router_ids[0]) edge_vnic_bindings = nsxv_db.get_edge_vnic_bindings_by_edge( context.session, edge_id) for this_router_id in router_ids: # get networks IDs for this router router_net_ids = list( set([port['network_id'] for port in intf_ports if port['device_id'] == this_router_id])) # get vnic index for each network vnic_indices = [] for net_id in router_net_ids: vnic_indices.extend([edge_vnic_binding.vnic_index for edge_vnic_binding in edge_vnic_bindings if edge_vnic_binding.network_id == net_id ]) # make sure the list is unique: vnic_indices = list(set(vnic_indices)) # add to the result dict all_vnic_indices[this_router_id] = list(vnic_indices) return all_vnic_indices def update_nat_rules(self, context, router, router_id): edge_id = edge_utils.get_router_edge_id(context, router_id) with locking.LockManager.get_lock(str(edge_id)): router_ids = self.edge_manager.get_routers_on_same_edge( context, router_id) self._update_nat_rules_on_routers(context, router_id, router_ids) def _update_nat_rules_on_routers(self, context, target_router_id, router_ids): edge_id, az_name = self.plugin._get_edge_id_and_az_by_rtr_id( context, target_router_id) az = self._availability_zones.get_availability_zone(az_name) snats = [] dnats = [] vnics_by_router = self._get_all_routers_vnic_indices( context, router_ids) for router_id in router_ids: router_qry = context.session.query(l3_db_models.Router) router = router_qry.filter_by(id=router_id).one() if router.gw_port: snat, dnat = self.plugin._get_nat_rules(context, router) 
snats.extend(snat) dnats.extend(dnat) if (not az.bind_floatingip_to_all_interfaces and len(dnat) > 0): # Copy each DNAT rule to all vnics of the other routers, # to allow NAT-ed traffic between routers # no need for that if bind_floatingip_to_all_interfaces # is on (default) other_vnics = [] for other_router_id in router_ids: if other_router_id != router_id: other_vnics.extend( vnics_by_router[other_router_id]) for rule in dnat: for vnic_index in other_vnics: new_rule = rule.copy() # use explicit vnic_index new_rule['vnic_index'] = vnic_index dnats.extend([new_rule]) edge_utils.update_nat_rules( self.nsx_v, context, target_router_id, snats, dnats, az=az) def _update_external_interface_on_routers(self, context, target_router_id, router_ids): ext_net_ids = self._get_ext_net_ids(context, router_ids) if len(ext_net_ids) > 1: LOG.error("Can't configure external interface on multiple " "external networks %(networks)s for routers %(routers)s", {'networks': ext_net_ids, 'routers': router_ids}) msg = _("Can't configure external interface on multiple external " "networks") raise nsx_exc.NsxPluginException(err_msg=msg) gateway_primary_addr = None gateway_mask = None gateway_nexthop = None secondary = [] if not ext_net_ids: ext_net_id = None else: ext_net_id = ext_net_ids[0] for router_id in router_ids: router_qry = context.session.query(l3_db_models.Router) router = router_qry.filter_by(id=router_id).one() addr, mask, nexthop = self.plugin._get_external_attachment_info( context, router) if addr: if not gateway_primary_addr: gateway_primary_addr = addr else: secondary.append(addr) if mask and not gateway_mask: gateway_mask = mask if nexthop and not gateway_nexthop: gateway_nexthop = nexthop secondary.extend(self.plugin._get_floatingips_by_router( context, router_id)) LOG.debug('Configure ext interface as following, ext_net: %s, ' 'primaryAddress: %s, netmask: %s, nexthop: %s, secondary: ' '%s.', ext_net_id, gateway_primary_addr, gateway_mask, gateway_nexthop, secondary) 
self.edge_manager.update_external_interface( self.nsx_v, context, target_router_id, ext_net_id, gateway_primary_addr, gateway_mask, secondary) def _update_subnets_and_dnat_firewall_on_routers(self, context, target_router_id, router_ids, allow_external=True): fw_rules = [] for router_id in router_ids: # Add FW rules per single router router_qry = context.session.query(l3_db_models.Router) router = router_qry.filter_by(id=router_id).one() # subnet rules to allow east-west traffic subnet_rules = self.plugin._get_subnet_fw_rules(context, router) if subnet_rules: fw_rules.extend(subnet_rules) # DNAT rules dnat_rule = self.plugin._get_dnat_fw_rule(context, router) if dnat_rule: fw_rules.append(dnat_rule) # Add rule for not NAT-ed allocation pools alloc_pool_rule = self.plugin._get_allocation_pools_fw_rule( context, router) if alloc_pool_rule: fw_rules.append(alloc_pool_rule) # Add no-snat rules nosnat_fw_rules = self.plugin._get_nosnat_subnets_fw_rules( context, router) fw_rules.extend(nosnat_fw_rules) # If metadata service is enabled, block access to inter-edge network if self.plugin.metadata_proxy_handler: fw_rules += nsx_v_md_proxy.get_router_fw_rules() # TODO(asarfaty): Add fwaas rules when fwaas supports shared routers fw = {'firewall_rule_list': fw_rules} edge_utils.update_firewall(self.nsx_v, context, target_router_id, fw, allow_external=allow_external) def update_routes(self, context, router_id, nexthop): edge_id = edge_utils.get_router_edge_id(context, router_id) if edge_id: router_db = self.plugin._get_router(context, router_id) available_router_ids, conflict_router_ids = ( self._get_available_and_conflicting_ids(context, router_id)) is_conflict = self.edge_manager.is_router_conflict_on_edge( context, router_id, conflict_router_ids, [], 0) if is_conflict: self._notify_before_router_edge_association(context, router_db) with locking.LockManager.get_lock(str(edge_id)): self._remove_router_services_on_edge(context, router_id) with locking.LockManager.get_lock( 
'nsx-shared-router-pool'): self._unbind_router_on_edge(context, router_id) self._bind_router_on_available_edge( context, router_id, router_db.admin_state_up) new_edge_id = edge_utils.get_router_edge_id(context, router_id) with locking.LockManager.get_lock(str(new_edge_id)): self._add_router_services_on_available_edge(context, router_id) self._notify_after_router_edge_association(context, router_db) else: with locking.LockManager.get_lock(str(edge_id)): router_ids = self.edge_manager.get_routers_on_same_edge( context, router_id) if router_ids: self._update_routes_on_routers( context, router_id, router_ids) def _get_ext_net_ids(self, context, router_ids): ext_net_ids = [] for router_id in router_ids: router_qry = context.session.query(l3_db_models.Router) router_db = router_qry.filter_by(id=router_id).one() ext_net_id = router_db.gw_port_id and router_db.gw_port.network_id if ext_net_id and ext_net_id not in ext_net_ids: ext_net_ids.append(ext_net_id) return ext_net_ids def _get_shared_routers(self, context): shared_routers = [] routers_qry = context.session.query(l3_db_models.Router).all() for r in routers_qry: nsx_attr = (context.session.query( nsxv_models.NsxvRouterExtAttributes).filter_by( router_id=r['id']).first()) if nsx_attr and nsx_attr['router_type'] == 'shared': shared_routers.append(r) return shared_routers def _get_available_and_conflicting_ids(self, context, router_id): """Query all conflicting router ids with existing router id. The router with static routes will be conflict with all other routers. The routers with different gateway will be conflict. The routers with overlapping interface will be conflict. In not share_edges_between_tenants: The routers of different tenants will be in conflict with the router """ # 1. Check gateway # 2. Check subnet interface # 3. 
Check static routes router_list = [] src_router_dict = {} ports_qry = context.session.query(models_v2.Port) intf_ports = ports_qry.filter_by( device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF).all() gw_ports = ports_qry.filter_by( device_owner=l3_db.DEVICE_OWNER_ROUTER_GW).all() shared_routers = self._get_shared_routers(context) for r in shared_routers: router_dict = {} router_dict['id'] = r['id'] router_dict['gateway'] = None router_dict['tenant_id'] = r['tenant_id'] for gwp in gw_ports: if gwp['id'] == r['gw_port_id']: try: router_dict['gateway'] = ( gwp['fixed_ips'][0]['subnet_id']) except IndexError: LOG.error("Skipping GW port %s with no fixed IP", gwp['id']) subnet_ids = [p['fixed_ips'][0]['subnet_id'] for p in intf_ports if p['device_id'] == r['id']] router_dict['subnet_ids'] = subnet_ids extra_routes = self.plugin._get_extra_routes_by_router_id( context, r['id']) destinations = [routes['destination'] for routes in extra_routes] router_dict['destinations'] = destinations LOG.debug('The router configuration is %s for router %s', router_dict, router_dict['id']) if router_id != r['id']: router_list.append(router_dict) else: src_router_dict = router_dict # Router with static routes is conflict with other routers available_routers = [] conflict_routers = [] if src_router_dict['destinations'] != []: conflict_routers = [r['id'] for r in router_list] return (available_routers, conflict_routers) subnets_qry = context.session.query(models_v2.Subnet).all() conflict_cidr_set = [] for subnet in subnets_qry: if subnet['id'] in src_router_dict['subnet_ids']: conflict_cidr_set.append(subnet['cidr']) if (src_router_dict['gateway'] is not None and subnet['id'] == src_router_dict['gateway']): conflict_cidr_set.append(subnet['cidr']) conflict_ip_set = netaddr.IPSet(conflict_cidr_set) # Check conflict router ids with gateway and interface for r in router_list: if r['destinations'] != []: conflict_routers.append(r['id']) else: cidr_set = [] for subnet in subnets_qry: if subnet['id'] in 
r['subnet_ids']: cidr_set.append(subnet['cidr']) ip_set = netaddr.IPSet(cidr_set) if (src_router_dict['gateway'] is None or r['gateway'] is None or src_router_dict['gateway'] == r['gateway']): if (conflict_ip_set & ip_set): conflict_routers.append(r['id']) else: if (not cfg.CONF.nsxv.share_edges_between_tenants and src_router_dict['tenant_id'] != r['tenant_id']): # routers of other tenants are conflicting conflict_routers.append(r['id']) else: available_routers.append(r['id']) else: conflict_routers.append(r['id']) return (available_routers, conflict_routers) def _get_conflict_network_and_router_ids_by_intf(self, context, router_id): """Collect conflicting networks and routers based on interface ports. Collect conflicting networks which has overlapping subnet attached to another router. Collect conflict routers which has overlap network attached to it. Returns: conflict_network_ids: networks which has overlapping ips conflict_router_ids: routers which has overlapping interfaces intf_num: interfaces number attached on the router """ conflict_network_ids = [] conflict_router_ids = [] ports_qry = context.session.query(models_v2.Port) intf_ports = ports_qry.filter_by( device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF).all() router_net_ids = list( set([port['network_id'] for port in intf_ports if port['device_id'] == router_id])) if cfg.CONF.allow_overlapping_ips: router_intf_ports = [port for port in intf_ports if port['device_id'] == router_id] subnet_ids = [] for port in router_intf_ports: subnet_ids.append(port['fixed_ips'][0]['subnet_id']) subnets_qry = context.session.query(models_v2.Subnet).all() subnets = [subnet for subnet in subnets_qry if subnet['id'] in subnet_ids] conflict_network_ids.extend( self.plugin._get_conflict_network_ids_by_overlapping( context, subnets)) other_router_ports = [port for port in intf_ports if port['device_id'] != router_id] for port in other_router_ports: if port['network_id'] in router_net_ids: 
conflict_router_ids.append(port['device_id']) conflict_router_ids = list(set(conflict_router_ids)) conflict_network_ids = list(set(conflict_network_ids)) intf_num = len(router_net_ids) return (conflict_network_ids, conflict_router_ids, intf_num) def _get_conflict_network_ids_by_ext_net(self, context, router_id): """Collect conflicting networks based on external network. Collect conflicting networks which has overlapping subnet with the router's external network """ conflict_network_ids = [] ext_net_id = self._get_external_network_id_by_router(context, router_id) if ext_net_id: ext_net = self.plugin._get_network(context, ext_net_id) if ext_net.subnets: ext_subnet = ext_net.subnets[0] if ext_subnet: conflict_network_ids.extend( self.plugin._get_conflict_network_ids_by_overlapping( context, [ext_subnet])) return conflict_network_ids def _get_conflict_router_ids_by_ext_net(self, context, conflict_network_ids): """Collect conflict routers based on its external network. Collect conflict router if it has external network and the external network is in conflict_network_ids """ ext_net_filters = {'router:external': [True]} ext_nets = self.plugin.get_networks( context.elevated(), filters=ext_net_filters) ext_net_ids = [ext_net.get('id') for ext_net in ext_nets] conflict_ext_net_ids = list(set(ext_net_ids) & set(conflict_network_ids)) gw_ports_filter = {'network_id': conflict_ext_net_ids, 'device_owner': [l3_db.DEVICE_OWNER_ROUTER_GW]} ports_qry = context.session.query(models_v2.Port) gw_ports = model_query.apply_filters( ports_qry, models_v2.Port, gw_ports_filter).all() return list(set([gw_port['device_id'] for gw_port in gw_ports])) def _get_optional_and_conflict_router_ids_by_gw(self, context, router_id): """Collect conflict routers and optional routers based on GW port. 
Collect conflict router if it has different external network, else, collect optional router if it is not distributed and exclusive Returns: optional_router_ids: routers we can use its edge for the shared router. conflict_router_ids: conflict routers which has different gateway """ ext_net_id = self._get_external_network_id_by_router(context, router_id) routers = context.session.query(l3_db_models.Router).all() optional_router_ids = [] conflict_router_ids = [] if ext_net_id: ports_qry = context.session.query(models_v2.Port) all_gw_ports = ports_qry.filter_by( device_owner=l3_db.DEVICE_OWNER_ROUTER_GW).all() metadata_nets = nsxv_db.get_nsxv_internal_networks( context.session, vcns_const.InternalEdgePurposes.INTER_EDGE_PURPOSE) metadata_net_ids = [metadata_net['network_id'] for metadata_net in metadata_nets] # filter out metadata gw_ports all_gw_ports = [gw_port for gw_port in all_gw_ports if gw_port['network_id'] not in metadata_net_ids] for gw_port in all_gw_ports: if gw_port and gw_port['network_id'] != ext_net_id: conflict_router_ids.append(gw_port['device_id']) for router in routers: router_res = {} self.plugin._extend_nsx_router_dict(router_res, router) if (router['id'] not in conflict_router_ids and router_res.get('router_type') == 'shared'): optional_router_ids.append(router['id']) return optional_router_ids, conflict_router_ids def _bind_router_on_available_edge(self, context, router_id, admin_state): with locking.LockManager.get_lock('nsx-shared-router-pool'): conflict_network_ids, conflict_router_ids, intf_num = ( self._get_conflict_network_and_router_ids_by_intf(context, router_id)) conflict_network_ids_by_ext_net = ( self._get_conflict_network_ids_by_ext_net(context, router_id)) conflict_network_ids.extend(conflict_network_ids_by_ext_net) optional_router_ids, new_conflict_router_ids = ( self._get_available_and_conflicting_ids(context, router_id)) conflict_router_ids.extend(new_conflict_router_ids) conflict_router_ids = list(set(conflict_router_ids)) az, 
flavor_id = self.get_router_az_and_flavor_by_id(context, router_id) new = self.edge_manager.bind_router_on_available_edge( context, router_id, optional_router_ids, conflict_router_ids, conflict_network_ids, intf_num, az) # configure metadata service on the router. if self.plugin.metadata_proxy_handler and new: md_proxy_handler = self.plugin.get_metadata_proxy_handler( az.name) if md_proxy_handler: md_proxy_handler.configure_router_edge(context, router_id) edge_id = edge_utils.get_router_edge_id(context, router_id) with locking.LockManager.get_lock(str(edge_id)): # add all internal interfaces of the router on edge intf_net_ids = ( self.plugin._get_internal_network_ids_by_router(context, router_id)) for network_id in intf_net_ids: address_groups = self.plugin._get_address_groups( context, router_id, network_id) edge_utils.update_internal_interface( self.nsx_v, context, router_id, network_id, address_groups, admin_state) if flavor_id: # if several routers share same edge, they might have # different flavors with conflicting syslog settings. 
# in this case, each new router association will override # previous syslog settings on the edge self.edge_manager.update_syslog_by_flavor(context, router_id, flavor_id, edge_id) LOG.info("Binding shared router %(rtr)s: edge %(edge)s", {'rtr': router_id, 'edge': edge_id}) def _unbind_router_on_edge(self, context, router_id): az = self.get_router_az_by_id(context, router_id) edge_id = edge_utils.get_router_edge_id(context, router_id) self.edge_manager.reconfigure_shared_edge_metadata_port( context, router_id) self.edge_manager.unbind_router_on_edge(context, router_id) if self.plugin.metadata_proxy_handler: metadata_proxy_handler = self.plugin.get_metadata_proxy_handler( az.name) if metadata_proxy_handler: metadata_proxy_handler.cleanup_router_edge(context, router_id) LOG.info("Unbinding shared router %(rtr)s: edge %(edge)s", {'rtr': router_id, 'edge': edge_id}) def _add_router_services_on_available_edge(self, context, router_id): router_ids = self.edge_manager.get_routers_on_same_edge( context, router_id) self._update_external_interface_on_routers( context, router_id, router_ids) self._update_routes_on_routers(context, router_id, router_ids, only_if_target_routes=True) self._update_nat_rules_on_routers(context, router_id, router_ids) self._update_subnets_and_dnat_firewall_on_routers( context, router_id, router_ids, allow_external=True) def _remove_router_services_on_edge(self, context, router_id, intf_net_id=None): router_ids = self.edge_manager.get_routers_on_same_edge( context, router_id) router_ids.remove(router_id) # Refresh firewall, nats, ext_vnic as well as static routes self._update_routes_on_routers(context, router_id, router_ids, only_if_target_routes=True) self._update_subnets_and_dnat_firewall_on_routers( context, router_id, router_ids, allow_external=True) self._update_nat_rules_on_routers(context, router_id, router_ids) self._update_external_interface_on_routers( context, router_id, router_ids) intf_net_ids = ( 
self.plugin._get_internal_network_ids_by_router(context, router_id)) if intf_net_id: intf_net_ids.remove(intf_net_id) for net_id in intf_net_ids: edge_utils.delete_interface(self.nsx_v, context, router_id, net_id) @db_api.retry_db_errors def _update_router_gw_info(self, context, router_id, info, is_routes_update=False, force_update=False): router = self.plugin._get_router(context, router_id) edge_id = edge_utils.get_router_edge_id(context, router_id) if not edge_id: super(nsx_v.NsxVPluginV2, self.plugin)._update_router_gw_info( context, router_id, info, router=router) # UPDATE gw info only if the router has been attached to an edge else: is_migrated = False router_ids = self.edge_manager.get_routers_on_same_edge( context, router_id) org_ext_net_id = (router.gw_port_id and router.gw_port.network_id) org_enable_snat = router.enable_snat orgaddr, orgmask, orgnexthop = ( self.plugin._get_external_attachment_info( context, router)) super(nsx_v.NsxVPluginV2, self.plugin)._update_router_gw_info( context, router_id, info, router=router) new_ext_net_id = (router.gw_port_id and router.gw_port.network_id) new_enable_snat = router.enable_snat newaddr, newmask, newnexthop = ( self.plugin._get_external_attachment_info(context, router)) with locking.LockManager.get_lock(str(edge_id)): if new_ext_net_id and new_ext_net_id != org_ext_net_id: # Check whether the gw address has overlapping # with networks attached to the same edge conflict_network_ids = ( self._get_conflict_network_ids_by_ext_net( context, router_id)) is_migrated = self.edge_manager.is_router_conflict_on_edge( context, router_id, [], conflict_network_ids) if is_migrated: self._remove_router_services_on_edge(context, router_id) with locking.LockManager.get_lock( 'nsx-shared-router-pool'): self._unbind_router_on_edge(context, router_id) if not is_migrated: ext_net_ids = self._get_ext_net_ids(context, router_ids) if len(ext_net_ids) > 1: # move all routing service of the router from existing # edge to a new available 
edge if new_ext_net_id is # changed. self._remove_router_services_on_edge(context, router_id) with locking.LockManager.get_lock( 'nsx-shared-router-pool'): self._unbind_router_on_edge(context, router_id) is_migrated = True else: updated_routes = False # Update external vnic if addr or mask is changed if orgaddr != newaddr or orgmask != newmask: # If external gateway is removed, the default # gateway should be cleared before updating the # interface, or else the backend will fail. if (new_ext_net_id != org_ext_net_id and new_ext_net_id is None): self._update_routes_on_routers( context, router_id, router_ids) updated_routes = True self._update_external_interface_on_routers( context, router_id, router_ids) # Update SNAT rules if ext net changed # or ext net not changed but snat is changed. if ((new_ext_net_id != org_ext_net_id) or (new_ext_net_id == org_ext_net_id and new_enable_snat != org_enable_snat)): self._update_nat_rules_on_routers(context, router_id, router_ids) if (new_ext_net_id != org_ext_net_id or new_enable_snat != org_enable_snat): self._update_subnets_and_dnat_firewall_on_routers( context, router_id, router_ids, allow_external=True) # Update static routes in all (if not updated yet). 
if not updated_routes: self._update_routes_on_routers( context, router_id, router_ids) if is_migrated: self._notify_before_router_edge_association(context, router, edge_id) self._bind_router_on_available_edge( context, router_id, router.admin_state_up) edge_id = edge_utils.get_router_edge_id(context, router_id) with locking.LockManager.get_lock(str(edge_id)): self._add_router_services_on_available_edge(context, router_id) self._notify_after_router_edge_association(context, router) def _base_add_router_interface(self, context, router_id, interface_info): with locking.LockManager.get_lock('nsx-shared-router-pool'): return super(nsx_v.NsxVPluginV2, self.plugin).add_router_interface( context, router_id, interface_info) def add_router_interface(self, context, router_id, interface_info): # Lock the shared router before any action that can cause the router # to be deployed on a new edge. with locking.LockManager.get_lock('router-%s' % router_id): return self._safe_add_router_interface(context, router_id, interface_info) def _safe_add_router_interface(self, context, router_id, interface_info): self.plugin._check_intf_number_of_router(context, router_id) edge_id = edge_utils.get_router_edge_id(context, router_id) router_db = self.plugin._get_router(context, router_id) if edge_id: is_migrated = False with locking.LockManager.get_lock('nsx-shared-router-pool'): info = super(nsx_v.NsxVPluginV2, self.plugin).add_router_interface( context, router_id, interface_info) with locking.LockManager.get_lock(str(edge_id)): router_ids = self.edge_manager.get_routers_on_same_edge( context, router_id) subnet = self.plugin.get_subnet(context, info['subnet_id']) network_id = subnet['network_id'] # Collect all conflict networks whose cidr are overlapped # with networks attached to the router and conflict routers # which has same network with the router's. 
conflict_network_ids, conflict_router_ids, _ = ( self._get_conflict_network_and_router_ids_by_intf( context, router_id)) _, new_conflict_router_ids = ( self._get_available_and_conflicting_ids(context, router_id)) conflict_router_ids.extend(new_conflict_router_ids) conflict_router_ids = list(set(conflict_router_ids)) interface_ports = ( self.plugin._get_router_interface_ports_by_network( context, router_id, network_id)) # Consider whether another subnet of the same network # has been attached to the router. if len(interface_ports) > 1: is_conflict = ( self.edge_manager.is_router_conflict_on_edge( context, router_id, conflict_router_ids, conflict_network_ids, 0)) else: is_conflict = ( self.edge_manager.is_router_conflict_on_edge( context, router_id, conflict_router_ids, conflict_network_ids, 1)) if not is_conflict: address_groups = self.plugin._get_address_groups( context, router_id, network_id) edge_utils.update_internal_interface( self.nsx_v, context, router_id, network_id, address_groups, router_db.admin_state_up) if router_db.gw_port and router_db.enable_snat: self._update_nat_rules_on_routers( context, router_id, router_ids) self._update_subnets_and_dnat_firewall_on_routers( context, router_id, router_ids, allow_external=True) if is_conflict: self._notify_before_router_edge_association( context, router_db, edge_id) with locking.LockManager.get_lock(str(edge_id)): if len(interface_ports) > 1: self._remove_router_services_on_edge( context, router_id) else: self._remove_router_services_on_edge( context, router_id, network_id) self._unbind_router_on_edge(context, router_id) is_migrated = True if is_migrated: self._bind_router_on_available_edge( context, router_id, router_db.admin_state_up) edge_id = edge_utils.get_router_edge_id(context, router_id) with locking.LockManager.get_lock(str(edge_id)): self._add_router_services_on_available_edge(context, router_id) self._notify_after_router_edge_association(context, router_db) else: info = 
self._base_add_router_interface(context, router_id, interface_info) # bind and configure routing service on an available edge self._bind_router_on_available_edge( context, router_id, router_db.admin_state_up) edge_id = edge_utils.get_router_edge_id(context, router_id) with locking.LockManager.get_lock(str(edge_id)): self._add_router_services_on_available_edge(context, router_id) self._notify_after_router_edge_association(context, router_db) return info def remove_router_interface(self, context, router_id, interface_info): # Lock the shared router before any action that can cause the router # to be deployed on a new edge with locking.LockManager.get_lock('router-%s' % router_id): return self._safe_remove_router_interface(context, router_id, interface_info) def _safe_remove_router_interface(self, context, router_id, interface_info): edge_id = edge_utils.get_router_edge_id(context, router_id) with locking.LockManager.get_lock('nsx-shared-router-pool'): info = super( nsx_v.NsxVPluginV2, self.plugin).remove_router_interface( context, router_id, interface_info) subnet = self.plugin.get_subnet(context, info['subnet_id']) network_id = subnet['network_id'] ports = self.plugin._get_router_interface_ports_by_network( context, router_id, network_id) connected_networks = ( self.plugin._get_internal_network_ids_by_router(context, router_id)) if not ports and not connected_networks: router = self.plugin._get_router(context, router_id) self._notify_before_router_edge_association(context, router) with locking.LockManager.get_lock(str(edge_id)): router_ids = self.edge_manager.get_routers_on_same_edge( context, router_id) self._update_nat_rules_on_routers(context, router_id, router_ids) self._update_subnets_and_dnat_firewall_on_routers( context, router_id, router_ids, allow_external=True) if not ports: edge_utils.delete_interface(self.nsx_v, context, router_id, network_id) # unbind all services if no interfaces attached to the # router if not connected_networks: 
self._remove_router_services_on_edge(context, router_id) self._unbind_router_on_edge(context, router_id) else: address_groups = self.plugin._get_address_groups( context, router_id, network_id) edge_utils.update_internal_interface(self.nsx_v, context, router_id, network_id, address_groups) return info def _update_edge_router(self, context, router_id): edge_id = edge_utils.get_router_edge_id(context, router_id) with locking.LockManager.get_lock(str(edge_id)): router_ids = self.edge_manager.get_routers_on_same_edge( context, router_id) if router_ids: self._update_external_interface_on_routers( context, router_id, router_ids) self._update_nat_rules_on_routers( context, router_id, router_ids) self._update_subnets_and_dnat_firewall_on_routers( context, router_id, router_ids, allow_external=True) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2022538 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v/housekeeper/0000755000175000017500000000000000000000000024525 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v/housekeeper/__init__.py0000644000175000017500000000000000000000000026624 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v/housekeeper/error_backup_edge.py0000644000175000017500000001172000000000000030542 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants from oslo_log import log from sqlalchemy.orm import exc as sa_exc from vmware_nsx.common import locking from vmware_nsx.common import nsxv_constants from vmware_nsx.db import nsxv_db from vmware_nsx.extensions import projectpluginmap from vmware_nsx.plugins.common.housekeeper import base_job from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az from vmware_nsx.plugins.nsx_v.vshield.common import constants as vcns_const LOG = log.getLogger(__name__) class ErrorBackupEdgeJob(base_job.BaseJob): def __init__(self, global_readonly, readonly_jobs): super(ErrorBackupEdgeJob, self).__init__( global_readonly, readonly_jobs) self.azs = nsx_az.NsxVAvailabilityZones() def get_project_plugin(self, plugin): return plugin.get_plugin_by_type(projectpluginmap.NsxPlugins.NSX_V) def get_name(self): return 'error_backup_edge' def get_description(self): return 'revalidate backup Edge appliances in ERROR state' def run(self, context, readonly=False): super(ErrorBackupEdgeJob, self).run(context) error_count = 0 fixed_count = 0 error_info = '' # Gather ERROR state backup edges into dict filters = {'status': [constants.ERROR]} like_filters = {'router_id': vcns_const.BACKUP_ROUTER_PREFIX + "%"} with locking.LockManager.get_lock('nsx-edge-backup-pool'): error_edge_bindings = nsxv_db.get_nsxv_router_bindings( context.session, filters=filters, like_filters=like_filters) if not error_edge_bindings: LOG.debug('Housekeeping: no backup edges in ERROR state detected') return {'error_count': 0, 'fixed_count': 0, 'error_info': 'No backup edges in ERROR 
state detected'} # Keep list of current broken backup edges - as it may change while # HK is running for binding in error_edge_bindings: error_count += 1 error_info = base_job.housekeeper_warning( error_info, 'Backup Edge appliance %s is in ERROR state', binding['edge_id']) if not readonly: with locking.LockManager.get_lock(binding['edge_id']): if self._handle_backup_edge(context, binding): fixed_count += 1 return {'error_count': error_count, 'fixed_count': fixed_count, 'error_info': error_info} def _handle_backup_edge(self, context, binding): dist = (binding['edge_type'] == nsxv_constants.VDR_EDGE) result = True az = self.azs.get_availability_zone( binding['availability_zone']) try: update_result = self.plugin.nsx_v.update_edge( context, binding['router_id'], binding['edge_id'], binding['router_id'], None, appliance_size=binding['appliance_size'], dist=dist, availability_zone=az) if update_result: nsxv_db.update_nsxv_router_binding( context.session, binding['router_id'], status=constants.ACTIVE) except Exception as e: LOG.error('Housekeeping: failed to recover Edge ' 'appliance %s with exception %s', binding['edge_id'], e) update_result = False if not update_result: LOG.warning('Housekeeping: failed to recover Edge ' 'appliance %s, trying to delete', binding['edge_id']) result = self._delete_edge(context, binding, dist) return result def _delete_edge(self, context, binding, dist): try: nsxv_db.update_nsxv_router_binding( context.session, binding['router_id'], status=constants.PENDING_DELETE) except sa_exc.NoResultFound: LOG.debug("Housekeeping: Router binding %s does not exist.", binding['router_id']) try: self.plugin.nsx_v.delete_edge(context, binding['router_id'], binding['edge_id'], dist=dist) return True except Exception as e: LOG.warning('Housekeeping: Failed to delete edge %s with ' 'exception %s', binding['edge_id'], e) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 
vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v/housekeeper/error_dhcp_edge.py0000644000175000017500000003330200000000000030213 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants from oslo_log import log from oslo_utils import uuidutils from vmware_nsx.common import locking from vmware_nsx.db import nsxv_db from vmware_nsx.extensions import projectpluginmap from vmware_nsx.plugins.common.housekeeper import base_job from vmware_nsx.plugins.nsx_v.vshield.common import constants as vcns_const LOG = log.getLogger(__name__) class ErrorDhcpEdgeJob(base_job.BaseJob): def __init__(self, global_readonly, readonly_jobs): super(ErrorDhcpEdgeJob, self).__init__(global_readonly, readonly_jobs) self.error_count = 0 self.fixed_count = 0 self.fixed_sub_if_count = 0 self.error_info = '' def get_project_plugin(self, plugin): return plugin.get_plugin_by_type(projectpluginmap.NsxPlugins.NSX_V) def get_name(self): return 'error_dhcp_edge' def get_description(self): return 'revalidate DHCP Edge appliances in ERROR state' def run(self, context, readonly=False): super(ErrorDhcpEdgeJob, self).run(context) self.error_count = 0 self.fixed_count = 0 self.fixed_sub_if_count = 0 self.error_info = '' # Gather ERROR state DHCP edges into dict filters = {'status': [constants.ERROR]} error_edge_bindings = nsxv_db.get_nsxv_router_bindings( context.session, filters=filters) if not error_edge_bindings: 
LOG.debug('Housekeeping: no DHCP edges in ERROR state detected') return {'error_count': self.error_count, 'fixed_count': self.fixed_count, 'error_info': 'No DHCP error state edges detected'} with locking.LockManager.get_lock('nsx-dhcp-edge-pool'): edge_dict = {} for binding in error_edge_bindings: if binding['router_id'].startswith( vcns_const.DHCP_EDGE_PREFIX): bind_list = edge_dict.get(binding['edge_id'], []) bind_list.append(binding) edge_dict[binding['edge_id']] = bind_list # Get valid neutron networks and create a prefix dict. networks = [net['id'] for net in self.plugin.get_networks(context, fields=['id'])] pfx_dict = {net[:36 - len(vcns_const.DHCP_EDGE_PREFIX)]: net for net in networks} for edge_id in edge_dict.keys(): try: self._validate_dhcp_edge( context, edge_dict, pfx_dict, networks, edge_id, readonly) except Exception as e: self.error_count += 1 self.error_info = base_job.housekeeper_warning( self.error_info, 'Failed to recover DHCP Edge %s (%s)', edge_id, e) return {'error_count': self.error_count, 'fixed_count': self.fixed_count, 'error_info': self.error_info} def _validate_dhcp_edge( self, context, edge_dict, pfx_dict, networks, edge_id, readonly): # Also metadata network should be a valid network for the edge az_name = self.plugin.get_availability_zone_name_by_edge(context, edge_id) with locking.LockManager.get_lock(edge_id): vnic_binds = nsxv_db.get_edge_vnic_bindings_by_edge( context.session, edge_id) edge_networks = [bind['network_id'] for bind in vnic_binds] # Step (A) # Find router bindings which are mapped to dead networks, or # do not have interfaces registered in nsxv tables for binding in edge_dict[edge_id]: router_id = binding['router_id'] net_pfx = router_id[len(vcns_const.DHCP_EDGE_PREFIX):] net_id = pfx_dict.get(net_pfx) if net_id is None: # Delete router binding as we do not have such network # in Neutron self.error_count += 1 self.error_info = base_job.housekeeper_warning( self.error_info, 'router binding %s for edge %s has no 
matching ' 'neutron network', router_id, edge_id) if not readonly: nsxv_db.delete_nsxv_router_binding( context.session, binding['router_id']) self.fixed_count += 1 else: if net_id not in edge_networks: # Create vNic bind here self.error_count += 1 self.error_info = base_job.housekeeper_warning( self.error_info, 'edge %s vnic binding missing for network %s', edge_id, net_id) if not readonly: nsxv_db.allocate_edge_vnic_with_tunnel_index( context.session, edge_id, net_id, az_name) self.fixed_count += 1 # Step (B) # Find vNic bindings which reference invalid networks or aren't # bound to any router binding # Reread vNic binds as we might created more or deleted some in # step (A) vnic_binds = nsxv_db.get_edge_vnic_bindings_by_edge( context.session, edge_id) for bind in vnic_binds: if bind['network_id'] not in networks: self.error_count += 1 self.error_info = base_job.housekeeper_warning( self.error_info, 'edge vnic binding for edge %s is for invalid ' 'network id %s', edge_id, bind['network_id']) if not readonly: nsxv_db.free_edge_vnic_by_network( context.session, edge_id, bind['network_id']) self.fixed_count += 1 # Step (C) # Verify that backend is in sync with Neutron # Reread vNic binds as we might deleted some in step (B) vnic_binds = nsxv_db.get_edge_vnic_bindings_by_edge( context.session, edge_id) # Transform to network-keyed dict vnic_dict = {vnic['network_id']: { 'vnic_index': vnic['vnic_index'], 'tunnel_index': vnic['tunnel_index'] } for vnic in vnic_binds} backend_vnics = self.plugin.nsx_v.vcns.get_interfaces( edge_id)[1].get('vnics', []) if_changed = {} self._validate_edge_subinterfaces( context, edge_id, backend_vnics, vnic_dict, if_changed) self._add_missing_subinterfaces( context, edge_id, vnic_binds, backend_vnics, if_changed, readonly) if not readonly: for vnic in backend_vnics: if if_changed[vnic['index']]: self.plugin.nsx_v.vcns.update_interface(edge_id, vnic) self._update_router_bindings(context, edge_id) self.fixed_count += self.fixed_sub_if_count 
def _validate_edge_subinterfaces(self, context, edge_id, backend_vnics, vnic_dict, if_changed): # Validate that all the interfaces on the Edge # appliance are registered in nsxv_edge_vnic_bindings for vnic in backend_vnics: if_changed[vnic['index']] = False if (vnic['isConnected'] and vnic['type'] == 'trunk' and vnic['subInterfaces']): for sub_if in vnic['subInterfaces']['subInterfaces']: # Subinterface name field contains the net id vnic_bind = vnic_dict.get(sub_if['logicalSwitchName']) if (vnic_bind and vnic_bind['vnic_index'] == vnic['index'] and vnic_bind['tunnel_index'] == sub_if['tunnelId']): pass else: self.error_count += 1 self.error_info = base_job.housekeeper_warning( self.error_info, 'subinterface %s for vnic %s on edge %s is not ' 'defined in nsxv_edge_vnic_bindings', sub_if['tunnelId'], vnic['index'], edge_id) self.fixed_sub_if_count += 1 if_changed[vnic['index']] = True vnic['subInterfaces']['subInterfaces'].remove(sub_if) def _add_missing_subinterfaces(self, context, edge_id, vnic_binds, backend_vnics, if_changed, readonly): # Verify that all the entries in # nsxv_edge_vnic_bindings are attached on the Edge # Arrange the vnic binds in a list of lists - vnics and subinterfaces metadata_nets = [ net['network_id'] for net in nsxv_db.get_nsxv_internal_networks( context.session, vcns_const.InternalEdgePurposes.INTER_EDGE_PURPOSE)] for vnic_bind in vnic_binds: if vnic_bind['network_id'] in metadata_nets: continue for vnic in backend_vnics: if vnic['index'] == vnic_bind['vnic_index']: found = False tunnel_index = vnic_bind['tunnel_index'] network_id = vnic_bind['network_id'] for sub_if in (vnic.get('subInterfaces', {}).get( 'subInterfaces', [])): if sub_if['tunnelId'] == tunnel_index: found = True if sub_if.get('logicalSwitchName') != network_id: self.error_count += 1 self.error_info = base_job.housekeeper_warning( self.error_info, 'subinterface %s on vnic %s on edge %s ' 'should be connected to network %s', tunnel_index, vnic['index'], edge_id, network_id) 
if_changed[vnic['index']] = True if not readonly: self._recreate_vnic_subinterface( context, network_id, edge_id, vnic, tunnel_index) self.fixed_count += 1 sub_if['name'] = network_id if not found: self.error_count += 1 self.error_info = base_job.housekeeper_warning( self.error_info, 'subinterface %s on vnic %s on edge %s should be ' 'connected to network %s but is missing', tunnel_index, vnic['index'], edge_id, network_id) if_changed[vnic['index']] = True if not readonly: self._recreate_vnic_subinterface( context, network_id, edge_id, vnic, tunnel_index) self.fixed_sub_if_count += 1 def _recreate_vnic_subinterface( self, context, network_id, edge_id, vnic, tunnel_index): vnic_index = vnic['index'] network_name_item = [edge_id, str(vnic_index), str(tunnel_index)] network_name = ('-'.join(network_name_item) + uuidutils.generate_uuid())[:36] port_group_id = vnic.get('portgroupId') address_groups = self.plugin._create_network_dhcp_address_group( context, network_id) port_group_id, iface = self.plugin.edge_manager._create_sub_interface( context, network_id, network_name, tunnel_index, address_groups, port_group_id) if not vnic.get('subInterfaces'): vnic['subInterfaces'] = {'subInterfaces': []} vnic['subInterfaces']['subInterfaces'].append(iface) if vnic['type'] != 'trunk': # reinitialize the interface as it is missing config vnic['name'] = (vcns_const.INTERNAL_VNIC_NAME + str(vnic['index'])) vnic['type'] = 'trunk' vnic['portgroupId'] = port_group_id vnic['mtu'] = 1500 vnic['enableProxyArp'] = False vnic['enableSendRedirects'] = True vnic['isConnected'] = True def _update_router_bindings(self, context, edge_id): edge_router_binds = nsxv_db.get_nsxv_router_bindings_by_edge( context.session, edge_id) for b in edge_router_binds: nsxv_db.update_nsxv_router_binding( context.session, b['router_id'], status='ACTIVE') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 
vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v/managers.py0000644000175000017500000000700400000000000024356 0ustar00coreycorey00000000000000# Copyright 2014 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg import stevedore from oslo_log import log from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsx_exc LOG = log.getLogger(__name__) ROUTER_TYPE_DRIVERS = ["distributed", "exclusive", "shared"] class RouterTypeManager(stevedore.named.NamedExtensionManager): """Manage router segment types using drivers.""" def __init__(self, plugin): # Mapping from type name to DriverManager self.drivers = {} LOG.info("Configured router type driver names: %s", ROUTER_TYPE_DRIVERS) super(RouterTypeManager, self).__init__( 'vmware_nsx.neutron.nsxv.router_type_drivers', ROUTER_TYPE_DRIVERS, invoke_on_load=True, invoke_args=(plugin,)) LOG.info("Loaded type driver names: %s", self.names()) self._register_types() self._check_tenant_router_types(cfg.CONF.nsxv.tenant_router_types) def _register_types(self): for ext in self: router_type = ext.obj.get_type() if router_type in self.drivers: LOG.error("Type driver '%(new_driver)s' ignored because " "type driver '%(old_driver)s' is already " "registered for type '%(type)s'", {'new_driver': ext.name, 'old_driver': self.drivers[router_type].name, 'type': router_type}) else: self.drivers[router_type] = ext LOG.info("Registered types: %s", self.drivers.keys()) def 
_check_tenant_router_types(self, types): self.tenant_router_types = [] for router_type in types: if router_type in self.drivers: self.tenant_router_types.append(router_type) else: msg = _("No type driver for tenant router_type: %s. " "Service terminated!") % router_type LOG.error(msg) raise SystemExit(msg) LOG.info("Tenant router_types: %s", self.tenant_router_types) def get_tenant_router_driver(self, context, router_type): driver = self.drivers.get(router_type) if driver: return driver.obj raise nsx_exc.NoRouterAvailable() def decide_tenant_router_type(self, context, router_type=None): if router_type is None: for rt in self.tenant_router_types: driver = self.drivers.get(rt) if driver: return rt raise nsx_exc.NoRouterAvailable() elif context.is_admin: driver = self.drivers.get(router_type) if driver: return router_type elif router_type in self.tenant_router_types: driver = self.drivers.get(router_type) if driver: return router_type raise nsx_exc.NoRouterAvailable() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v/md_proxy.py0000644000175000017500000007603500000000000024434 0ustar00coreycorey00000000000000# Copyright 2014 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import hashlib import hmac import eventlet import netaddr from neutron_lib import constants from neutron_lib import context as neutron_context from neutron_lib.plugins import directory from oslo_config import cfg from oslo_log import log as logging from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsxv_exc from vmware_nsx.common import locking from vmware_nsx.common import nsxv_constants from vmware_nsx.common import utils from vmware_nsx.db import nsxv_db from vmware_nsx.extensions import projectpluginmap from vmware_nsx.plugins.nsx_v.vshield import ( nsxv_loadbalancer as nsxv_lb) from vmware_nsx.plugins.nsx_v.vshield.common import ( constants as vcns_const) from vmware_nsx.plugins.nsx_v.vshield.common import exceptions from vmware_nsx.plugins.nsx_v.vshield import edge_utils from vmware_nsx.services.lbaas.nsx_v import lbaas_common METADATA_POOL_NAME = 'MDSrvPool' METADATA_VSE_NAME = 'MdSrv' METADATA_IP_ADDR = '169.254.169.254' METADATA_TCP_PORT = 80 METADATA_HTTPS_PORT = 443 METADATA_HTTPS_VIP_PORT = 8775 INTERNAL_SUBNET = '169.254.128.0/17' MAX_INIT_THREADS = 3 NET_WAIT_INTERVAL = 240 NET_CHECK_INTERVAL = 10 EDGE_WAIT_INTERVAL = 900 EDGE_CHECK_INTERVAL = 10 LOG = logging.getLogger(__name__) DEFAULT_EDGE_FIREWALL_RULE = { 'name': 'VSERule', 'enabled': True, 'action': 'allow', 'source_vnic_groups': ['vse'], 'destination_vnic_groups': ['external']} def get_router_fw_rules(): # build the allowed destination ports list int_ports = [METADATA_TCP_PORT, METADATA_HTTPS_PORT, METADATA_HTTPS_VIP_PORT] str_ports = [str(p) for p in int_ports] # the list of ports can be extended by configuration if cfg.CONF.nsxv.metadata_service_allowed_ports: str_metadata_ports = [str(p) for p in cfg.CONF.nsxv.metadata_service_allowed_ports] str_ports = str_ports + str_metadata_ports separator = ',' dest_ports = separator.join(str_ports) fw_rules = [ DEFAULT_EDGE_FIREWALL_RULE, { 'name': 'MDServiceIP', 'enabled': True, 'action': 'allow', 'destination_ip_address': 
[METADATA_IP_ADDR], 'protocol': 'tcp', 'destination_port': dest_ports }, { 'name': 'VSEMDInterEdgeNet', 'enabled': True, 'action': 'allow', 'source_vnic_groups': ['vse'], 'destination_ip_address': [INTERNAL_SUBNET] }, { 'name': 'MDInterEdgeNet', 'enabled': True, 'action': 'deny', 'destination_ip_address': [INTERNAL_SUBNET] }] return fw_rules def get_db_internal_edge_ips(context, az_name): ip_list = [] edge_list = nsxv_db.get_nsxv_internal_edges_by_purpose( context.session, vcns_const.InternalEdgePurposes.INTER_EDGE_PURPOSE) if edge_list: # Take only the edges on this availability zone ip_list = [edge['ext_ip_address'] for edge in edge_list if nsxv_db.get_router_availability_zone( context.session, edge['router_id']) == az_name] return ip_list class NsxVMetadataProxyHandler(object): """A metadata proxy handler for a specific availability zone""" def __init__(self, nsxv_plugin, availability_zone): LOG.debug('Initializing metadata for availability zone %s', availability_zone.name) self.nsxv_plugin = nsxv_plugin context = neutron_context.get_admin_context() self.az = availability_zone # Init cannot run concurrently on multiple nodes with locking.LockManager.get_lock('nsx-metadata-init'): # if the core plugin is the TVD - we need to add project # plugin mapping for the internal project core_plugin = directory.get_plugin() if core_plugin.is_tvd_plugin(): try: core_plugin.create_project_plugin_map( context, {'project_plugin_map': {'plugin': projectpluginmap.NsxPlugins.NSX_V, 'project': nsxv_constants.INTERNAL_TENANT_ID}}, internal=True) except projectpluginmap.ProjectPluginAlreadyExists: pass self.internal_net, self.internal_subnet = ( self._get_internal_network_and_subnet(context)) LOG.debug('Metadata internal net for AZ %(az)s is %(net)s, ' 'subnet is %(sub)s', {'az': availability_zone.name, 'net': self.internal_net, 'sub': self.internal_subnet}) self.proxy_edge_ips = self._get_proxy_edges(context) LOG.debug('Metadata proxy internal IPs for AZ %(az)s is ' 
'%(addresses)s', {'az': availability_zone.name, 'addresses': self.proxy_edge_ips}) def _create_metadata_internal_network(self, context, cidr): # Neutron requires a network to have some tenant_id tenant_id = nsxv_constants.INTERNAL_TENANT_ID net_name = 'inter-edge-net' if not self.az.is_default(): net_name = '%s-%s' % (net_name, self.az.name) net_data = {'network': {'name': net_name, 'admin_state_up': True, 'port_security_enabled': False, 'shared': False, 'availability_zone_hints': [self.az.name], 'tenant_id': tenant_id}} net = self.nsxv_plugin.create_network(context, net_data) subnet_data = {'subnet': {'cidr': cidr, 'name': 'inter-edge-subnet', 'gateway_ip': constants.ATTR_NOT_SPECIFIED, 'allocation_pools': constants.ATTR_NOT_SPECIFIED, 'ip_version': 4, 'dns_nameservers': constants.ATTR_NOT_SPECIFIED, 'host_routes': constants.ATTR_NOT_SPECIFIED, 'enable_dhcp': False, 'network_id': net['id'], 'tenant_id': tenant_id}} subnet = self.nsxv_plugin.create_subnet( context, subnet_data) return net['id'], subnet['id'] def _get_internal_net_by_az(self, context): # Get the internal network for the current az int_net = nsxv_db.get_nsxv_internal_network_for_az( context.session, vcns_const.InternalEdgePurposes.INTER_EDGE_PURPOSE, self.az.name) if int_net: return int_net['network_id'] def _get_internal_network_and_subnet(self, context): # Try to find internal net, internal subnet. 
If not found, create new internal_net = self._get_internal_net_by_az(context) internal_subnet = None if internal_net: internal_subnet = self.nsxv_plugin.get_subnets( context, fields=['id'], filters={'network_id': [internal_net]})[0]['id'] if internal_net is None or internal_subnet is None: if cfg.CONF.nsxv.metadata_initializer: # Couldn't find net, subnet - create new try: internal_net, internal_subnet = ( self._create_metadata_internal_network( context, INTERNAL_SUBNET)) except Exception as e: nsxv_db.delete_nsxv_internal_network( context.session, vcns_const.InternalEdgePurposes.INTER_EDGE_PURPOSE, internal_net) # if network is created, clean up if internal_net: self.nsxv_plugin.delete_network(context, internal_net) error = (_("Exception %s while creating internal " "network for metadata service") % e) LOG.exception(error) raise nsxv_exc.NsxPluginException(err_msg=error) # Update the new network_id in DB nsxv_db.create_nsxv_internal_network( context.session, nsxv_constants.INTER_EDGE_PURPOSE, self.az.name, internal_net) else: error = _('Metadata initialization is incomplete on ' 'initializer node') raise nsxv_exc.NsxPluginException(err_msg=error) return internal_net, internal_subnet def _get_edge_internal_ip(self, context, rtr_id): filters = { 'network_id': [self.internal_net], 'device_id': [rtr_id]} ports = self.nsxv_plugin.get_ports(context, filters=filters) if ports: return ports[0]['fixed_ips'][0]['ip_address'] else: LOG.error("No port found for metadata for %s", rtr_id) def _get_edge_rtr_id_by_ext_ip(self, context, edge_ip): rtr_list = nsxv_db.get_nsxv_internal_edge( context.session, edge_ip) if rtr_list: return rtr_list[0]['router_id'] def _get_edge_id_by_rtr_id(self, context, rtr_id): binding = nsxv_db.get_nsxv_router_binding( context.session, rtr_id) if binding: return binding['edge_id'] def _get_proxy_edges(self, context): proxy_edge_ips = [] db_edge_ips = get_db_internal_edge_ips(context, self.az.name) if len(db_edge_ips) > 
len(self.az.mgt_net_proxy_ips): error = (_('Number of configured metadata proxy IPs is smaller ' 'than number of Edges which are already provisioned ' 'for availability zone %s'), self.az.name) raise nsxv_exc.NsxPluginException(err_msg=error) pool = eventlet.GreenPool(min(MAX_INIT_THREADS, len(self.az.mgt_net_proxy_ips))) # Edge IPs that exist in both lists have to be validated that their # Edge appliance settings are valid for edge_inner_ip in pool.imap( self._setup_proxy_edge_route_and_connectivity, list(set(db_edge_ips) & set(self.az.mgt_net_proxy_ips))): proxy_edge_ips.append(edge_inner_ip) # Edges that exist only in the CFG list, should be paired with Edges # that exist only in the DB list. The existing Edge from the list will # be reconfigured to match the new config edge_to_convert_ips = ( list(set(db_edge_ips) - set(self.az.mgt_net_proxy_ips))) edge_ip_to_set = ( list(set(self.az.mgt_net_proxy_ips) - set(db_edge_ips))) if edge_to_convert_ips: if cfg.CONF.nsxv.metadata_initializer: for edge_inner_ip in pool.imap( self._setup_proxy_edge_external_interface_ip, zip(edge_to_convert_ips, edge_ip_to_set)): proxy_edge_ips.append(edge_inner_ip) else: error = _('Metadata initialization is incomplete on ' 'initializer node') raise nsxv_exc.NsxPluginException(err_msg=error) # Edges that exist in the CFG list but do not have a matching DB # element will be created. 
remaining_cfg_ips = edge_ip_to_set[len(edge_to_convert_ips):] if remaining_cfg_ips: if cfg.CONF.nsxv.metadata_initializer: for edge_inner_ip in pool.imap( self._setup_new_proxy_edge, remaining_cfg_ips): proxy_edge_ips.append(edge_inner_ip) pool.waitall() else: error = _('Metadata initialization is incomplete on ' 'initializer node') raise nsxv_exc.NsxPluginException(err_msg=error) return proxy_edge_ips def _setup_proxy_edge_route_and_connectivity(self, rtr_ext_ip, rtr_id=None, edge_id=None): # Use separate context per each as we use this in tread context context = neutron_context.get_admin_context() if not rtr_id: rtr_id = self._get_edge_rtr_id_by_ext_ip(context, rtr_ext_ip) if not edge_id: edge_id = self._get_edge_id_by_rtr_id(context, rtr_id) if not rtr_id or not edge_id: # log this error and return without the ip, but don't fail LOG.error("Failed find edge for router %(rtr_id)s with ip " "%(rtr_ext_ip)s", {'rtr_id': rtr_id, 'rtr_ext_ip': rtr_ext_ip}) return # Read and validate DGW. If different, replace with new value try: # This may fail if the edge was deleted on backend h, routes = self.nsxv_plugin.nsx_v.vcns.get_routes(edge_id) except exceptions.ResourceNotFound as e: # log this error and return without the ip, but don't fail LOG.error("Failed to get routes for metadata proxy edge " "%(edge)s: %(err)s", {'edge': edge_id, 'err': e}) return dgw = routes.get('defaultRoute', {}).get('gatewayAddress') if dgw != self.az.mgt_net_default_gateway: if cfg.CONF.nsxv.metadata_initializer: self.nsxv_plugin._update_routes( context, rtr_id, self.az.mgt_net_default_gateway) else: error = _('Metadata initialization is incomplete on ' 'initializer node') raise nsxv_exc.NsxPluginException(err_msg=error) # Read and validate connectivity h, if_data = self.nsxv_plugin.nsx_v.get_interface( edge_id, vcns_const.EXTERNAL_VNIC_INDEX) a_groups = if_data.get('addressGroups', {}).get('addressGroups') if a_groups: cur_ip = a_groups[0].get('primaryAddress') else: cur_ip = None cur_pgroup = 
if_data['portgroupId'] if (if_data and cur_pgroup != self.az.mgt_net_moid or cur_ip != rtr_ext_ip): if cfg.CONF.nsxv.metadata_initializer: self.nsxv_plugin.nsx_v.update_interface( rtr_id, edge_id, vcns_const.EXTERNAL_VNIC_INDEX, self.az.mgt_net_moid, address=rtr_ext_ip, netmask=self.az.mgt_net_proxy_netmask, secondary=[]) else: error = _('Metadata initialization is incomplete on ' 'initializer node') raise nsxv_exc.NsxPluginException(err_msg=error) # Read and validate LB pool member configuration # When the Nova IP address is changed in the ini file, we should apply # this change to the LB pool lb_obj = nsxv_lb.NsxvLoadbalancer.get_loadbalancer( self.nsxv_plugin.nsx_v.vcns, edge_id) vs = lb_obj.virtual_servers.get(METADATA_VSE_NAME) update_md_proxy = False if vs: md_members = {member.payload['ipAddress']: member.payload['name'] for member in vs.default_pool.members.values()} if len(cfg.CONF.nsxv.nova_metadata_ips) == len(md_members): m_ips = md_members.keys() m_to_convert = (list(set(m_ips) - set(cfg.CONF.nsxv.nova_metadata_ips))) m_ip_to_set = (list(set(cfg.CONF.nsxv.nova_metadata_ips) - set(m_ips))) if m_to_convert or m_ip_to_set: update_md_proxy = True for m_ip in m_to_convert: m_name = md_members[m_ip] vs.default_pool.members[m_name].payload['ipAddress'] = ( m_ip_to_set.pop()) else: LOG.error('Number of metadata members should not change') try: # This may fail if the edge is powered off right now if update_md_proxy: lb_obj.submit_to_backend(self.nsxv_plugin.nsx_v.vcns, edge_id) except exceptions.RequestBad as e: # log the error and continue LOG.error("Failed to update load balancer on metadata " "proxy edge %(edge)s: %(err)s", {'edge': edge_id, 'err': e}) edge_ip = self._get_edge_internal_ip(context, rtr_id) if edge_ip: return edge_ip def _setup_proxy_edge_external_interface_ip(self, rtr_ext_ips): # Use separate context per each as we use this in tread context context = neutron_context.get_admin_context() rtr_old_ext_ip, rtr_new_ext_ip = rtr_ext_ips rtr_id = 
self._get_edge_rtr_id_by_ext_ip(context, rtr_old_ext_ip) edge_id = self._get_edge_id_by_rtr_id(context, rtr_id) # Replace DB entry as we cannot update the table PK nsxv_db.delete_nsxv_internal_edge(context.session, rtr_old_ext_ip) edge_ip = self._setup_proxy_edge_route_and_connectivity( rtr_new_ext_ip, rtr_id, edge_id) nsxv_db.create_nsxv_internal_edge( context.session, rtr_new_ext_ip, vcns_const.InternalEdgePurposes.INTER_EDGE_PURPOSE, rtr_id) if edge_ip: return edge_ip def _setup_new_proxy_edge(self, rtr_ext_ip): # Use separate context per each as we use this in tread context context = neutron_context.get_admin_context() rtr_id = None try: rtr_name = 'metadata_proxy_router' if not self.az.is_default(): rtr_name = '%s-%s' % (rtr_name, self.az.name) router_data = { 'router': { 'name': rtr_name, 'admin_state_up': True, 'router_type': 'exclusive', 'availability_zone_hints': [self.az.name], 'tenant_id': nsxv_constants.INTERNAL_TENANT_ID}} rtr = self.nsxv_plugin.create_router( context, router_data, allow_metadata=False) rtr_id = rtr['id'] edge_id = self._get_edge_id_by_rtr_id(context, rtr_id) if not edge_id: LOG.error('No edge create for router - %s', rtr_id) if rtr_id: self.nsxv_plugin.delete_router(context, rtr_id) return self.nsxv_plugin.nsx_v.update_interface( rtr['id'], edge_id, vcns_const.EXTERNAL_VNIC_INDEX, self.az.mgt_net_moid, address=rtr_ext_ip, netmask=self.az.mgt_net_proxy_netmask, secondary=[]) port_data = { 'port': { 'network_id': self.internal_net, 'name': None, 'admin_state_up': True, 'device_id': rtr_id, 'device_owner': (constants.DEVICE_OWNER_NETWORK_PREFIX + 'md_interface'), 'fixed_ips': constants.ATTR_NOT_SPECIFIED, 'mac_address': constants.ATTR_NOT_SPECIFIED, 'port_security_enabled': False, 'tenant_id': nsxv_constants.INTERNAL_TENANT_ID}} port = self.nsxv_plugin.base_create_port(context, port_data) address_groups = self._get_address_groups( context, self.internal_net, rtr_id, is_proxy=True) edge_ip = port['fixed_ips'][0]['ip_address'] with 
locking.LockManager.get_lock(edge_id): edge_utils.update_internal_interface( self.nsxv_plugin.nsx_v, context, rtr_id, self.internal_net, address_groups) self._setup_metadata_lb(rtr_id, port['fixed_ips'][0]['ip_address'], cfg.CONF.nsxv.nova_metadata_port, cfg.CONF.nsxv.nova_metadata_port, cfg.CONF.nsxv.nova_metadata_ips, proxy_lb=True) firewall_rules = [ DEFAULT_EDGE_FIREWALL_RULE, { 'action': 'allow', 'enabled': True, 'source_ip_address': [INTERNAL_SUBNET]}] edge_utils.update_firewall( self.nsxv_plugin.nsx_v, context, rtr_id, {'firewall_rule_list': firewall_rules}, allow_external=False) if self.az.mgt_net_default_gateway: self.nsxv_plugin._update_routes( context, rtr_id, self.az.mgt_net_default_gateway) nsxv_db.create_nsxv_internal_edge( context.session, rtr_ext_ip, vcns_const.InternalEdgePurposes.INTER_EDGE_PURPOSE, rtr_id) return edge_ip except Exception as e: LOG.exception("Exception %s while creating internal edge " "for metadata service", e) ports = self.nsxv_plugin.get_ports( context, filters={'device_id': [rtr_id]}) for port in ports: self.nsxv_plugin.delete_port(context, port['id'], l3_port_check=True, nw_gw_port_check=True, allow_delete_internal=True) nsxv_db.delete_nsxv_internal_edge( context.session, rtr_ext_ip) if rtr_id: self.nsxv_plugin.delete_router(context, rtr_id) def _get_address_groups(self, context, network_id, device_id, is_proxy): filters = {'network_id': [network_id], 'device_id': [device_id]} ports = self.nsxv_plugin.get_ports(context, filters=filters) subnets = self.nsxv_plugin.get_subnets(context, filters=filters) address_groups = [] for subnet in subnets: address_group = {} net = netaddr.IPNetwork(subnet['cidr']) address_group['subnetMask'] = str(net.netmask) address_group['subnetPrefixLength'] = str(net.prefixlen) for port in ports: fixed_ips = port['fixed_ips'] for fip in fixed_ips: s_id = fip['subnet_id'] ip_addr = fip['ip_address'] if s_id == subnet['id'] and netaddr.valid_ipv4(ip_addr): address_group['primaryAddress'] = ip_addr break 
# For Edge appliances which aren't the metadata proxy Edge # we add the metadata IP address if not is_proxy and network_id == self.internal_net: address_group['secondaryAddresses'] = { 'type': 'secondary_addresses', 'ipAddress': [METADATA_IP_ADDR]} address_groups.append(address_group) return address_groups def _create_ssl_cert(self, edge_id=None): # Create a self signed certificate in the backend if both Cert details # and private key are not supplied in nsx.ini if (not cfg.CONF.nsxv.metadata_nova_client_cert and not cfg.CONF.nsxv.metadata_nova_client_priv_key): h = self.nsxv_plugin.nsx_v.vcns.create_csr(edge_id)[0] # Extract the CSR ID from header csr_id = lbaas_common.extract_resource_id(h['location']) # Create a self signed certificate cert = self.nsxv_plugin.nsx_v.vcns.create_csr_cert(csr_id)[1] cert_id = cert['objectId'] else: # Raise an error if either the Cert path or the private key is not # configured error = None if not cfg.CONF.nsxv.metadata_nova_client_cert: error = _('Metadata certificate path not configured') elif not cfg.CONF.nsxv.metadata_nova_client_priv_key: error = _('Metadata client private key not configured') if error: raise nsxv_exc.NsxPluginException(err_msg=error) pem_encoding = utils.read_file( cfg.CONF.nsxv.metadata_nova_client_cert) priv_key = utils.read_file( cfg.CONF.nsxv.metadata_nova_client_priv_key) request = { 'pemEncoding': pem_encoding, 'privateKey': priv_key} cert = self.nsxv_plugin.nsx_v.vcns.upload_edge_certificate( edge_id, request)[1] cert_id = cert.get('certificates')[0]['objectId'] return cert_id def _setup_metadata_lb(self, rtr_id, vip, v_port, s_port, member_ips, proxy_lb=False, context=None): if context is None: context = neutron_context.get_admin_context() edge_id = self._get_edge_id_by_rtr_id(context, rtr_id) LOG.debug('Setting up Edge device %s', edge_id) lb_obj = nsxv_lb.NsxvLoadbalancer() protocol = 'HTTP' ssl_pass_through = False cert_id = None # Set protocol to HTTPS with default port of 443 if metadata_insecure 
# is set to False. if not cfg.CONF.nsxv.metadata_insecure: protocol = 'HTTPS' if proxy_lb: v_port = METADATA_HTTPS_VIP_PORT else: v_port = METADATA_HTTPS_PORT # Create the certificate on the backend cert_id = self._create_ssl_cert(edge_id) ssl_pass_through = proxy_lb mon_type = protocol if proxy_lb else 'tcp' # Create virtual server virt_srvr = nsxv_lb.NsxvLBVirtualServer( name=METADATA_VSE_NAME, ip_address=vip, protocol=protocol, port=v_port) # For router Edge, we add X-LB-Proxy-ID header if not proxy_lb: md_app_rule = nsxv_lb.NsxvLBAppRule( 'insert-mdp', 'reqadd X-Metadata-Provider:' + edge_id) virt_srvr.add_app_rule(md_app_rule) # When shared proxy is configured, insert authentication string if cfg.CONF.nsxv.metadata_shared_secret: signature = hmac.new( bytearray(cfg.CONF.nsxv.metadata_shared_secret, 'ascii'), bytearray(edge_id, 'ascii'), hashlib.sha256).hexdigest() sign_app_rule = nsxv_lb.NsxvLBAppRule( 'insert-auth', 'reqadd X-Metadata-Provider-Signature:' + signature) virt_srvr.add_app_rule(sign_app_rule) # Create app profile # XFF is inserted in router LBs app_profile = nsxv_lb.NsxvLBAppProfile( name='MDSrvProxy', template=protocol, server_ssl_enabled=not cfg.CONF.nsxv.metadata_insecure, ssl_pass_through=ssl_pass_through, insert_xff=not proxy_lb, client_ssl_cert=cert_id) virt_srvr.set_app_profile(app_profile) # Create pool, members and monitor pool = nsxv_lb.NsxvLBPool( name=METADATA_POOL_NAME) monitor = nsxv_lb.NsxvLBMonitor(name='MDSrvMon', mon_type=mon_type.lower()) pool.add_monitor(monitor) i = 0 for member_ip in member_ips: i += 1 member = nsxv_lb.NsxvLBPoolMember( name='Member-%d' % i, ip_address=member_ip, port=s_port, monitor_port=s_port) pool.add_member(member) virt_srvr.set_default_pool(pool) lb_obj.add_virtual_server(virt_srvr) lb_obj.submit_to_backend(self.nsxv_plugin.nsx_v.vcns, edge_id) def configure_router_edge(self, context, rtr_id): LOG.debug('Configuring metadata infrastructure for %s', rtr_id) ctx = neutron_context.get_admin_context() # 
Connect router interface to inter-edge network port_data = { 'port': { 'network_id': self.internal_net, 'name': None, 'admin_state_up': True, 'device_id': rtr_id, 'device_owner': constants.DEVICE_OWNER_ROUTER_GW, 'fixed_ips': constants.ATTR_NOT_SPECIFIED, 'mac_address': constants.ATTR_NOT_SPECIFIED, 'port_security_enabled': False, 'tenant_id': nsxv_constants.INTERNAL_TENANT_ID}} self.nsxv_plugin.base_create_port(ctx, port_data) address_groups = self._get_address_groups( ctx, self.internal_net, rtr_id, is_proxy=False) edge_utils.update_internal_interface( self.nsxv_plugin.nsx_v, context, rtr_id, self.internal_net, address_groups=address_groups) self._setup_metadata_lb(rtr_id, METADATA_IP_ADDR, METADATA_TCP_PORT, cfg.CONF.nsxv.nova_metadata_port, self.proxy_edge_ips, proxy_lb=False, context=context) def cleanup_router_edge(self, context, rtr_id, warn=False): filters = { 'network_id': [self.internal_net], 'device_id': [rtr_id]} ctx = context.elevated() ports = self.nsxv_plugin.get_ports(ctx, filters=filters) if ports: if warn: LOG.warning("cleanup_router_edge found port %(port)s for " "router %(router)s - deleting it now.", {'port': ports[0]['id'], 'router': rtr_id}) try: self.nsxv_plugin.delete_port( ctx, ports[0]['id'], l3_port_check=False, allow_delete_internal=True) except Exception as e: LOG.error("Failed to delete md_proxy port %(port)s: " "%(e)s", {'port': ports[0]['id'], 'e': e}) def is_md_subnet(self, subnet_id): return self.internal_subnet == subnet_id ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v/plugin.py0000644000175000017500000074503200000000000024071 0ustar00coreycorey00000000000000# Copyright 2014 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from distutils import version import xml.etree.ElementTree as et import netaddr from neutron_lib.agent import topics from neutron_lib.api.definitions import address_scope from neutron_lib.api.definitions import agent as agent_apidef from neutron_lib.api.definitions import allowedaddresspairs as addr_apidef from neutron_lib.api.definitions import availability_zone as az_def from neutron_lib.api.definitions import dvr as dvr_apidef from neutron_lib.api.definitions import external_net as extnet_apidef from neutron_lib.api.definitions import extra_dhcp_opt as ext_edo from neutron_lib.api.definitions import extraroute from neutron_lib.api.definitions import flavors as flavors_apidef from neutron_lib.api.definitions import l3 as l3_apidef from neutron_lib.api.definitions import l3_flavors from neutron_lib.api.definitions import multiprovidernet as mpnet_apidef from neutron_lib.api.definitions import network_availability_zone from neutron_lib.api.definitions import port as port_def from neutron_lib.api.definitions import port_security as psec from neutron_lib.api.definitions import portbindings as pbin from neutron_lib.api.definitions import provider_net as pnet from neutron_lib.api.definitions import router_availability_zone from neutron_lib.api.definitions import subnet as subnet_def from neutron_lib.api.definitions import vlantransparent as vlan_apidef from neutron_lib.api import extensions from neutron_lib.api import validators from neutron_lib.api.validators import availability_zone as az_validator from neutron_lib.callbacks import events from neutron_lib.callbacks 
import registry from neutron_lib.callbacks import resources from neutron_lib import constants from neutron_lib import context as n_context from neutron_lib.db import api as db_api from neutron_lib.db import constants as db_const from neutron_lib.db import resource_extend from neutron_lib.db import utils as db_utils from neutron_lib import exceptions as n_exc from neutron_lib.exceptions import allowedaddresspairs as addr_exc from neutron_lib.exceptions import flavors as flav_exc from neutron_lib.exceptions import l3 as l3_exc from neutron_lib.exceptions import multiprovidernet as mpnet_exc from neutron_lib.exceptions import port_security as psec_exc from neutron_lib.objects import registry as obj_reg from neutron_lib.plugins import constants as plugin_const from neutron_lib.plugins import directory from neutron_lib.plugins import utils from neutron_lib import rpc as n_rpc from neutron_lib.services.qos import constants as qos_consts from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import excutils from oslo_utils import netutils from oslo_utils import uuidutils import six from six import moves from sqlalchemy.orm import exc as sa_exc from neutron.api import extensions as neutron_extensions from neutron.common import ipv6_utils from neutron.common import utils as n_utils from neutron.db import agents_db from neutron.db import allowedaddresspairs_db as addr_pair_db from neutron.db.availability_zone import router as router_az_db from neutron.db import dns_db from neutron.db import external_net_db from neutron.db import extradhcpopt_db from neutron.db import extraroute_db from neutron.db import l3_db from neutron.db import l3_gwmode_db from neutron.db.models import l3 as l3_db_models from neutron.db.models import securitygroup as securitygroup_model from neutron.db import models_v2 from neutron.db import portsecurity_db from neutron.db import securitygroups_db from neutron.db import vlantransparent_db 
from neutron.extensions import securitygroup as ext_sg from neutron.quota import resource_registry from neutron.services.flavors import flavors_plugin from vmware_nsx.dvs import dvs from vmware_nsx.services.qos.common import utils as qos_com_utils from vmware_nsx.services.qos.nsx_v import driver as qos_driver from vmware_nsx.services.qos.nsx_v import utils as qos_utils import vmware_nsx from vmware_nsx._i18n import _ from vmware_nsx.common import availability_zones as nsx_com_az from vmware_nsx.common import config # noqa from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import l3_rpc_agent_api from vmware_nsx.common import locking from vmware_nsx.common import managers as nsx_managers from vmware_nsx.common import nsx_constants from vmware_nsx.common import nsxv_constants from vmware_nsx.common import utils as c_utils from vmware_nsx.db import ( extended_security_group_rule as extend_sg_rule) from vmware_nsx.db import ( routertype as rt_rtr) from vmware_nsx.db import db as nsx_db from vmware_nsx.db import extended_security_group as extended_secgroup from vmware_nsx.db import maclearning as mac_db from vmware_nsx.db import nsx_portbindings_db as pbin_db from vmware_nsx.db import nsxv_db from vmware_nsx.db import vnic_index_db from vmware_nsx.extensions import ( advancedserviceproviders as as_providers) from vmware_nsx.extensions import ( vnicindex as ext_vnic_idx) from vmware_nsx.extensions import dhcp_mtu as ext_dhcp_mtu from vmware_nsx.extensions import dns_search_domain as ext_dns_search_domain from vmware_nsx.extensions import housekeeper as hk_ext from vmware_nsx.extensions import maclearning as mac_ext from vmware_nsx.extensions import nsxpolicy from vmware_nsx.extensions import projectpluginmap from vmware_nsx.extensions import providersecuritygroup as provider_sg from vmware_nsx.extensions import routersize from vmware_nsx.extensions import routertype from vmware_nsx.extensions import secgroup_rule_local_ip_prefix from 
vmware_nsx.extensions import securitygrouplogging as sg_logging from vmware_nsx.extensions import securitygrouppolicy as sg_policy from vmware_nsx.plugins.common.housekeeper import housekeeper from vmware_nsx.plugins.common import plugin as nsx_plugin_common from vmware_nsx.plugins.nsx import utils as tvd_utils from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az from vmware_nsx.plugins.nsx_v import managers from vmware_nsx.plugins.nsx_v import md_proxy as nsx_v_md_proxy from vmware_nsx.plugins.nsx_v.vshield.common import ( constants as vcns_const) from vmware_nsx.plugins.nsx_v.vshield.common import ( exceptions as vsh_exc) from vmware_nsx.plugins.nsx_v.vshield import edge_firewall_driver from vmware_nsx.plugins.nsx_v.vshield import edge_utils from vmware_nsx.plugins.nsx_v.vshield import securitygroup_utils from vmware_nsx.plugins.nsx_v.vshield import vcns_driver from vmware_nsx.services.flowclassifier.nsx_v import utils as fc_utils from vmware_nsx.services.fwaas.common import utils as fwaas_utils from vmware_nsx.services.fwaas.nsx_v import fwaas_callbacks_v2 from vmware_nsx.services.lbaas.nsx_v.implementation import healthmon_mgr from vmware_nsx.services.lbaas.nsx_v.implementation import l7policy_mgr from vmware_nsx.services.lbaas.nsx_v.implementation import l7rule_mgr from vmware_nsx.services.lbaas.nsx_v.implementation import listener_mgr from vmware_nsx.services.lbaas.nsx_v.implementation import loadbalancer_mgr from vmware_nsx.services.lbaas.nsx_v.implementation import member_mgr from vmware_nsx.services.lbaas.nsx_v.implementation import pool_mgr from vmware_nsx.services.lbaas.nsx_v import lbaas_common as lb_common from vmware_nsx.services.lbaas.octavia import constants as oct_const from vmware_nsx.services.lbaas.octavia import octavia_listener LOG = logging.getLogger(__name__) PORTGROUP_PREFIX = 'dvportgroup' ROUTER_SIZE = routersize.ROUTER_SIZE VALID_EDGE_SIZES = routersize.VALID_EDGE_SIZES SUBNET_RULE_NAME = 'Subnet Rule' DNAT_RULE_NAME = 'DNAT 
Rule' ALLOCATION_POOL_RULE_NAME = 'Allocation Pool Rule' NO_SNAT_RULE_NAME = 'No SNAT Rule' UNSUPPORTED_RULE_NAMED_PROTOCOLS = [constants.PROTO_NAME_DCCP, constants.PROTO_NAME_PGM, constants.PROTO_NAME_VRRP, constants.PROTO_NAME_UDPLITE, constants.PROTO_NAME_EGP, constants.PROTO_NAME_IPIP, constants.PROTO_NAME_OSPF, constants.PROTO_NAME_IPV6_ROUTE, constants.PROTO_NAME_IPV6_ENCAP, constants.PROTO_NAME_IPV6_FRAG, constants.PROTO_NAME_IPV6_OPTS, constants.PROTO_NAME_IPV6_NONXT] PROTOCOLS_SUPPORTING_PORTS = [constants.PROTO_NUM_TCP, constants.PROTO_NUM_UDP, constants.PROTO_NUM_ICMP, constants.PROTO_NUM_IPV6_ICMP] @resource_extend.has_resource_extenders class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin, agents_db.AgentDbMixin, nsx_plugin_common.NsxPluginBase, rt_rtr.RouterType_mixin, external_net_db.External_net_db_mixin, extraroute_db.ExtraRoute_db_mixin, extradhcpopt_db.ExtraDhcpOptMixin, router_az_db.RouterAvailabilityZoneMixin, l3_gwmode_db.L3_NAT_db_mixin, pbin_db.NsxPortBindingMixin, portsecurity_db.PortSecurityDbMixin, extend_sg_rule.ExtendedSecurityGroupRuleMixin, securitygroups_db.SecurityGroupDbMixin, extended_secgroup.ExtendedSecurityGroupPropertiesMixin, vnic_index_db.VnicIndexDbMixin, dns_db.DNSDbMixin, nsxpolicy.NsxPolicyPluginBase, vlantransparent_db.Vlantransparent_db_mixin, nsx_com_az.NSXAvailabilityZonesPluginCommon, mac_db.MacLearningDbMixin, hk_ext.Housekeeper): supported_extension_aliases = [agent_apidef.ALIAS, addr_apidef.ALIAS, address_scope.ALIAS, pbin.ALIAS, ext_dns_search_domain.ALIAS, dvr_apidef.ALIAS, "ext-gw-mode", mpnet_apidef.ALIAS, psec.ALIAS, pnet.ALIAS, "quotas", extnet_apidef.ALIAS, ext_edo.ALIAS, extraroute.ALIAS, l3_apidef.ALIAS, "security-group", secgroup_rule_local_ip_prefix.ALIAS, sg_logging.ALIAS, routertype.ALIAS, routersize.ALIAS, ext_vnic_idx.ALIAS, as_providers.ALIAS, "subnet_allocation", az_def.ALIAS, network_availability_zone.ALIAS, router_availability_zone.ALIAS, l3_flavors.ALIAS, flavors_apidef.ALIAS, 
ext_dhcp_mtu.ALIAS, mac_ext.ALIAS, hk_ext.ALIAS, "port-security-groups-filtering"] __native_bulk_support = True __native_pagination_support = True __native_sorting_support = True @resource_registry.tracked_resources( network=models_v2.Network, port=models_v2.Port, subnet=models_v2.Subnet, subnetpool=models_v2.SubnetPool, security_group=securitygroup_model.SecurityGroup, security_group_rule=securitygroup_model.SecurityGroupRule, router=l3_db_models.Router, floatingip=l3_db_models.FloatingIP) def __init__(self): self._is_sub_plugin = tvd_utils.is_tvd_core_plugin() self.init_is_complete = False self.octavia_listener = None self.octavia_stats_collector = None self.housekeeper = None super(NsxVPluginV2, self).__init__() if self._is_sub_plugin: extension_drivers = cfg.CONF.nsx_tvd.nsx_v_extension_drivers else: extension_drivers = cfg.CONF.nsx_extension_drivers self._extension_manager = nsx_managers.ExtensionManager( extension_drivers=extension_drivers) # Bind the dummy L3 notifications self.l3_rpc_notifier = l3_rpc_agent_api.L3NotifyAPI() self._extension_manager.initialize() self.supported_extension_aliases.extend( self._extension_manager.extension_aliases()) self.metadata_proxy_handler = None config.validate_nsxv_config_options() self._network_vlans = utils.parse_network_vlan_ranges( cfg.CONF.nsxv.network_vlan_ranges) neutron_extensions.append_api_extensions_path( [vmware_nsx.NSX_EXT_PATH]) # This needs to be set prior to binding callbacks if cfg.CONF.nsxv.use_dvs_features: self._vcm = dvs.VCManager() else: self._vcm = None # Create the client to interface with the NSX-v _nsx_v_callbacks = edge_utils.NsxVCallbacks(self) self.nsx_v = vcns_driver.VcnsDriver(_nsx_v_callbacks) # Use the existing class instead of creating a new instance self.lbv2_driver = self.nsx_v # Ensure that edges do concurrency self._ensure_lock_operations() self._validate_nsx_version() # Configure aggregate publishing self._aggregate_publishing() # Configure edge reservations 
self._configure_reservations() self.edge_manager = edge_utils.EdgeManager(self.nsx_v, self) self.nsx_sg_utils = securitygroup_utils.NsxSecurityGroupUtils( self.nsx_v) self.init_availability_zones() self._validate_config() self._use_nsx_policies = False if cfg.CONF.nsxv.use_nsx_policies: if not c_utils.is_nsxv_version_6_2(self.nsx_v.vcns.get_version()): error = (_("NSX policies are not supported for version " "%(ver)s.") % {'ver': self.nsx_v.vcns.get_version()}) raise nsx_exc.NsxPluginException(err_msg=error) # Support NSX policies in default security groups self._use_nsx_policies = True # enable the extension self.supported_extension_aliases.append(sg_policy.ALIAS) self.supported_extension_aliases.append(nsxpolicy.ALIAS) # Support transparent VLANS from 6.3.0 onwards. The feature is only # supported if the global configuration flag vlan_transparent is # True if cfg.CONF.vlan_transparent: if c_utils.is_nsxv_version_6_3(self.nsx_v.vcns.get_version()): self.supported_extension_aliases.append(vlan_apidef.ALIAS) else: LOG.warning("Transparent support only from " "NSX 6.3 onwards") self.sg_container_id = self._create_security_group_container() self.default_section = self._create_cluster_default_fw_section() self._router_managers = managers.RouterTypeManager(self) # Make sure starting rpc listeners (for QoS and other agents) # will happen only once self.start_rpc_listeners_called = False self.fwaas_callbacks = None # Service insertion driver register self._si_handler = fc_utils.NsxvServiceInsertionHandler(self) registry.subscribe(self.add_vms_to_service_insertion, fc_utils.SERVICE_INSERTION_RESOURCE, events.AFTER_CREATE) # Subscribe to subnet pools changes registry.subscribe( self.on_subnetpool_address_scope_updated, resources.SUBNETPOOL_ADDRESS_SCOPE, events.AFTER_UPDATE) if c_utils.is_nsxv_version_6_2(self.nsx_v.vcns.get_version()): self.supported_extension_aliases.append(provider_sg.ALIAS) # Bind QoS notifications qos_driver.register(self) 
registry.subscribe(self.spawn_complete, resources.PROCESS, events.AFTER_SPAWN) # subscribe the init complete method last, so it will be called only # if init was successful registry.subscribe(self.init_complete, resources.PROCESS, events.AFTER_INIT) @staticmethod def plugin_type(): return projectpluginmap.NsxPlugins.NSX_V @staticmethod def is_tvd_plugin(): return False def spawn_complete(self, resource, event, trigger, payload=None): # Init the FWaaS support with RPC listeners for the original process self._init_fwaas(with_rpc=True) # The rest of this method should run only once, but after init_complete if not self.init_is_complete: self.init_complete(None, None, None) if not self._is_sub_plugin: self.octavia_stats_collector = ( octavia_listener.NSXOctaviaStatisticsCollector( self, self._get_octavia_stats_getter())) def init_complete(self, resource, event, trigger, payload=None): with locking.LockManager.get_lock('plugin-init-complete'): if self.init_is_complete: # Should be called only once per worker return has_metadata_cfg = ( cfg.CONF.nsxv.nova_metadata_ips and cfg.CONF.nsxv.mgt_net_moid and cfg.CONF.nsxv.mgt_net_proxy_ips and cfg.CONF.nsxv.mgt_net_proxy_netmask) if has_metadata_cfg: # Init md_proxy handler per availability zone self.metadata_proxy_handler = {} for az in self.get_azs_list(): # create metadata handler only if the az supports it. 
# if not, the global one will be used if az.supports_metadata(): self.metadata_proxy_handler[az.name] = ( nsx_v_md_proxy.NsxVMetadataProxyHandler( self, az)) LOG.debug('Metadata is configured for AZs %s', self.metadata_proxy_handler.keys()) else: LOG.debug('No metadata configuration available!') self.housekeeper = housekeeper.NsxHousekeeper( hk_ns='vmware_nsx.neutron.nsxv.housekeeper.jobs', hk_jobs=cfg.CONF.nsxv.housekeeping_jobs, hk_readonly=cfg.CONF.nsxv.housekeeping_readonly, hk_readonly_jobs=cfg.CONF.nsxv.housekeeping_readonly_jobs) # Init octavia listener and endpoints if not self._is_sub_plugin: octavia_objects = self._get_octavia_objects() self.octavia_listener = octavia_listener.NSXOctaviaListener( **octavia_objects) # Init the FWaaS support without RPC listeners # for the spawn workers self._init_fwaas(with_rpc=False) self.init_is_complete = True def _get_octavia_objects(self): return { 'loadbalancer': loadbalancer_mgr.EdgeLoadBalancerManagerFromDict( self.nsx_v), 'listener': listener_mgr.EdgeListenerManagerFromDict(self.nsx_v), 'pool': pool_mgr.EdgePoolManagerFromDict(self.nsx_v), 'member': member_mgr.EdgeMemberManagerFromDict(self.nsx_v), 'healthmonitor': healthmon_mgr.EdgeHealthMonitorManagerFromDict( self.nsx_v), 'l7policy': l7policy_mgr.EdgeL7PolicyManagerFromDict(self.nsx_v), 'l7rule': l7rule_mgr.EdgeL7RuleManagerFromDict(self.nsx_v)} def _get_octavia_stats_getter(self): return listener_mgr.stats_getter def _validate_nsx_version(self): ver = self.nsx_v.vcns.get_version() if version.LooseVersion(ver) < version.LooseVersion('6.2.3'): error = _("Plugin version doesn't support NSX version %s.") % ver raise nsx_exc.NsxPluginException(err_msg=error) def get_metadata_proxy_handler(self, az_name): if not self.metadata_proxy_handler: return None if az_name in self.metadata_proxy_handler: return self.metadata_proxy_handler[az_name] # fallback to the global handler # Note(asarfaty): in case this is called during init_complete the # default availability zone may 
        # still not exist, so .get() may legitimately return None here.
        return self.metadata_proxy_handler.get(nsx_az.DEFAULT_NAME)

    def add_vms_to_service_insertion(self, sg_id):
        """Asynchronously add existing compute VMs to security group sg_id."""

        def _add_vms_to_service_insertion(*args, **kwargs):
            """Adding existing VMs to the service insertion security group

            Adding all current compute ports with port security to the
            service insertion security group in order to classify their
            traffic by the security redirect rules
            """
            sg_id = args[0]
            context = n_context.get_admin_context()
            filters = {'device_owner': ['compute:None']}
            ports = self.get_ports(context, filters=filters)
            for port in ports:
                # Only add compute ports with device-id, vnic & port security
                if (validators.is_attr_set(
                        port.get(ext_vnic_idx.VNIC_INDEX)) and
                    validators.is_attr_set(port.get('device_id')) and
                        port[psec.PORTSECURITY]):
                    try:
                        vnic_idx = port[ext_vnic_idx.VNIC_INDEX]
                        device_id = port['device_id']
                        vnic_id = self._get_port_vnic_id(vnic_idx, device_id)
                        self._add_member_to_security_group(sg_id, vnic_id)
                    except Exception as e:
                        # Best-effort: a single failing port must not abort
                        # the whole scan.
                        LOG.info('Could not add port %(port)s to service '
                                 'insertion security group. Exception '
                                 '%(err)s',
                                 {'port': port['id'], 'err': e})

        # Doing this in a separate thread to not slow down the init process
        # in case there are many compute ports
        c_utils.spawn_n(_add_vms_to_service_insertion, sg_id)

    def start_rpc_listeners(self):
        """Create the RPC consumers once; idempotent on repeated calls."""
        if self.start_rpc_listeners_called:
            # If called more than once - we should not create it again
            return self.conn.consume_in_threads()

        LOG.info("NSXV plugin: starting RPC listeners")
        self.endpoints = [agents_db.AgentExtRpcCallback()]
        self.topic = topics.PLUGIN
        self.conn = n_rpc.Connection()
        self.conn.create_consumer(self.topic, self.endpoints, fanout=False)
        self.start_rpc_listeners_called = True

        return self.conn.consume_in_threads()

    def _init_fwaas(self, with_rpc):
        # Bind FWaaS callbacks to the driver
        if fwaas_utils.is_fwaas_v2_plugin_enabled():
            LOG.info("NSXv FWaaS v2 plugin enabled")
            self.fwaas_callbacks = fwaas_callbacks_v2.NsxvFwaasCallbacksV2(
                with_rpc)

    def _create_security_group_container(self):
        """Get-or-create the backend container security group, return its id.

        Guarded by a lock so concurrent workers do not create duplicates.
        """
        name = "OpenStack Security Group container"
        with locking.LockManager.get_lock('security-group-container-init'):
            container_id = self.nsx_v.vcns.get_security_group_id(name)
            if not container_id:
                description = ("OpenStack Security Group Container, "
                               "managed by Neutron nsx-v plugin.")
                container = {"securitygroup": {"name": name,
                                               "description": description}}
                h, container_id = (
                    self.nsx_v.vcns.create_security_group(container))
            return container_id

    def _find_router_driver(self, context, router_id):
        """Load the router DB row and resolve its type-specific driver."""
        router_qry = context.session.query(l3_db_models.Router)
        router_db = router_qry.filter_by(id=router_id).one()
        return self._get_router_driver(context, router_db)

    def _get_router_driver(self, context, router_db):
        """Resolve the router driver from the DB row's NSX attributes.

        'distributed' wins over the stored router_type.
        """
        router_type_dict = {}
        self._extend_nsx_router_dict(router_type_dict, router_db)
        router_type = None
        if router_type_dict.get("distributed", False):
            router_type = "distributed"
        else:
            router_type = router_type_dict.get("router_type")
        return self._router_managers.get_tenant_router_driver(
            context, router_type)

    def _decide_router_type(self, context, r):
        """Normalize the request dict r in-place to a concrete router type.

        Raises InvalidInput when both 'distributed' and 'router_type'
        are requested together.
        """
        router_type = None
        if (validators.is_attr_set(r.get("distributed")) and
            r.get("distributed")):
            router_type = "distributed"
            if validators.is_attr_set(r.get("router_type")):
                err_msg = _('Can not support router_type extension for '
                            'distributed router')
                raise n_exc.InvalidInput(error_message=err_msg)
        elif validators.is_attr_set(r.get("router_type")):
            router_type = r.get("router_type")

        router_type = self._router_managers.decide_tenant_router_type(
            context, router_type)
        if router_type == "distributed":
            r["distributed"] = True
            # distributed routers are implemented over an exclusive edge
            r["router_type"] = "exclusive"
        else:
            r["distributed"] = False
            r["router_type"] = router_type

    @staticmethod
    @resource_extend.extends([l3_apidef.ROUTERS])
    def _extend_nsx_router_dict(router_res, router_db):
        # Copy the NSX-specific router attributes onto the API result dict.
        router_type_obj = rt_rtr.RouterType_mixin()
        router_type_obj._extend_nsx_router_dict(
            router_res, router_db, router_type_obj.nsx_attributes)

    def _get_cluster_default_fw_section_rules(self):
        """Build Default cluster rules"""
        # Protocol numbers: 17 = UDP (DHCP/DHCPv6 ports), 58 = ICMPv6.
        rules = [{'name': 'Default DHCP rule for OS Security Groups',
                  'action': 'allow',
                  'services': [('17', '67', None, None),
                               ('17', '68', None, None)]},
                 {'name': 'Default ICMPv6 rule for OS Security Groups',
                  'action': 'allow',
                  'services': [('58', None, constants.ICMPV6_TYPE_NS, None),
                               ('58', None, constants.ICMPV6_TYPE_NA, None),
                               ('58', None, constants.ICMPV6_TYPE_RA, None),
                               ('58', None,
                                constants.ICMPV6_TYPE_MLD_QUERY, None)]},
                 {'name': 'Default DHCPv6 rule for OS Security Groups',
                  'action': 'allow',
                  'services': [('17', '546', None, None),
                               ('17', '547', None, None)]}]
        if cfg.CONF.nsxv.cluster_moid:
            # Apply to the configured vSphere cluster(s) directly
            applied_to_ids = cfg.CONF.nsxv.cluster_moid
            applied_to_type = 'ClusterComputeResource'
        else:
            # Otherwise scope the rules to the plugin's SG container
            applied_to_ids = [self.sg_container_id]
            applied_to_type = 'SecurityGroup'
        rule_list = []
        for rule in rules:
            rule_config = self.nsx_sg_utils.get_rule_config(
                applied_to_ids, rule['name'], rule['action'],
                applied_to_type, services=rule['services'],
                logged=cfg.CONF.nsxv.log_security_groups_allowed_traffic)
            rule_list.append(rule_config)
        # Allow IGMP traffic via the backend's predefined application
        # services, when those services exist on this NSX deployment.
        igmp_names = ['IGMP Membership Query', 'IGMP V2 Membership Report',
                      'IGMP V3 Membership Report', 'IGMP Leave Group']
        igmp_ids = []
        for name in igmp_names:
            igmp_id = self._get_appservice_id(name)
            if igmp_id:
                igmp_ids.append(igmp_id)
        if igmp_ids:
            rules = [{'name': 'Default IGMP rule for OS Security Groups',
                      'action': 'allow',
                      'service_ids': igmp_ids}]
            for rule in rules:
                rule_config = self.nsx_sg_utils.get_rule_config(
                    applied_to_ids, rule['name'], rule['action'],
                    applied_to_type,
                    application_services=rule['service_ids'],
                    logged=cfg.CONF.nsxv.log_security_groups_allowed_traffic)
                rule_list.append(rule_config)

        # Default security-group rules: final catch-all deny on the
        # SG container.
        block_rule = self.nsx_sg_utils.get_rule_config(
            [self.sg_container_id], 'Block All', 'deny',
            logged=cfg.CONF.nsxv.log_security_groups_blocked_traffic)
        rule_list.append(block_rule)
        return rule_list

    def _create_cluster_default_fw_section(self, update_section=False):
        """Create or refresh the cluster default FW section, return its id.

        :param update_section: when True, rewrite an existing section with
            the freshly-built default rules instead of leaving it as is.
        """
        section_name = 'OS Cluster Security Group section'
        with locking.LockManager.get_lock('default-section-init'):
            section_id = self.nsx_v.vcns.get_section_id(section_name)
            if section_id and not update_section:
                # No need to update an existing section, unless the
                # configuration changed
                return section_id
            rule_list = self._get_cluster_default_fw_section_rules()
            section = self.nsx_sg_utils.get_section_with_rules(
                section_name, rule_list, section_id)
            section_req_body = self.nsx_sg_utils.to_xml_string(section)
            if section_id:
                self.nsx_v.vcns.update_section_by_id(
                    section_id, 'ip', section_req_body)
            else:
                # cluster section does not exists.
Create it above the # default l3 section try: l3_id = self.nsx_v.vcns.get_default_l3_id() h, c = self.nsx_v.vcns.create_section( 'ip', section_req_body, insert_before=l3_id) section_id = self.nsx_sg_utils.parse_and_get_section_id(c) except Exception as e: # another controller might have already created one section_id = self.nsx_v.vcns.get_section_id(section_name) if not section_id: with excutils.save_and_reraise_exception(): LOG.error("Failed to create default section: %s", e) return section_id def _create_dhcp_static_binding(self, context, neutron_port_db): network_id = neutron_port_db['network_id'] device_owner = neutron_port_db['device_owner'] if device_owner.startswith("compute"): s_bindings = self.edge_manager.create_static_binding( context, neutron_port_db) self.edge_manager.create_dhcp_bindings( context, neutron_port_db['id'], network_id, s_bindings) def _delete_dhcp_static_binding(self, context, neutron_port_db, log_error=True): port_id = neutron_port_db['id'] network_id = neutron_port_db['network_id'] try: self.edge_manager.delete_dhcp_binding( context, port_id, network_id, neutron_port_db['mac_address']) except Exception as e: msg = ("Unable to delete static bindings for port %(id)s" "Error: %(e)s" % {'id': port_id, 'e': e}) if log_error: LOG.error(msg) else: LOG.info(msg) def _validate_network_qos(self, context, network, backend_network): err_msg = None if validators.is_attr_set(network.get(qos_consts.QOS_POLICY_ID)): if not backend_network: err_msg = (_("Cannot configure QOS on external networks")) if not cfg.CONF.nsxv.use_dvs_features: err_msg = (_("Cannot configure QOS " "without enabling use_dvs_features")) if err_msg: raise n_exc.InvalidInput(error_message=err_msg) self._validate_qos_policy_id( context, network.get(qos_consts.QOS_POLICY_ID)) def _get_network_az_from_net_data(self, net_data): if az_def.AZ_HINTS in net_data and net_data[az_def.AZ_HINTS]: return self._availability_zones_data.get_availability_zone( net_data[az_def.AZ_HINTS][0]) return 
self.get_default_az() def _get_network_az_dvs_id(self, net_data): az = self._get_network_az_from_net_data(net_data) return az.dvs_id def _get_network_vdn_scope_id(self, net_data): az = self._get_network_az_from_net_data(net_data) return az.vdn_scope_id def _validate_dvs_id(self, dvs_id): if not self.nsx_v.vcns.validate_dvs( dvs_id, dvs_list=self.existing_dvs): # try to retrieve the dvs list again in case 1 was added self.existing_dvs = self.nsx_v.vcns.get_dvs_list() if not self.nsx_v.vcns.validate_dvs( dvs_id, dvs_list=self.existing_dvs): return False return True def _validate_provider_create(self, context, network): if not validators.is_attr_set(network.get(mpnet_apidef.SEGMENTS)): return az_dvs = self._get_network_az_dvs_id(network) for segment in network[mpnet_apidef.SEGMENTS]: network_type = segment.get(pnet.NETWORK_TYPE) physical_network = segment.get(pnet.PHYSICAL_NETWORK) segmentation_id = segment.get(pnet.SEGMENTATION_ID) network_type_set = validators.is_attr_set(network_type) segmentation_id_set = validators.is_attr_set(segmentation_id) physical_network_set = validators.is_attr_set(physical_network) err_msg = None if not network_type_set: err_msg = _("%s required") % pnet.NETWORK_TYPE elif network_type == c_utils.NsxVNetworkTypes.FLAT: if segmentation_id_set: err_msg = _("Segmentation ID cannot be specified with " "flat network type") if physical_network_set: # Validate the DVS Id if not self._validate_dvs_id(physical_network): err_msg = (_("DVS Id %s could not be found") % physical_network) elif network_type == c_utils.NsxVNetworkTypes.VLAN: if not segmentation_id_set: if physical_network_set: if physical_network not in self._network_vlans: err_msg = _("Invalid physical network for " "segmentation ID allocation") else: err_msg = _("Segmentation ID must be specified with " "vlan network type") elif (segmentation_id_set and not utils.is_valid_vlan_tag(segmentation_id)): err_msg = (_("%(segmentation_id)s out of range " "(%(min_id)s through %(max_id)s)") % 
{'segmentation_id': segmentation_id, 'min_id': constants.MIN_VLAN_TAG, 'max_id': constants.MAX_VLAN_TAG}) elif (segmentation_id_set and physical_network_set and not self._validate_dvs_id(physical_network)): err_msg = (_("DVS Id %s could not be found") % physical_network) else: # Verify segment is not already allocated bindings = nsxv_db.get_network_bindings_by_vlanid( context.session, segmentation_id) if bindings: dvs_ids = self._get_dvs_ids(physical_network, az_dvs) for phy_uuid in dvs_ids: for binding in bindings: if binding['phy_uuid'] == phy_uuid: raise n_exc.VlanIdInUse( vlan_id=segmentation_id, physical_network=phy_uuid) elif network_type == c_utils.NsxVNetworkTypes.VXLAN: # Currently unable to set the segmentation id if segmentation_id_set: err_msg = _("Segmentation ID cannot be set with VXLAN") elif network_type == c_utils.NsxVNetworkTypes.PORTGROUP: external = network.get(extnet_apidef.EXTERNAL) if segmentation_id_set: err_msg = _("Segmentation ID cannot be set with portgroup") if not physical_network_set: err_msg = _("Physical network must be set") elif not self.nsx_v.vcns.validate_network(physical_network): err_msg = _("Physical network doesn't exist") # A provider network portgroup will need the network name to # match the portgroup name elif ((not validators.is_attr_set(external) or validators.is_attr_set(external) and not external) and not self.nsx_v.vcns.validate_network_name( physical_network, network['name'])): err_msg = _("Portgroup name must match network name") # make sure no other neutron network is using it bindings = ( nsxv_db.get_network_bindings_by_physical_net_and_type( context.elevated().session, physical_network, network_type)) if bindings: err_msg = (_('protgroup %s is already used by ' 'another network') % physical_network) else: err_msg = (_("%(net_type_param)s %(net_type_value)s not " "supported") % {'net_type_param': pnet.NETWORK_TYPE, 'net_type_value': network_type}) if err_msg: raise n_exc.InvalidInput(error_message=err_msg) # 
            # TODO(salvatore-orlando): Validate transport zone uuid
            # which should be specified in physical_network

    def _validate_network_type(self, context, network_id, net_types):
        """Return True when all of the network's bindings are in net_types.

        Networks with no bindings at all return False.
        """
        bindings = nsxv_db.get_network_bindings(context.session, network_id)
        multiprovider = nsx_db.is_multiprovider_network(context.session,
                                                        network_id)
        if bindings:
            if not multiprovider:
                return bindings[0].binding_type in net_types
            else:
                for binding in bindings:
                    if binding.binding_type not in net_types:
                        return False
                return True
        return False

    def _extend_network_dict_provider(self, context, network,
                                      multiprovider=None, bindings=None):
        """Fill provider-network and AZ fields on the network API dict.

        bindings/multiprovider may be passed in to avoid extra DB lookups.
        """
        if 'id' not in network:
            return
        if not bindings:
            bindings = nsxv_db.get_network_bindings(context.session,
                                                    network['id'])
        if not multiprovider:
            multiprovider = nsx_db.is_multiprovider_network(context.session,
                                                            network['id'])
        # With NSX plugin 'normal' overlay networks will have no binding
        # TODO(salvatore-orlando) make sure users can specify a distinct
        # phy_uuid as 'provider network' for STT net type
        if bindings:
            if not multiprovider:
                # network came in through provider networks api
                network[pnet.NETWORK_TYPE] = bindings[0].binding_type
                network[pnet.PHYSICAL_NETWORK] = bindings[0].phy_uuid
                network[pnet.SEGMENTATION_ID] = bindings[0].vlan_id
            else:
                # network come in though multiprovider networks api
                network[mpnet_apidef.SEGMENTS] = [
                    {pnet.NETWORK_TYPE: binding.binding_type,
                     pnet.PHYSICAL_NETWORK: binding.phy_uuid,
                     pnet.SEGMENTATION_ID: binding.vlan_id}
                    for binding in bindings]
        # update availability zones
        network[az_def.COLLECTION_NAME] = (
            self._get_network_availability_zones(context, network))

    def _get_subnet_as_providers(self, context, subnet, nw_dict=None):
        """List edge ids serving the subnet's network.

        :param nw_dict: optional pre-built {network_id: [edge_id]} cache;
            when absent the bindings are fetched from the DB.
        """
        net_id = subnet.get('network_id')
        if net_id is None:
            net_id = self.get_subnet(context, subnet['id']).get('network_id')
        if nw_dict:
            providers = nw_dict.get(net_id, [])
        else:
            as_provider_data = nsxv_db.get_edge_vnic_bindings_by_int_lswitch(
                context.session, net_id)
            providers = [asp['edge_id'] for asp in as_provider_data]
        return providers

    def get_subnet(self, context, id, fields=None):
        """Standard get_subnet, plus ADV_SERVICE_PROVIDERS for admins."""
        subnet = super(NsxVPluginV2, self).get_subnet(context, id, fields)
        if not context.is_admin:
            return subnet
        elif fields and as_providers.ADV_SERVICE_PROVIDERS in fields:
            subnet[as_providers.ADV_SERVICE_PROVIDERS] = (
                self._get_subnet_as_providers(context, subnet))
        return subnet

    def get_subnets(self, context, filters=None, fields=None, sorts=None,
                    limit=None, marker=None, page_reverse=False):
        """Standard get_subnets with admin-only service-provider handling."""
        subnets = super(NsxVPluginV2, self).get_subnets(context, filters,
                                                        fields, sorts, limit,
                                                        marker, page_reverse)

        if not context.is_admin or (not filters and not fields):
            return subnets

        new_subnets = []
        if ((fields and as_providers.ADV_SERVICE_PROVIDERS in fields) or
            (filters and filters.get(as_providers.ADV_SERVICE_PROVIDERS))):

            # This ugly mess should reduce DB calls with network_id field
            # as filter - as network_id is not indexed
            vnic_binds = nsxv_db.get_edge_vnic_bindings_with_networks(
                context.session)
            nw_dict = {}
            for vnic_bind in vnic_binds:
                if nw_dict.get(vnic_bind['network_id']):
                    nw_dict[vnic_bind['network_id']].append(
                        vnic_bind['edge_id'])
                else:
                    nw_dict[vnic_bind['network_id']] = [vnic_bind['edge_id']]

            # We only deal metadata provider field when:
            # - adv_service_provider is explicitly retrieved
            # - adv_service_provider is used in a filter
            for subnet in subnets:
                as_provider = self._get_subnet_as_providers(
                    context, subnet, nw_dict)
                md_filter = (
                    None if filters is None
                    else filters.get(as_providers.ADV_SERVICE_PROVIDERS))

                if md_filter is None or len(set(as_provider) &
                                            set(md_filter)):
                    # Include metadata_providers only if requested in results
                    if (fields and
                        as_providers.ADV_SERVICE_PROVIDERS in fields):
                        subnet[as_providers.ADV_SERVICE_PROVIDERS] = (
                            as_provider)

                    new_subnets.append(subnet)
        else:
            # No need to handle metadata providers field
            return subnets

        return new_subnets

    def _convert_to_transport_zones_dict(self, network):
        """Converts the provider request body to multiprovider.

        Returns: True if request is multiprovider False if provider
        and None if neither.
        """
        if any(validators.is_attr_set(network.get(f))
               for f in (pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
                         pnet.SEGMENTATION_ID)):
            if validators.is_attr_set(network.get(mpnet_apidef.SEGMENTS)):
                raise mpnet_exc.SegmentsSetInConjunctionWithProviders()
            # convert to transport zone list
            network[mpnet_apidef.SEGMENTS] = [
                {pnet.NETWORK_TYPE: network[pnet.NETWORK_TYPE],
                 pnet.PHYSICAL_NETWORK: network[pnet.PHYSICAL_NETWORK],
                 pnet.SEGMENTATION_ID: network[pnet.SEGMENTATION_ID]}]
            del network[pnet.NETWORK_TYPE]
            del network[pnet.PHYSICAL_NETWORK]
            del network[pnet.SEGMENTATION_ID]
            return False
        if validators.is_attr_set(network.get(mpnet_apidef.SEGMENTS)):
            return True

    def _delete_backend_network(self, moref, dvs_id=None):
        """Deletes the backend NSX network.

        This can either be a VXLAN or a VLAN network. The type is
        determined by the prefix of the moref.
        The dvs_id is relevant only if it is a vlan network
        """
        if moref.startswith(PORTGROUP_PREFIX):
            self.nsx_v.delete_port_group(dvs_id, moref)
        else:
            self.nsx_v.delete_virtual_wire(moref)

    def _get_vlan_network_name(self, net_data, dvs_id):
        """Build the backend portgroup name '<dvs[-name] prefix>-<net id>'."""
        if net_data.get('name') is None:
            net_data['name'] = ''
        # Maximum name length is 80 characters. 'id' length is 36
        # maximum prefix for name plus dvs-id is 43
        if net_data['name'] == '':
            prefix = dvs_id[:43]
        else:
            prefix = ('%s-%s' % (dvs_id, net_data['name']))[:43]
        return '%s-%s' % (prefix, net_data['id'])

    def _update_network_teaming(self, dvs_id, net_id, net_moref):
        """Best-effort: copy the VDN switch teaming policy to the portgroup.

        All failures are logged and swallowed - teaming update must not
        fail network creation.
        """
        if self._vcm:
            try:
                h, switch = self.nsx_v.vcns.get_vdn_switch(dvs_id)
            except Exception:
                LOG.warning('DVS %s not registered on NSX. Unable to '
                            'update teaming for network %s',
                            dvs_id, net_id)
                return
            try:
                self._vcm.update_port_groups_config(
                    dvs_id, net_id, net_moref,
                    self._vcm.update_port_group_spec_teaming,
                    switch)
            except Exception as e:
                LOG.error('Unable to update teaming information for '
                          'net %(net_id)s. Error: %(e)s',
                          {'net_id': net_id, 'e': e})

    def _create_vlan_network_at_backend(self, net_data, dvs_id):
        """Create a backend portgroup for the network; return its moref."""
        network_name = self._get_vlan_network_name(net_data, dvs_id)
        segment = net_data[mpnet_apidef.SEGMENTS][0]
        # FLAT networks are modeled as VLAN 0 portgroups
        vlan_tag = 0
        if (segment.get(pnet.NETWORK_TYPE) ==
            c_utils.NsxVNetworkTypes.VLAN):
            vlan_tag = segment.get(pnet.SEGMENTATION_ID, 0)
        portgroup = {'vlanId': vlan_tag,
                     'networkBindingType': 'Static',
                     'networkName': network_name,
                     'networkType': 'Isolation'}
        config_spec = {'networkSpec': portgroup}
        try:
            h, c = self.nsx_v.vcns.create_port_group(dvs_id,
                                                     config_spec)
        except Exception as e:
            error = (_("Failed to create port group on DVS: %(dvs_id)s. "
                       "Reason: %(reason)s") % {'dvs_id': dvs_id,
                                                'reason': e.response})
            raise nsx_exc.NsxPluginException(err_msg=error)
        self._update_network_teaming(dvs_id, net_data['id'], c)
        return c

    def _get_dvs_ids(self, physical_network, default_dvs):
        """Extract DVS-IDs provided in the physical network field.

        If physical network attribute is not set, return the pre configured
        dvs-id from nsx.ini file, otherwise convert physical network string
        to a list of unique DVS-IDs.
        """
        if not validators.is_attr_set(physical_network):
            return [default_dvs]
        # Return unique DVS-IDs only and ignore duplicates
        return list(set(
            dvs.strip() for dvs in physical_network.split(',') if dvs))

    def _add_member_to_security_group(self, sg_id, vnic_id):
        """Add a vnic to an NSX security group; re-raises backend errors."""
        with locking.LockManager.get_lock('neutron-security-ops' +
                                          str(sg_id)):
            try:
                self.nsx_v.vcns.add_member_to_security_group(
                    sg_id, vnic_id)
                # NOTE(review): the placeholders below look swapped
                # (sg_id printed where the member is described and vice
                # versa) - log-text only, confirm before changing.
                LOG.info("Added %(sg_id)s member to NSX security "
                         "group %(vnic_id)s",
                         {'sg_id': sg_id, 'vnic_id': vnic_id})
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.error("NSX security group %(sg_id)s member add "
                              "failed %(vnic_id)s.",
                              {'sg_id': sg_id, 'vnic_id': vnic_id})

    def _add_security_groups_port_mapping(self, session, vnic_id,
                                          added_sgids):
        """Add the vnic to each neutron SG's NSX counterpart."""
        if vnic_id is None or added_sgids is None:
            return
        for add_sg in added_sgids:
            nsx_sg_id = nsx_db.get_nsx_security_group_id(session, add_sg,
                                                         moref=True)
            if nsx_sg_id is None:
                LOG.warning("NSX security group not found for %s", add_sg)
            else:
                self._add_member_to_security_group(nsx_sg_id, vnic_id)

    def _remove_member_from_security_group(self, sg_id, vnic_id):
        """Remove a vnic from an NSX security group; failures only logged."""
        with locking.LockManager.get_lock('neutron-security-ops' +
                                          str(sg_id)):
            try:
                h, c = self.nsx_v.vcns.remove_member_from_security_group(
                    sg_id, vnic_id)
            except Exception:
                LOG.debug("NSX security group %(nsx_sg_id)s member "
                          "delete failed %(vnic_id)s",
                          {'nsx_sg_id': sg_id,
                           'vnic_id': vnic_id})

    def _delete_security_groups_port_mapping(self, session, vnic_id,
                                             deleted_sgids):
        """Remove the vnic from each neutron SG's NSX counterpart."""
        if vnic_id is None or deleted_sgids is None:
            return
        # Remove vnic from delete security groups binding
        for del_sg in deleted_sgids:
            nsx_sg_id = nsx_db.get_nsx_security_group_id(session, del_sg,
                                                         moref=True)
            if nsx_sg_id is None:
                LOG.warning("NSX security group not found for %s", del_sg)
            else:
                self._remove_member_from_security_group(nsx_sg_id, vnic_id)

    def _update_security_groups_port_mapping(self, session, port_id,
                                             vnic_id, current_sgids,
                                             new_sgids):
        """Diff current vs new SG lists and apply add/remove on the vnic."""
        new_sgids = new_sgids or []
        current_sgids = current_sgids or []
        # If no vnic binding is found, nothing can be done, so return
        if vnic_id is None:
            return
        deleted_sgids = set()
        added_sgids = set()
        # Find all delete security group from port binding
        for curr_sg in current_sgids:
            if curr_sg not in new_sgids:
                deleted_sgids.add(curr_sg)
        # Find all added security group from port binding
        for new_sg in new_sgids:
            if new_sg not in current_sgids:
                added_sgids.add(new_sg)

        self._delete_security_groups_port_mapping(session, vnic_id,
                                                  deleted_sgids)
        self._add_security_groups_port_mapping(session, vnic_id,
                                               added_sgids)

    def _get_port_vnic_id(self, port_index, device_id):
        # The vnic-id format which is expected by NSXv
        return '%s.%03d' % (device_id, port_index)

    def init_availability_zones(self):
        """Load the NSX-V availability zones from configuration."""
        self._availability_zones_data = nsx_az.NsxVAvailabilityZones(
            use_tvd_config=self._is_sub_plugin)

    def _list_availability_zones(self, context, filters=None):
        """Map (az_name, resource) -> True for network/router resources.

        NOTE(review): the 'resource' membership test below dereferences
        filters unconditionally - a None filters value would raise;
        presumably callers always pass a dict. Confirm against callers.
        """
        result = {}
        for az in self.get_azs_names():
            # Add this availability zone as a router & network resource
            if filters:
                if 'name' in filters and az not in filters['name']:
                    continue
            for res in ['network', 'router']:
                if 'resource' not in filters or res in filters['resource']:
                    result[(az, res)] = True
        return result

    def _validate_availability_zones_in_obj(self, context, resource_type,
                                            obj_data):
        """Validate AZ hints on a request body, when present."""
        if az_def.AZ_HINTS in obj_data:
            self.validate_availability_zones(context, resource_type,
                                             obj_data[az_def.AZ_HINTS],
                                             force=True)

    def validate_availability_zones(self, context, resource_type,
                                    availability_zones, force=False):
        """Verify that the availability zones exist, and only 1 hint
        was set.
        """
        # This method is called directly from this plugin but also from
        # registered callbacks
        if self._is_sub_plugin and not force:
            # validation should be done together for both plugins
            return
        return self.validate_obj_azs(availability_zones)

    def _prepare_spoofguard_policy(self, network_type, net_data,
                                   net_morefs):
        # The method will determine if a portgroup is already assigned to a
        # spoofguard policy.
        # If so, it will return the predefined policy. If
        # not a new spoofguard policy will be created.
        # Returns (policy_id, predefined_flag).
        if network_type == c_utils.NsxVNetworkTypes.PORTGROUP:
            pcs = self.nsx_v.vcns.get_spoofguard_policies()[1].get(
                'policies', [])
            for policy in pcs:
                for ep in policy['enforcementPoints']:
                    if ep['id'] == net_morefs[0]:
                        # Reuse the policy already enforcing this portgroup
                        return policy['policyId'], True
            LOG.warning("No spoofguard policy will be created for %s",
                        net_data['id'])
            return None, False

        # Always use enabled spoofguard policy. ports with disabled port
        # security will be added to the exclude list
        sg_policy_id = self.nsx_v.vcns.create_spoofguard_policy(
            net_morefs, net_data['id'], True)[1]
        return sg_policy_id, False

    def _get_physical_network(self, network_type, net_data):
        """Default physical network: VDN scope for VXLAN, AZ DVS otherwise."""
        if network_type == c_utils.NsxVNetworkTypes.VXLAN:
            return self._get_network_vdn_scope_id(net_data)
        else:
            # Use the dvs_id of the availability zone
            return self._get_network_az_dvs_id(net_data)

    def _generate_segment_id(self, context, physical_network, net_data):
        """Allocate a free VLAN id on physical_network into the request.

        Uses the configured per-physnet vlan ranges when available,
        otherwise the full valid VLAN tag range; raises NoNetworkAvailable
        when the range is exhausted.
        """
        bindings = nsxv_db.get_network_bindings_by_physical_net(
            context.session, physical_network)
        vlan_ranges = self._network_vlans.get(physical_network, [])
        if vlan_ranges:
            vlan_ids = set()
            for vlan_min, vlan_max in vlan_ranges:
                vlan_ids |= set(moves.range(vlan_min, vlan_max + 1))
        else:
            vlan_min = constants.MIN_VLAN_TAG
            vlan_max = constants.MAX_VLAN_TAG
            vlan_ids = set(moves.range(vlan_min, vlan_max + 1))
        used_ids_in_range = set([binding.vlan_id for binding in bindings
                                 if binding.vlan_id in vlan_ids])
        free_ids = list(vlan_ids ^ used_ids_in_range)
        if len(free_ids) == 0:
            raise n_exc.NoNetworkAvailable()
        net_data[mpnet_apidef.SEGMENTS][0][pnet.SEGMENTATION_ID] = (
            free_ids[0])

    def create_network(self, context, network):
        """Create a neutron network and its NSX-V backend counterpart."""
        net_data = network['network']
        tenant_id = net_data['tenant_id']
        self._ensure_default_security_group(context, tenant_id)
        # Process the provider network extension
        provider_type = self._convert_to_transport_zones_dict(net_data)
        self._validate_provider_create(context, net_data)
        self._validate_availability_zones_in_obj(context, 'network',
                                                 net_data)
        # The network id is generated up-front so the backend objects can
        # be named after it before the DB row exists.
        net_data['id'] = str(uuidutils.generate_uuid())
        external = net_data.get(extnet_apidef.EXTERNAL)
        # backend_network: a backend switch/portgroup is needed (i.e. the
        # network is not marked external)
        backend_network = (not validators.is_attr_set(external) or
                           validators.is_attr_set(external) and
                           not external)
        network_type = None
        generate_segmenation_id = False
        lock_vlan_creation = False
        if provider_type is not None:
            segment = net_data[mpnet_apidef.SEGMENTS][0]
            network_type = segment.get(pnet.NETWORK_TYPE)
            if network_type == c_utils.NsxVNetworkTypes.VLAN:
                physical_network = segment.get(pnet.PHYSICAL_NETWORK)
                # Only physnets with configured vlan ranges need the
                # allocation lock below
                if physical_network in self._network_vlans:
                    lock_vlan_creation = True
                    if not validators.is_attr_set(
                            segment.get(pnet.SEGMENTATION_ID)):
                        generate_segmenation_id = True
        if lock_vlan_creation:
            # Serialize VLAN id allocation/validation per physical network
            with locking.LockManager.get_lock(
                    'vlan-networking-%s' % physical_network):
                if generate_segmenation_id:
                    self._generate_segment_id(context, physical_network,
                                              net_data)
                else:
                    segmentation_id = segment.get(pnet.SEGMENTATION_ID)
                    if nsxv_db.get_network_bindings_by_ids(
                            context.session, segmentation_id,
                            physical_network):
                        raise n_exc.VlanIdInUse(
                            vlan_id=segmentation_id,
                            physical_network=physical_network)

                return self._create_network(context, network, net_data,
                                            provider_type, external,
                                            backend_network, network_type)
        else:
            return self._create_network(context, network, net_data,
                                        provider_type, external,
                                        backend_network, network_type)

    def _create_network(self, context, network, net_data,
                        provider_type, external, backend_network,
                        network_type):
        # A external network should be created in the case that we have a
        # flat, vlan or vxlan network. For port groups we do not make any
        # changes.
        external_backend_network = (
            external and provider_type is not None and
            network_type != c_utils.NsxVNetworkTypes.PORTGROUP)

        self._validate_network_qos(context, net_data, backend_network)

        # Update the transparent vlan if configured
        vlt = False
        if extensions.is_extension_supported(self, 'vlan-transparent'):
            vlt = vlan_apidef.get_vlan_transparent(net_data)

        if backend_network or external_backend_network:
            #NOTE(abhiraut): Consider refactoring code below to have more
            #                readable conditions.
            if (provider_type is None or
                network_type == c_utils.NsxVNetworkTypes.VXLAN):
                # Overlay network: create a virtual wire in the VDN scope
                virtual_wire = {"name": net_data['id'],
                                "tenantId": "virtual wire tenant"}
                if vlt:
                    virtual_wire["guestVlanAllowed"] = True
                config_spec = {"virtualWireCreateSpec": virtual_wire}
                vdn_scope_id = self._get_network_vdn_scope_id(net_data)
                if provider_type is not None:
                    segment = net_data[mpnet_apidef.SEGMENTS][0]
                    if validators.is_attr_set(
                            segment.get(pnet.PHYSICAL_NETWORK)):
                        vdn_scope_id = segment.get(pnet.PHYSICAL_NETWORK)
                        if not (self.nsx_v.vcns.
                                validate_vdn_scope(vdn_scope_id)):
                            raise nsx_exc.NsxResourceNotFound(
                                res_name='vdn_scope_id',
                                res_id=vdn_scope_id)
                h, c = self.nsx_v.vcns.create_virtual_wire(vdn_scope_id,
                                                           config_spec)
                net_morefs = [c]
                dvs_net_ids = [net_data['id']]
            elif network_type == c_utils.NsxVNetworkTypes.PORTGROUP:
                # Existing portgroup: reuse it, nothing created on backend
                if vlt:
                    raise NotImplementedError(_("Transparent support only "
                                                "for VXLANs"))
                segment = net_data[mpnet_apidef.SEGMENTS][0]
                net_morefs = [segment.get(pnet.PHYSICAL_NETWORK)]
                dvs_net_ids = [net_data['name']]
            else:
                # FLAT/VLAN: create a portgroup on every requested DVS
                segment = net_data[mpnet_apidef.SEGMENTS][0]
                physical_network = segment.get(pnet.PHYSICAL_NETWORK)
                # Retrieve the list of dvs-ids from physical network.
                # If physical_network attr is not set, retrieve a list
                # consisting of a single dvs-id pre-configured in nsx.ini
                az_dvs = self._get_network_az_dvs_id(net_data)
                dvs_ids = self._get_dvs_ids(physical_network, az_dvs)
                dvs_net_ids = []
                # Save the list of netmorefs from the backend
                net_morefs = []
                dvs_pg_mappings = {}
                for dvs_id in dvs_ids:
                    try:
                        net_moref = self._create_vlan_network_at_backend(
                            dvs_id=dvs_id,
                            net_data=net_data)
                    except nsx_exc.NsxPluginException:
                        with excutils.save_and_reraise_exception():
                            # Delete VLAN networks on other DVSes if it
                            # fails to be created on one DVS and reraise
                            # the original exception.
                            for dvsmoref, netmoref in six.iteritems(
                                    dvs_pg_mappings):
                                self._delete_backend_network(
                                    netmoref, dvsmoref)
                    dvs_pg_mappings[dvs_id] = net_moref
                    net_morefs.append(net_moref)
                    dvs_net_ids.append(self._get_vlan_network_name(
                        net_data, dvs_id))
                    if vlt:
                        try:
                            self._vcm.update_port_groups_config(
                                dvs_id, net_data['id'], net_moref,
                                self._vcm.update_port_group_spec_trunk,
                                {})
                        except Exception:
                            with excutils.save_and_reraise_exception():
                                # Delete VLAN networks on other DVSes if it
                                # fails to be created on one DVS and reraise
                                # the original exception.
                                for dvsmoref, netmoref in six.iteritems(
                                        dvs_pg_mappings):
                                    self._delete_backend_network(
                                        netmoref, dvsmoref)
        try:
            net_data[psec.PORTSECURITY] = net_data.get(psec.PORTSECURITY,
                                                       True)
            if not cfg.CONF.nsxv.spoofguard_enabled:
                LOG.info("Network %s will have port security disabled",
                         net_data['id'])
                net_data[psec.PORTSECURITY] = False

            # Create SpoofGuard policy for network anti-spoofing
            # allow_multiple_addresses will be overridden in case the user
            # requires allowing multiple or cidr-based allowed address
            # pairs defined per port but doesn't want to disable spoofguard
            # globally
            sg_policy_id = None
            allow_multiple_addresses = (not net_data[psec.PORTSECURITY] and
                                        cfg.CONF.nsxv.
                                        allow_multiple_ip_addresses)
            if (cfg.CONF.nsxv.spoofguard_enabled and backend_network and
                not allow_multiple_addresses):
                # This variable is set as the method below may result in a
                # exception and we may need to rollback
                predefined = False
                sg_policy_id, predefined = self._prepare_spoofguard_policy(
                    network_type, net_data, net_morefs)
            with db_api.CONTEXT_WRITER.using(context):
                new_net = super(NsxVPluginV2, self).create_network(context,
                                                                   network)
                self._extension_manager.process_create_network(
                    context, net_data, new_net)
                # Process port security extension
                self._process_network_port_security_create(
                    context, net_data, new_net)
                if vlt:
                    super(NsxVPluginV2, self).update_network(
                        context, new_net['id'],
                        {'network': {'vlan_transparent': vlt}})

                # update the network with the availability zone hints
                if az_def.AZ_HINTS in net_data:
                    az_hints = az_validator.convert_az_list_to_string(
                        net_data[az_def.AZ_HINTS])
                    super(NsxVPluginV2, self).update_network(
                        context, new_net['id'],
                        {'network': {az_def.AZ_HINTS: az_hints}})
                    new_net[az_def.AZ_HINTS] = az_hints
                # still no availability zones until subnets creation
                new_net[az_def.COLLECTION_NAME] = []

                # DB Operations for setting the network as external
                self._process_l3_create(context, new_net, net_data)
                if (net_data.get(mpnet_apidef.SEGMENTS) and
                    isinstance(provider_type, bool)):
                    net_bindings = []
                    for tz in net_data[mpnet_apidef.SEGMENTS]:
                        network_type = tz.get(pnet.NETWORK_TYPE)
                        segmentation_id = tz.get(pnet.SEGMENTATION_ID, 0)
                        segmentation_id_set = validators.is_attr_set(
                            segmentation_id)
                        if not segmentation_id_set:
                            segmentation_id = 0
                        physical_network = tz.get(pnet.PHYSICAL_NETWORK,
                                                  '')
                        physical_net_set = validators.is_attr_set(
                            physical_network)
                        if not physical_net_set:
                            if external_backend_network:
                                physical_network = net_morefs[0]
                            else:
                                physical_network = (
                                    self._get_physical_network(
                                        network_type, net_data))
                        net_bindings.append(nsxv_db.add_network_binding(
                            context.session, new_net['id'],
                            network_type,
                            physical_network,
                            segmentation_id))
if provider_type: nsx_db.set_multiprovider_network(context.session, new_net['id']) self._extend_network_dict_provider(context, new_net, provider_type, net_bindings) if backend_network or external_backend_network: # Save moref in the DB for future access if (network_type == c_utils.NsxVNetworkTypes.VLAN or network_type == c_utils.NsxVNetworkTypes.FLAT): # Save netmoref to dvs id mappings for VLAN network # type for future access. for dvs_id, netmoref in six.iteritems(dvs_pg_mappings): nsx_db.add_neutron_nsx_network_mapping( session=context.session, neutron_id=new_net['id'], nsx_switch_id=netmoref, dvs_id=dvs_id) else: for net_moref in net_morefs: nsx_db.add_neutron_nsx_network_mapping( context.session, new_net['id'], net_moref) if (cfg.CONF.nsxv.spoofguard_enabled and backend_network and sg_policy_id): nsxv_db.map_spoofguard_policy_for_network( context.session, new_net['id'], sg_policy_id) except Exception: with excutils.save_and_reraise_exception(): # Delete the backend network if backend_network or external_backend_network: if (cfg.CONF.nsxv.spoofguard_enabled and sg_policy_id and not predefined): self.nsx_v.vcns.delete_spoofguard_policy(sg_policy_id) # Ensure that an predefined portgroup will not be deleted if network_type == c_utils.NsxVNetworkTypes.VXLAN: for net_moref in net_morefs: self._delete_backend_network(net_moref) elif (network_type and network_type != c_utils.NsxVNetworkTypes.PORTGROUP): for dvsmrf, netmrf in six.iteritems(dvs_pg_mappings): self._delete_backend_network(netmrf, dvsmrf) LOG.exception('Failed to create network') # If init is incomplete calling _update_qos_network() will result a # deadlock. # That situation happens when metadata init is creating a network # on its 1st execution. # Therefore we skip this code during init. 
if backend_network and self.init_is_complete: # Update the QOS restrictions of the backend network self._update_qos_on_created_network(context, net_data, new_net) # this extra lookup is necessary to get the # latest db model for the extension functions net_model = self._get_network(context, new_net['id']) resource_extend.apply_funcs('networks', new_net, net_model) return new_net def _update_qos_on_created_network(self, context, net_data, new_net): qos_policy_id = qos_com_utils.set_qos_policy_on_new_net( context, net_data, new_net) if qos_policy_id: # update the QoS data on the backend self._update_qos_on_backend_network( context, net_data['id'], qos_policy_id) def _update_qos_on_backend_network(self, context, net_id, qos_policy_id): # Translate the QoS rule data into Nsx values qos_data = qos_utils.NsxVQosRule( context=context, qos_policy_id=qos_policy_id) # default dvs for this network az = self.get_network_az_by_net_id(context, net_id) az_dvs_id = az.dvs_id # get the network moref/s from the db net_mappings = nsx_db.get_nsx_network_mappings( context.session, net_id) for mapping in net_mappings: # update the qos restrictions of the network self._vcm.update_port_groups_config( mapping.dvs_id or az_dvs_id, net_id, mapping.nsx_id, self._vcm.update_port_group_spec_qos, qos_data) def _cleanup_dhcp_edge_before_deletion(self, context, net_id): if self.metadata_proxy_handler: # Find if this is the last network which is bound # to DHCP Edge. 
If it is - cleanup Edge metadata config dhcp_edge = nsxv_db.get_dhcp_edge_network_binding( context.session, net_id) if dhcp_edge: edge_vnics = nsxv_db.get_edge_vnic_bindings_by_edge( context.session, dhcp_edge['edge_id']) # If the DHCP Edge is connected to two networks: # the deleted network and the inter-edge network, we can delete # the inter-edge interface if len(edge_vnics) == 2: rtr_binding = nsxv_db.get_nsxv_router_binding_by_edge( context.session, dhcp_edge['edge_id']) if rtr_binding: rtr_id = rtr_binding['router_id'] az_name = rtr_binding['availability_zone'] md_proxy = self.get_metadata_proxy_handler(az_name) if md_proxy: md_proxy.cleanup_router_edge(context, rtr_id) else: self.edge_manager.reconfigure_shared_edge_metadata_port( context, (vcns_const.DHCP_EDGE_PREFIX + net_id)[:36]) def _is_neutron_spoofguard_policy(self, net_id, moref, policy_id): # A neutron policy will have the network UUID as the name of the # policy try: policy = self.nsx_v.vcns.get_spoofguard_policy(policy_id)[1] except Exception: LOG.error("Policy does not exists for %s", policy_id) # We will not attempt to delete a policy that does not exist return False if policy: for ep in policy['enforcementPoints']: if ep['id'] == moref and policy['name'] == net_id: return True return False def _validate_internal_network(self, context, network_id): if nsxv_db.get_nsxv_internal_network_by_id( context.elevated().session, network_id): msg = (_("Cannot delete internal network %s or its subnets and " "ports") % network_id) raise n_exc.InvalidInput(error_message=msg) def delete_network(self, context, id): mappings = nsx_db.get_nsx_network_mappings(context.session, id) bindings = nsxv_db.get_network_bindings(context.session, id) if cfg.CONF.nsxv.spoofguard_enabled: sg_policy_id = nsxv_db.get_spoofguard_policy_id( context.session, id) self._validate_internal_network(context, id) # Update the DHCP edge for metadata and clean the vnic in DHCP edge # if there is only no other existing port besides DHCP 
port filters = {'network_id': [id]} ports = self.get_ports(context, filters=filters) auto_del = [p['id'] for p in ports if p['device_owner'] in [constants.DEVICE_OWNER_DHCP]] is_dhcp_backend_deleted = False if auto_del: filters = {'network_id': [id], 'enable_dhcp': [True]} sids = self.get_subnets(context, filters=filters, fields=['id']) if len(sids) > 0: try: self._cleanup_dhcp_edge_before_deletion(context, id) self.edge_manager.delete_dhcp_edge_service(context, id) is_dhcp_backend_deleted = True except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Failed to delete network') for port_id in auto_del: try: self.delete_port(context.elevated(), port_id, force_delete_dhcp=True) except Exception as e: LOG.warning('Unable to delete port %(port_id)s. ' 'Reason: %(e)s', {'port_id': port_id, 'e': e}) with db_api.CONTEXT_WRITER.using(context): self._process_l3_delete(context, id) # We would first delete subnet db if the backend dhcp service is # deleted in case of entering delete_subnet logic and retrying # to delete backend dhcp service again. if is_dhcp_backend_deleted: subnets = self._get_subnets_by_network(context, id) for subnet in subnets: self.base_delete_subnet(context, subnet['id']) super(NsxVPluginV2, self).delete_network(context, id) # Do not delete a predefined port group that was attached to # an external network if (bindings and bindings[0].binding_type == c_utils.NsxVNetworkTypes.PORTGROUP): if cfg.CONF.nsxv.spoofguard_enabled and sg_policy_id: if self._is_neutron_spoofguard_policy(id, mappings[0].nsx_id, sg_policy_id): self.nsx_v.vcns.delete_spoofguard_policy(sg_policy_id) return # Delete the backend network if necessary. This is done after # the base operation as that may throw an exception in the case # that there are ports defined on the network. 
if mappings: if cfg.CONF.nsxv.spoofguard_enabled and sg_policy_id: self.nsx_v.vcns.delete_spoofguard_policy(sg_policy_id) edge_utils.check_network_in_use_at_backend(context, id) for mapping in mappings: self._delete_backend_network( mapping.nsx_id, mapping.dvs_id) def _extend_get_network_dict_provider(self, context, net): self._extend_network_dict_provider(context, net) net[qos_consts.QOS_POLICY_ID] = qos_com_utils.get_network_policy_id( context, net['id']) def get_network(self, context, id, fields=None): with db_api.CONTEXT_READER.using(context): # goto to the plugin DB and fetch the network network = self._get_network(context, id) # Don't do field selection here otherwise we won't be able # to add provider networks fields net_result = self._make_network_dict(network, context=context) self._extend_get_network_dict_provider(context, net_result) return db_utils.resource_fields(net_result, fields) def get_networks(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): filters = filters or {} with db_api.CONTEXT_READER.using(context): networks = ( super(NsxVPluginV2, self).get_networks( context, filters, fields, sorts, limit, marker, page_reverse)) for net in networks: self._extend_get_network_dict_provider(context, net) return (networks if not fields else [db_utils.resource_fields(network, fields) for network in networks]) def _raise_if_updates_provider_attributes(self, original_network, attrs, az_dvs): """Raise exception if provider attributes are present. For the NSX-V we want to allow changing the physical network of vlan type networks. 
""" if (original_network.get(pnet.NETWORK_TYPE) == c_utils.NsxVNetworkTypes.VLAN and validators.is_attr_set( attrs.get(pnet.PHYSICAL_NETWORK)) and not validators.is_attr_set( attrs.get(pnet.NETWORK_TYPE)) and not validators.is_attr_set( attrs.get(pnet.SEGMENTATION_ID))): return c_utils.raise_if_updates_provider_attributes(attrs) def _update_vlan_network_dvs_ids(self, context, network, new_physical_network, az_dvs): """Update the dvs ids of a vlan provider network The new values will replace the old ones. Actions done in this function: - Create a backend network for each new dvs - Delete the backend networks for the old ones. - Return the relevant information in order to later also update the spoofguard policy, qos, network object and DB Returns: - dvs_list_changed True/False - dvs_pg_mappings - updated mapping of the elements dvs->moref """ dvs_pg_mappings = {} current_dvs_ids = set(self._get_dvs_ids( network[pnet.PHYSICAL_NETWORK], az_dvs)) new_dvs_ids = set(self._get_dvs_ids( new_physical_network, az_dvs)) additional_dvs_ids = new_dvs_ids - current_dvs_ids removed_dvs_ids = current_dvs_ids - new_dvs_ids if not additional_dvs_ids and not removed_dvs_ids: # no changes in the list of DVS return False, dvs_pg_mappings self._convert_to_transport_zones_dict(network) # get the current mapping as in the DB db_mapping = nsx_db.get_nsx_network_mappings( context.session, network['id']) for db_map in db_mapping: dvs_pg_mappings[db_map.dvs_id] = db_map.nsx_id # delete old backend networks for dvs_id in removed_dvs_ids: nsx_id = dvs_pg_mappings.get(dvs_id) if nsx_id: #Note(asarfaty) This may fail if there is a vm deployed, but # since the delete is done offline we will not catch it here self._delete_backend_network(nsx_id, dvs_id) del dvs_pg_mappings[dvs_id] # create all the new backend networks for dvs_id in additional_dvs_ids: try: net_moref = self._create_vlan_network_at_backend( dvs_id=dvs_id, net_data=network) except nsx_exc.NsxPluginException: with 
excutils.save_and_reraise_exception(): # Delete VLAN networks on other DVSes if it # fails to be created on one DVS and reraise # the original exception. for dvsmoref, netmoref in six.iteritems(dvs_pg_mappings): self._delete_backend_network(netmoref, dvsmoref) dvs_pg_mappings[dvs_id] = net_moref return True, dvs_pg_mappings def _update_network_validate_port_sec(self, context, net_id, net_attrs): if psec.PORTSECURITY in net_attrs and not net_attrs[psec.PORTSECURITY]: # check if there are compute ports on this network port_filters = {'network_id': [net_id], 'device_owner': ['compute:None']} compute_ports = self.get_ports(context, filters=port_filters) if compute_ports: LOG.warning("Disabling port-security on network %s would " "require instance in the network to have VM tools " "installed in order for security-groups to " "function properly.", net_id) def allow_multiple_addresses_configure_spoofguard(self, context, id, net_attrs, net_morefs): # User requires multiple addresses to be assigned to compute port # and therefore, the spoofguard policy is being removed for this net. orig_net = self.get_network(context, id) if not net_attrs[psec.PORTSECURITY]: sg_policy = nsxv_db.get_spoofguard_policy_id(context.session, orig_net['id']) if sg_policy: try: self.nsx_v.vcns.delete_spoofguard_policy(sg_policy) nsxv_db.del_nsxv_spoofguard_binding(context.session, sg_policy) except Exception as e: LOG.error('Unable to delete spoofguard policy ' '%(sg_policy)s. Error: %(e)s', {'sg_policy': sg_policy, 'e': e}) else: LOG.warning("Could not locate spoofguard policy for " "network %s", id) # User requires port-security-enabled set to True and thus requires # spoofguard installed for this network else: # Verifying that all ports are legal, i.e. 
not CIDR/subnet, and # that the same IP address is not used multiple times for a given # neutron network filters = {'network_id': [id]} ports = self.get_ports(context, filters=filters) valid_ports = [] ip_addresses = set() if ports: for port in ports: for ap in port[addr_apidef.ADDRESS_PAIRS]: if len(ap['ip_address'].split('/')) > 1: msg = _('Port %s contains CIDR/subnet, ' 'which is not supported at the ' 'backend ') % port['id'] raise n_exc.BadRequest( resource='networks', msg=msg) else: set_len = len(ip_addresses) ip_addresses.add(ap['ip_address']) if len(ip_addresses) == set_len: msg = _('IP address %(ip)s is allowed ' 'by more than 1 logical port. ' 'This is not supported by the ' 'backend. Port security cannot ' 'be enabled for network %(net)s') % { 'ip': ap['ip_address'], 'net': id} LOG.error(msg) raise n_exc.BadRequest( resource='networks', msg=msg) valid_ports.append(port) try: sg_policy_id, predefined = ( self._prepare_spoofguard_policy( orig_net.get(pnet.NETWORK_TYPE), orig_net, net_morefs)) if sg_policy_id: nsxv_db.map_spoofguard_policy_for_network( context.session, orig_net['id'], sg_policy_id) except Exception as e: msg = _('Unable to create spoofguard policy, error: %(' 'error)s, ' 'net_morefs=%(net_morefs)s, network_id= %(' 'network_id)s') % {'error': e, 'net_morefs': net_morefs, 'network_id': orig_net} raise n_exc.BadRequest(resource='spoofguard policy', msg=msg) try: for port in valid_ports: vnic_idx = port.get(ext_vnic_idx.VNIC_INDEX) device_id = port['device_id'] vnic_id = self._get_port_vnic_id(vnic_idx, device_id) self._update_vnic_assigned_addresses(context.session, port, vnic_id) except Exception as e: msg = _('Unable to add port to spoofguard policy error ' '%s') % e raise n_exc.BadRequest(resource='spoofguard policy', msg=msg) def update_network(self, context, id, network): net_attrs = network['network'] orig_net = self.get_network(context, id) az_dvs = self._get_network_az_dvs_id(orig_net) self._raise_if_updates_provider_attributes( 
orig_net, net_attrs, az_dvs) if net_attrs.get("admin_state_up") is False: raise NotImplementedError(_("admin_state_up=False networks " "are not supported.")) ext_net = self._get_network(context, id) if not ext_net.external: net_morefs = nsx_db.get_nsx_switch_ids(context.session, id) else: net_morefs = [] backend_network = True if len(net_morefs) > 0 else False self._validate_network_qos(context, net_attrs, backend_network) # PortSecurity validation checks psec_update = (psec.PORTSECURITY in net_attrs and orig_net[psec.PORTSECURITY] != net_attrs[psec.PORTSECURITY]) if psec_update: self._update_network_validate_port_sec(context, id, net_attrs) # Change spoofguard accordingly - either remove if # port-security-enabled was set to false or add (with relevant ports) # if set to true. if (cfg.CONF.nsxv.spoofguard_enabled and cfg.CONF.nsxv.allow_multiple_ip_addresses and psec_update): self.allow_multiple_addresses_configure_spoofguard(context, id, net_attrs, net_morefs) # Check if the physical network of a vlan provider network was updated updated_morefs = False if (net_attrs.get(pnet.PHYSICAL_NETWORK) and orig_net.get(pnet.NETWORK_TYPE) == c_utils.NsxVNetworkTypes.VLAN): (updated_morefs, new_dvs_pg_mappings) = self._update_vlan_network_dvs_ids( context, orig_net, net_attrs[pnet.PHYSICAL_NETWORK], az_dvs) if updated_morefs: net_morefs = list(new_dvs_pg_mappings.values()) with db_api.CONTEXT_WRITER.using(context): net_res = super(NsxVPluginV2, self).update_network(context, id, network) self._extension_manager.process_update_network(context, net_attrs, net_res) self._process_network_port_security_update( context, net_attrs, net_res) self._process_l3_update(context, net_res, net_attrs) self._extend_network_dict_provider(context, net_res) if updated_morefs: # delete old mapping before recreating all nsx_db.delete_neutron_nsx_network_mapping( session=context.session, neutron_id=id) # Save netmoref to dvs id mappings for VLAN network # type for future access. 
dvs_ids = [] for dvs_id, netmoref in six.iteritems(new_dvs_pg_mappings): nsx_db.add_neutron_nsx_network_mapping( session=context.session, neutron_id=id, nsx_switch_id=netmoref, dvs_id=dvs_id) dvs_ids.append(dvs_id) all_dvs = ', '.join(sorted(dvs_ids)) net_res[pnet.PHYSICAL_NETWORK] = all_dvs vlan_id = net_res.get(pnet.SEGMENTATION_ID) nsxv_db.update_network_binding_phy_uuid( context.session, id, net_res.get(pnet.NETWORK_TYPE), vlan_id, all_dvs) # Updating SpoofGuard policy if exists, on failure revert to network # old state if (not ext_net.external and cfg.CONF.nsxv.spoofguard_enabled and updated_morefs): policy_id = nsxv_db.get_spoofguard_policy_id(context.session, id) try: # Always use enabled spoofguard policy. ports with disabled # port security will be added to the exclude list self.nsx_v.vcns.update_spoofguard_policy( policy_id, net_morefs, id, True) except Exception: with excutils.save_and_reraise_exception(): revert_update = db_utils.resource_fields( orig_net, ['shared', psec.PORTSECURITY]) self._process_network_port_security_update( context, revert_update, net_res) super(NsxVPluginV2, self).update_network( context, id, {'network': revert_update}) # Handle QOS updates (Value can be None, meaning to delete the # current policy), or moref updates with an existing qos policy if (not ext_net.external and (qos_consts.QOS_POLICY_ID in net_attrs) or (updated_morefs and orig_net.get(qos_consts.QOS_POLICY_ID))): # update the qos data qos_policy_id = (net_attrs[qos_consts.QOS_POLICY_ID] if qos_consts.QOS_POLICY_ID in net_attrs else orig_net.get(qos_consts.QOS_POLICY_ID)) self._update_qos_on_backend_network(context, id, qos_policy_id) # attach the policy to the network in neutron DB qos_com_utils.update_network_policy_binding( context, id, qos_policy_id) net_res[qos_consts.QOS_POLICY_ID] = ( qos_com_utils.get_network_policy_id(context, id)) # Handle case of network name update - this only is relevant for # networks that we create - not portgroup providers if 
(net_attrs.get('name') and orig_net.get('name') != net_attrs.get('name') and (orig_net.get(pnet.NETWORK_TYPE) == c_utils.NsxVNetworkTypes.VLAN or orig_net.get(pnet.NETWORK_TYPE) == c_utils.NsxVNetworkTypes.FLAT)): # Only update networks created by plugin mappings = nsx_db.get_nsx_network_mappings(context.session, id) for mapping in mappings: network_name = self._get_vlan_network_name(net_res, mapping.dvs_id) try: self._vcm.update_port_groups_config( mapping.dvs_id, id, mapping.nsx_id, self._dvs.update_port_group_spec_name, network_name) except Exception as e: LOG.error('Unable to update name for net %(net_id)s. ' 'Error: %(e)s', {'net_id': id, 'e': e}) return net_res def _validate_unique_address_pair_across_network(self, context, port, address_pairs): network_id = port['network_id'] filters = {'network_id': [network_id]} valid_existing_ports = [] existing_fixed_and_addr_pairs = [] for exist_port in self.get_ports(context, filters=filters): if exist_port['id'] != port['id']: valid_existing_ports.append(exist_port) for valid_port in valid_existing_ports: for fixed in valid_port.get('fixed_ips', []): existing_fixed_and_addr_pairs.append(fixed['ip_address']) for addr_pair in valid_port.get('allowed_address_pairs', []): existing_fixed_and_addr_pairs.append(addr_pair['ip_address']) fixed_ips_list = port.get('fixed_ips', []) # validate ip collision with fixed ips for fixed_ip in fixed_ips_list: ip = fixed_ip.get('ip_address') if ip in existing_fixed_and_addr_pairs: msg = _('IP address %s entered as fixed ip already ' 'exists in the network. Duplicate IP addresses is not ' 'supported at backend') % ip raise n_exc.InvalidInput(error_message=msg) # validate ip collision with address pair for pair in address_pairs: ip = pair.get('ip_address') if ip in existing_fixed_and_addr_pairs: msg = _('IP address %s entered as address pair already ' 'exists in the network. 
Duplicate IP addresses is not ' 'supported at backend') % ip raise n_exc.InvalidInput(error_message=msg) def _verify_cidr_defined(self, attrs): for ap in attrs[addr_apidef.ADDRESS_PAIRS]: # Check that the IP address is a subnet if len(ap['ip_address'].split('/')) > 1: msg = _('NSXv does not support CIDR as address pairs') raise n_exc.BadRequest(resource='address_pairs', msg=msg) def _validate_address_pairs(self, context, attrs, db_port): # Ground rule - if spoofguard exists: all tests must take place. policy_id = nsxv_db.get_spoofguard_policy_id(context.session, db_port['network_id']) if policy_id: self._validate_unique_address_pair_across_network( context, db_port, attrs[addr_apidef.ADDRESS_PAIRS]) self._verify_cidr_defined(attrs) # Check that the MAC address is the same as the port for ap in attrs[addr_apidef.ADDRESS_PAIRS]: if ('mac_address' in ap and ap['mac_address'] != db_port['mac_address']): msg = _('Address pairs should have same MAC as the ' 'port') raise n_exc.BadRequest(resource='address_pairs', msg=msg) def _is_mac_in_use(self, context, network_id, mac_address): # Override this method as the backed doesn't support using the same # mac twice on any network, not just this specific network admin_ctx = context.elevated() return bool(admin_ctx.session.query(models_v2.Port). filter(models_v2.Port.mac_address == mac_address). 
count()) @db_api.retry_db_errors def base_create_port(self, context, port): created_port = super(NsxVPluginV2, self).create_port(context, port) self._extension_manager.process_create_port( context, port['port'], created_port) return created_port def _validate_extra_dhcp_options(self, opts): if not opts: return for opt in opts: opt_name = opt['opt_name'] opt_val = opt['opt_value'] if opt_name == 'classless-static-route': # separate validation for option121 if opt_val is not None: try: net, ip = opt_val.split(',') except Exception: msg = (_("Bad value %(val)s for DHCP option " "%(name)s") % {'name': opt_name, 'val': opt_val}) raise n_exc.InvalidInput(error_message=msg) elif opt_name not in vcns_const.SUPPORTED_DHCP_OPTIONS: try: option = int(opt_name) except ValueError: option = 255 if option >= 255: msg = (_("DHCP option %s is not supported") % opt_name) LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) def _validate_port_qos(self, port): if validators.is_attr_set(port.get(qos_consts.QOS_POLICY_ID)): err_msg = (_("Cannot configure QOS directly on ports")) raise n_exc.InvalidInput(error_message=err_msg) def _assert_on_lb_port_admin_state(self, port_data, original_port, device_owner): if device_owner in [constants.DEVICE_OWNER_LOADBALANCERV2, oct_const.DEVICE_OWNER_OCTAVIA]: orig_state = original_port.get("admin_state_up") new_state = port_data.get("admin_state_up") if new_state is not None and (orig_state != new_state): err_msg = _("Changing admin_state for " "loadbalancer's internal port is not supported") LOG.warning(err_msg) raise n_exc.InvalidInput(error_message=err_msg) def create_port(self, context, port): port_data = port['port'] dhcp_opts = port_data.get(ext_edo.EXTRADHCPOPTS) self._validate_extra_dhcp_options(dhcp_opts) self._validate_max_ips_per_port(port_data.get('fixed_ips', []), port_data.get('device_owner')) self._validate_port_qos(port_data) direct_vnic_type = self._validate_port_vnic_type( context, port_data, port_data['network_id']) with 
db_api.CONTEXT_WRITER.using(context): # First we allocate port in neutron database neutron_db = super(NsxVPluginV2, self).create_port(context, port) self._extension_manager.process_create_port( context, port_data, neutron_db) # Port port-security is decided based on port's vnic_type and ports # network port-security state (unless explicitly requested # differently by the user). if not cfg.CONF.nsxv.spoofguard_enabled: port_security = False else: port_security = port_data.get(psec.PORTSECURITY) if validators.is_attr_set(port_security): # 'direct' and 'direct-physical' vnic types ports requires # port-security to be disabled. if direct_vnic_type and port_security: err_msg = _("Security features are not supported for " "ports with direct/direct-physical VNIC type") raise n_exc.InvalidInput(error_message=err_msg) elif direct_vnic_type: # Implicitly disable port-security for direct vnic types. port_security = False else: port_security = self._get_network_security_binding( context, neutron_db['network_id']) port_data[psec.PORTSECURITY] = port_security provider_sg_specified = (validators.is_attr_set( port_data.get(provider_sg.PROVIDER_SECURITYGROUPS)) and port_data[provider_sg.PROVIDER_SECURITYGROUPS] != []) has_security_groups = ( self._check_update_has_security_groups(port)) self._process_port_port_security_create( context, port_data, neutron_db) self._process_portbindings_create_and_update( context, port_data, neutron_db) # Update fields obtained from neutron db (eg: MAC address) port["port"].update(neutron_db) has_ip = self._ip_on_port(neutron_db) # allowed address pair checks attrs = port[port_def.RESOURCE_NAME] if self._check_update_has_allowed_address_pairs(port): if not port_security: raise addr_exc.AddressPairAndPortSecurityRequired() self._validate_address_pairs(context, attrs, neutron_db) else: # remove ATTR_NOT_SPECIFIED attrs[addr_apidef.ADDRESS_PAIRS] = [] # security group extension checks if has_ip and port_security: 
self._ensure_default_security_group_on_port(context, port) (sgids, ssgids) = self._get_port_security_groups_lists( context, port) elif (has_security_groups or provider_sg_specified): LOG.error("Port has conflicting port security status and " "security groups") raise psec_exc.PortSecurityAndIPRequiredForSecurityGroups() else: sgids = ssgids = [] self._process_port_create_security_group(context, port_data, sgids) self._process_port_create_provider_security_group(context, port_data, ssgids) neutron_db[addr_apidef.ADDRESS_PAIRS] = ( self._process_create_allowed_address_pairs( context, neutron_db, attrs.get(addr_apidef.ADDRESS_PAIRS))) self._process_port_create_extra_dhcp_opts( context, port_data, dhcp_opts) # MAC learning - only update DB. Can only update NSX when the port # exists - this is done via update if validators.is_attr_set(port_data.get(mac_ext.MAC_LEARNING)): if (((has_ip and port_security) or has_security_groups or provider_sg_specified) and port_data.get(mac_ext.MAC_LEARNING) is True): err_msg = _("Security features are not supported for " "mac learning") raise n_exc.InvalidInput(error_message=err_msg) self._create_mac_learning_state(context, port_data) elif mac_ext.MAC_LEARNING in port_data: # This is due to the fact that the default is # ATTR_NOT_SPECIFIED port_data.pop(mac_ext.MAC_LEARNING) try: # Configure NSX - this should not be done in the DB transaction # Configure the DHCP Edge service self._create_dhcp_static_binding(context, port_data) except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Failed to create port') # Revert what we have created and raise the exception self.delete_port(context, port_data['id']) # this extra lookup is necessary to get the # latest db model for the extension functions port_model = self._get_port(context, port_data['id']) resource_extend.apply_funcs('ports', port_data, port_model) self._remove_provider_security_groups_from_list(port_data) self._extend_nsx_port_dict_binding(context, port_data) 
kwargs = {'context': context, 'port': neutron_db} registry.notify(resources.PORT, events.AFTER_CREATE, self, **kwargs) return port_data def _make_port_dict(self, port, fields=None, process_extensions=True): port_data = super(NsxVPluginV2, self)._make_port_dict( port, fields=fields, process_extensions=process_extensions) self._remove_provider_security_groups_from_list(port_data) return port_data def _get_port_subnet_mask(self, context, port): if len(port['fixed_ips']) > 0 and 'subnet_id' in port['fixed_ips'][0]: subnet_id = port['fixed_ips'][0]['subnet_id'] subnet = self._get_subnet(context, subnet_id) return str(netaddr.IPNetwork(subnet.cidr).netmask) def _get_port_fixed_ip_addr(self, port): if (len(port['fixed_ips']) > 0 and 'ip_address' in port['fixed_ips'][0]): return port['fixed_ips'][0]['ip_address'] def _count_no_sec_ports_for_device_id(self, context, device_id): """Find how many compute ports with this device ID and no security there are, so we can decide on adding / removing the device from the exclusion list """ filters = {'device_id': [device_id]} device_ports = self.get_ports(context.elevated(), filters=filters) ports = [port for port in device_ports if port['device_owner'].startswith('compute')] return len([p for p in ports if validators.is_attr_set(p.get(ext_vnic_idx.VNIC_INDEX)) and not p[psec.PORTSECURITY]]) def _add_vm_to_exclude_list(self, context, device_id, port_id): if (self._vcm and cfg.CONF.nsxv.use_exclude_list): # first time for this vm (we expect the count to be 1 already # because the DB was already updated) if (self._count_no_sec_ports_for_device_id( context, device_id) <= 1): vm_moref = self._vcm.get_vm_moref(device_id) if vm_moref is not None: try: LOG.info("Add VM %(dev)s to exclude list on " "behalf of port %(port)s: added to " "list", {"dev": device_id, "port": port_id}) self.nsx_v.vcns.add_vm_to_exclude_list(vm_moref) except vsh_exc.RequestBad as e: LOG.error("Failed to add vm %(device)s " "moref %(moref)s to exclude list: " 
"%(err)s", {'device': device_id, 'moref': vm_moref, 'err': e}) else: LOG.info("Add VM %(dev)s to exclude list on behalf of " "port %(port)s: VM already in list", {"dev": device_id, "port": port_id}) loose_ver = version.LooseVersion(self.nsx_v.vcns.get_version()) if loose_ver < version.LooseVersion('6.3.3'): LOG.info("Syncing firewall") self.nsx_v.vcns.sync_firewall() def _remove_vm_from_exclude_list(self, context, device_id, port_id, expected_count=0): if (self._vcm and cfg.CONF.nsxv.use_exclude_list): # No ports left in DB (expected count is 0 or 1 depending # on whether the DB was already updated), # So we can remove it from the backend exclude list if (self._count_no_sec_ports_for_device_id( context, device_id) <= expected_count): vm_moref = self._vcm.get_vm_moref(device_id) if vm_moref is not None: try: LOG.info("Remove VM %(dev)s from exclude list on " "behalf of port %(port)s: removed from " "list", {"dev": device_id, "port": port_id}) self.nsx_v.vcns.delete_vm_from_exclude_list(vm_moref) except vsh_exc.RequestBad as e: LOG.error("Failed to delete vm %(device)s " "moref %(moref)s from exclude list: " "%(err)s", {'device': device_id, 'moref': vm_moref, 'err': e}) else: LOG.info("Remove VM %(dev)s from exclude list on behalf " "of port %(port)s: other ports still in list", {"dev": device_id, "port": port_id}) def update_port(self, context, id, port): with locking.LockManager.get_lock('port-update-%s' % id): original_port = super(NsxVPluginV2, self).get_port(context, id) self._extend_get_port_dict_qos_and_binding(context, original_port) is_compute_port = self._is_compute_port(original_port) device_id = original_port['device_id'] if is_compute_port and device_id: # Lock on the device ID to make sure we do not change/delete # ports of the same device at the same time with locking.LockManager.get_lock( 'port-device-%s' % device_id): return self._update_port(context, id, port, original_port, is_compute_port, device_id) else: return self._update_port(context, id, 
port, original_port, is_compute_port, device_id) def _update_dhcp_address(self, context, network_id): with locking.LockManager.get_lock('dhcp-update-%s' % network_id): address_groups = self._create_network_dhcp_address_group( context, network_id) self.edge_manager.update_dhcp_edge_service( context, network_id, address_groups=address_groups) def _nsx_update_mac_learning(self, context, port): net_id = port['network_id'] # default dvs for this network az = self.get_network_az_by_net_id(context, net_id) az_dvs_id = az.dvs_id # get the network moref/s from the db net_mappings = nsx_db.get_nsx_network_mappings( context.session, net_id) for mapping in net_mappings: dvs_id = mapping.dvs_id or az_dvs_id try: self._vcm.update_port_groups_config( dvs_id, net_id, mapping.nsx_id, self._vcm.update_port_group_security_policy, True) except Exception as e: LOG.error("Unable to update network security override " "policy: %s", e) return self._vcm.update_port_security_policy( dvs_id, net_id, mapping.nsx_id, port['device_id'], port['mac_address'], port[mac_ext.MAC_LEARNING]) def _update_port(self, context, id, port, original_port, is_compute_port, device_id): attrs = port[port_def.RESOURCE_NAME] port_data = port['port'] dhcp_opts = port_data.get(ext_edo.EXTRADHCPOPTS) self._validate_extra_dhcp_options(dhcp_opts) self._validate_port_qos(port_data) if addr_apidef.ADDRESS_PAIRS in attrs: self._validate_address_pairs(context, attrs, original_port) self._validate_max_ips_per_port( port_data.get('fixed_ips', []), port_data.get('device_owner', original_port['device_owner'])) orig_has_port_security = (cfg.CONF.nsxv.spoofguard_enabled and original_port[psec.PORTSECURITY]) orig_device_owner = original_port.get('device_owner') self._assert_on_lb_port_admin_state(port_data, original_port, orig_device_owner) port_mac_change = port_data.get('mac_address') is not None port_ip_change = port_data.get('fixed_ips') is not None device_owner_change = port_data.get('device_owner') is not None # We do not 
support updating the port ip and device owner together if port_ip_change and device_owner_change: msg = (_('Cannot set fixed ips and device owner together for port ' '%s') % original_port['id']) raise n_exc.BadRequest(resource='port', msg=msg) # Check if port security has changed port_sec_change = False has_port_security = orig_has_port_security if (psec.PORTSECURITY in port_data and port_data[psec.PORTSECURITY] != original_port[psec.PORTSECURITY]): port_sec_change = True has_port_security = (cfg.CONF.nsxv.spoofguard_enabled and port_data[psec.PORTSECURITY]) # Address pairs require port security if (not has_port_security and (original_port[addr_apidef.ADDRESS_PAIRS] or addr_apidef.ADDRESS_PAIRS in attrs)): msg = _('Address pairs require port security enabled') raise n_exc.BadRequest(resource='port', msg=msg) # TODO(roeyc): create a method '_process_vnic_index_update' from the # following code block # Process update for vnic-index vnic_idx = port_data.get(ext_vnic_idx.VNIC_INDEX) # Only set the vnic index for a compute VM if validators.is_attr_set(vnic_idx) and is_compute_port: # Update database only if vnic index was changed if original_port.get(ext_vnic_idx.VNIC_INDEX) != vnic_idx: self._set_port_vnic_index_mapping( context, id, device_id, vnic_idx) vnic_id = self._get_port_vnic_id(vnic_idx, device_id) self._add_security_groups_port_mapping( context.session, vnic_id, original_port[ext_sg.SECURITYGROUPS] + original_port[provider_sg.PROVIDER_SECURITYGROUPS]) if has_port_security: LOG.debug("Assigning vnic port fixed-ips: port %s, " "vnic %s, with fixed-ips %s", id, vnic_id, original_port['fixed_ips']) self._update_vnic_assigned_addresses( context.session, original_port, vnic_id) if (cfg.CONF.nsxv.use_default_block_all and not original_port[ext_sg.SECURITYGROUPS]): self._add_member_to_security_group( self.sg_container_id, vnic_id) else: # Add vm to the exclusion list, since it has no port security self._add_vm_to_exclude_list(context, device_id, id) # if service 
insertion is enabled - add this vnic to the service # insertion security group if self._si_handler.enabled and original_port[psec.PORTSECURITY]: self._add_member_to_security_group(self._si_handler.sg_id, vnic_id) provider_sgs_specified = validators.is_attr_set( port_data.get(provider_sg.PROVIDER_SECURITYGROUPS)) delete_provider_sg = provider_sgs_specified and ( port_data[provider_sg.PROVIDER_SECURITYGROUPS] != []) delete_security_groups = self._check_update_deletes_security_groups( port) has_security_groups = self._check_update_has_security_groups(port) comp_owner_update = ('device_owner' in port_data and port_data['device_owner'].startswith('compute:')) direct_vnic_type = self._validate_port_vnic_type( context, port_data, original_port['network_id']) if direct_vnic_type and has_port_security: err_msg = _("Security features are not supported for " "ports with direct/direct-physical VNIC type") raise n_exc.InvalidInput(error_message=err_msg) old_mac_learning_state = original_port.get(mac_ext.MAC_LEARNING) if has_port_security: if ((mac_ext.MAC_LEARNING in port_data and port_data[mac_ext.MAC_LEARNING] is True) or (mac_ext.MAC_LEARNING not in port_data and old_mac_learning_state is True)): err_msg = _("Security features are not supported for " "mac_learning") raise n_exc.InvalidInput(error_message=err_msg) with db_api.CONTEXT_WRITER.using(context): ret_port = super(NsxVPluginV2, self).update_port( context, id, port) self._extension_manager.process_update_port( context, port_data, ret_port) self._process_portbindings_create_and_update( context, port_data, ret_port) # copy values over - except fixed_ips as # they've already been processed updates_fixed_ips = port['port'].pop('fixed_ips', []) ret_port.update(port['port']) has_ip = self._ip_on_port(ret_port) # checks that if update adds/modify security groups, # then port has ip and port-security if not (has_ip and has_port_security): if has_security_groups or provider_sgs_specified: LOG.error("Port has conflicting port 
security status and " "security groups") raise psec_exc.PortSecurityAndIPRequiredForSecurityGroups() if ((not delete_security_groups and original_port[ext_sg.SECURITYGROUPS]) or (not delete_provider_sg and original_port[provider_sg.PROVIDER_SECURITYGROUPS])): LOG.error("Port has conflicting port security status and " "security groups") raise psec_exc.PortSecurityAndIPRequiredForSecurityGroups() if delete_security_groups or has_security_groups: self.update_security_group_on_port(context, id, port, original_port, ret_port) # NOTE(roeyc): Should call this method only after # update_security_group_on_port was called. pvd_sg_changed = self._process_port_update_provider_security_group( context, port, original_port, ret_port) update_assigned_addresses = False if addr_apidef.ADDRESS_PAIRS in attrs: update_assigned_addresses = self.update_address_pairs_on_port( context, id, port, original_port, ret_port) self._update_extra_dhcp_opts_on_port(context, id, port, ret_port) new_mac_learning_state = ret_port.get(mac_ext.MAC_LEARNING) if (new_mac_learning_state is not None and old_mac_learning_state != new_mac_learning_state): self._update_mac_learning_state(context, id, new_mac_learning_state) # update port security in DB if changed if psec.PORTSECURITY in port['port']: self._process_port_port_security_update( context, port_data, ret_port) if comp_owner_update: # Create dhcp bindings, the port is now owned by an instance self._create_dhcp_static_binding(context, ret_port) elif port_mac_change or port_ip_change or dhcp_opts: owner = original_port['device_owner'] # If port IP has changed we should update according to device # owner if is_compute_port: # This is an instance port, so re-create DHCP entry self._delete_dhcp_static_binding(context, original_port) self._create_dhcp_static_binding(context, ret_port) elif owner == constants.DEVICE_OWNER_DHCP: # Update the ip of the dhcp port # Note: if there are no fixed ips this means that we are in # the process of deleting the subnet of 
this port. # In this case we should avoid updating the nsx backed as the # delete subnet will soon do it. if dhcp_opts or ret_port.get('fixed_ips'): self._update_dhcp_address(context, ret_port['network_id']) elif (owner == constants.DEVICE_OWNER_ROUTER_GW or owner == constants.DEVICE_OWNER_ROUTER_INTF): # This is a router port - update the edge appliance old_ip = self._get_port_fixed_ip_addr(original_port) new_ip = self._get_port_fixed_ip_addr(ret_port) if ((old_ip is not None or new_ip is not None) and (old_ip != new_ip)): if validators.is_attr_set(original_port.get('device_id')): router_id = original_port['device_id'] router_driver = self._find_router_driver(context, router_id) # subnet mask is needed for adding new ip to the vnic sub_mask = self._get_port_subnet_mask(context, ret_port) router_driver.update_router_interface_ip( context, router_id, original_port['id'], ret_port['network_id'], old_ip, new_ip, sub_mask) else: LOG.info('Not updating fixed IP on backend for ' 'device owner [%(dev_own)s] and port %(pid)s', {'dev_own': owner, 'pid': original_port['id']}) # Processing compute port update vnic_idx = original_port.get(ext_vnic_idx.VNIC_INDEX) if validators.is_attr_set(vnic_idx) and is_compute_port: vnic_id = self._get_port_vnic_id(vnic_idx, device_id) curr_sgids = ( original_port[provider_sg.PROVIDER_SECURITYGROUPS] + original_port[ext_sg.SECURITYGROUPS]) if ret_port['device_id'] != device_id: # Update change device_id - remove port-vnic association and # delete security-groups memberships for the vnic self._delete_security_groups_port_mapping( context.session, vnic_id, curr_sgids) if cfg.CONF.nsxv.spoofguard_enabled: if original_port[psec.PORTSECURITY]: try: self._remove_vnic_from_spoofguard_policy( context.session, original_port['network_id'], vnic_id) except Exception as e: LOG.error('Could not delete the spoofguard ' 'policy. 
Exception %s', e) # remove vm from the exclusion list when it is detached # from the device if it has no port security if not original_port[psec.PORTSECURITY]: self._remove_vm_from_exclude_list( context, device_id, id) self._delete_port_vnic_index_mapping(context, id) self._delete_dhcp_static_binding(context, original_port) # if service insertion is enabled - remove this vnic from the # service insertion security group if (self._si_handler.enabled and original_port[psec.PORTSECURITY]): self._remove_member_from_security_group( self._si_handler.sg_id, vnic_id) else: # port security enabled / disabled if port_sec_change: if has_port_security: LOG.debug("Assigning vnic port fixed-ips: port %s, " "vnic %s, with fixed-ips %s", id, vnic_id, original_port['fixed_ips']) self._update_vnic_assigned_addresses( context.session, original_port, vnic_id) # Remove vm from the exclusion list, since it now has # port security self._remove_vm_from_exclude_list(context, device_id, id) # add the vm to the service insertion if self._si_handler.enabled: self._add_member_to_security_group( self._si_handler.sg_id, vnic_id) elif cfg.CONF.nsxv.spoofguard_enabled: try: self._remove_vnic_from_spoofguard_policy( context.session, original_port['network_id'], vnic_id) except Exception as e: LOG.error('Could not delete the spoofguard ' 'policy. 
Exception %s', e) # Add vm to the exclusion list, since it has no port # security now self._add_vm_to_exclude_list(context, device_id, id) # remove the vm from the service insertion if self._si_handler.enabled: self._remove_member_from_security_group( self._si_handler.sg_id, vnic_id) # Update vnic with the newest approved IP addresses if (has_port_security and (updates_fixed_ips or update_assigned_addresses)): LOG.debug("Updating vnic port fixed-ips: port %s, vnic " "%s, fixed-ips %s", id, vnic_id, ret_port['fixed_ips']) self._update_vnic_assigned_addresses( context.session, ret_port, vnic_id) if not has_port_security and has_security_groups: LOG.warning("port-security is disabled on " "port %(id)s, " "VM tools must be installed on instance " "%(device_id)s for security-groups to " "function properly ", {'id': id, 'device_id': original_port['device_id']}) if (delete_security_groups or has_security_groups or pvd_sg_changed): # Update security-groups, # calculate differences and update vnic membership # accordingly. 
new_sgids = ( ret_port[provider_sg.PROVIDER_SECURITYGROUPS] + ret_port[ext_sg.SECURITYGROUPS]) self._update_security_groups_port_mapping( context.session, id, vnic_id, curr_sgids, new_sgids) if (cfg.CONF.nsxv.use_default_block_all and not ret_port[ext_sg.SECURITYGROUPS]): # If there are no security groups ensure that the # default is 'Drop All' self._add_member_to_security_group( self.sg_container_id, vnic_id) # update mac learning on NSX if self._vcm: mac_learning = self.get_mac_learning_state(context, id) if mac_learning is not None: try: self._nsx_update_mac_learning(context, ret_port) except Exception as e: LOG.error("Unable to update mac learning for port %s, " "reason: %s", id, e) kwargs = { 'context': context, 'port': ret_port, 'mac_address_updated': False, 'original_port': original_port, } registry.notify(resources.PORT, events.AFTER_UPDATE, self, **kwargs) return ret_port def _extend_get_port_dict_qos_and_binding(self, context, port): self._extend_nsx_port_dict_binding(context, port) # add the qos policy id from the DB (always None in this plugin) if 'id' in port: port[qos_consts.QOS_POLICY_ID] = qos_com_utils.get_port_policy_id( context, port['id']) def _extend_nsx_port_dict_binding(self, context, port_data): # Extend port dict binding in case the data was not updated from the # DB by _extend_port_portbinding, which means this is an older port if pbin.VIF_TYPE not in port_data: port_data[pbin.VIF_TYPE] = nsx_constants.VIF_TYPE_DVS if pbin.VNIC_TYPE not in port_data: port_data[pbin.VNIC_TYPE] = pbin.VNIC_NORMAL if pbin.VIF_DETAILS not in port_data: port_data[pbin.VIF_DETAILS] = {pbin.CAP_PORT_FILTER: True} if 'network_id' in port_data: net_bindings = nsxv_db.get_network_bindings( context.session, port_data['network_id']) if net_bindings: port_data[pbin.VIF_DETAILS][pbin.VIF_DETAILS_VLAN] = ( net_bindings[0].vlan_id) def get_port(self, context, id, fields=None): port = super(NsxVPluginV2, self).get_port(context, id, fields=None) 
self._extend_get_port_dict_qos_and_binding(context, port) return db_utils.resource_fields(port, fields) def get_ports(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): filters = filters or {} self._update_filters_with_sec_group(context, filters) with db_api.CONTEXT_READER.using(context): ports = ( super(NsxVPluginV2, self).get_ports( context, filters, fields, sorts, limit, marker, page_reverse)) self._log_get_ports(ports, filters) # Add the relevant port extensions for port in ports[:]: self._extend_get_port_dict_qos_and_binding(context, port) return (ports if not fields else [db_utils.resource_fields(port, fields) for port in ports]) def delete_port(self, context, id, l3_port_check=True, nw_gw_port_check=True, force_delete_dhcp=False, allow_delete_internal=False, allow_delete_lb_if=False): # Send delete port notification to any interested service plugin registry.publish(resources.PORT, events.BEFORE_DELETE, self, payload=events.DBEventPayload( context, resource_id=id, metadata={'port_check': l3_port_check})) neutron_db_port = self.get_port(context, id) device_id = neutron_db_port['device_id'] is_compute_port = self._is_compute_port(neutron_db_port) if not allow_delete_internal: self._validate_internal_network( context, neutron_db_port['network_id']) if (not allow_delete_lb_if and neutron_db_port.get('device_owner') and neutron_db_port['device_owner'] == lb_common.LBAAS_DEVICE_OWNER): msg = _("Cannot delete LB interface port") raise n_exc.InvalidInput(error_message=msg) if is_compute_port and device_id: # Lock on the device ID to make sure we do not change/delete # ports of the same device at the same time with locking.LockManager.get_lock( 'port-device-%s' % device_id): return self._delete_port(context, id, l3_port_check, nw_gw_port_check, neutron_db_port, force_delete_dhcp) else: return self._delete_port(context, id, l3_port_check, nw_gw_port_check, neutron_db_port, force_delete_dhcp) def _delete_port(self, 
context, id, l3_port_check, nw_gw_port_check, neutron_db_port, force_delete_dhcp=False): """Deletes a port on a specified Virtual Network. If the port contains a remote interface attachment, the remote interface is first un-plugged and then the port is deleted. :returns: None :raises: exception.PortInUse :raises: exception.PortNotFound :raises: exception.NetworkNotFound """ # if needed, check to see if this is a port owned by # a l3 router. If so, we should prevent deletion here if l3_port_check: self.prevent_l3_port_deletion(context, id) if (not force_delete_dhcp and neutron_db_port['device_owner'] in [constants.DEVICE_OWNER_DHCP]): msg = (_('Can not delete DHCP port %s') % neutron_db_port['id']) raise n_exc.BadRequest(resource='port', msg=msg) # If this port is attached to a device, remove the corresponding vnic # from all NSXv Security-Groups and the spoofguard policy port_index = neutron_db_port.get(ext_vnic_idx.VNIC_INDEX) compute_port = self._is_compute_port(neutron_db_port) if validators.is_attr_set(port_index): vnic_id = self._get_port_vnic_id(port_index, neutron_db_port['device_id']) sgids = neutron_db_port.get(ext_sg.SECURITYGROUPS) self._delete_security_groups_port_mapping( context.session, vnic_id, sgids) # if service insertion is enabled - remove this vnic from the # service insertion security group if self._si_handler.enabled and neutron_db_port[psec.PORTSECURITY]: self._remove_member_from_security_group(self._si_handler.sg_id, vnic_id) if (cfg.CONF.nsxv.spoofguard_enabled and neutron_db_port[psec.PORTSECURITY]): try: self._remove_vnic_from_spoofguard_policy( context.session, neutron_db_port['network_id'], vnic_id) except Exception as e: LOG.error('Could not delete the spoofguard policy. 
' 'Exception %s', e) if not neutron_db_port[psec.PORTSECURITY] and compute_port: device_id = neutron_db_port['device_id'] # Note that we expect to find 1 relevant port in the DB still # because this port was not yet deleted self._remove_vm_from_exclude_list(context, device_id, id, expected_count=1) self.disassociate_floatingips(context, id) with db_api.CONTEXT_WRITER.using(context): super(NsxVPluginV2, self).delete_port(context, id) # deleting the dhcp binding anyway # (even if not compute port to be on the safe side) self._delete_dhcp_static_binding( context, neutron_db_port, log_error=(True if compute_port else False)) def base_delete_subnet(self, context, subnet_id): with locking.LockManager.get_lock('neutron-base-subnet'): super(NsxVPluginV2, self).delete_subnet(context, subnet_id) def delete_subnet(self, context, id): subnet = self._get_subnet(context, id) filters = {'fixed_ips': {'subnet_id': [id]}} ports = self.get_ports(context, filters=filters) # Add nsx-dhcp-edge-pool here is because we first delete the subnet in # db.locking if the subnet overlaps with another new creating subnet, # there is a chance that the new creating subnet select the deleting # subnet's edge and send update dhcp interface rest call before # deleting subnet's corresponding dhcp interface rest call and lead to # overlap response from backend. 
network_id = subnet['network_id'] self._validate_internal_network(context, network_id) with locking.LockManager.get_lock(network_id): with locking.LockManager.get_lock('nsx-dhcp-edge-pool'): with db_api.CONTEXT_WRITER.using(context): self.base_delete_subnet(context, id) if subnet['enable_dhcp']: # There is only DHCP port available if len(ports) == 1: port = ports.pop() # This is done out of the transaction as it invokes # update_port which interfaces with the NSX self.ipam.delete_port(context, port['id']) # Delete the DHCP edge service filters = {'network_id': [network_id]} remaining_subnets = self.get_subnets(context, filters=filters) if len(remaining_subnets) == 0: self._cleanup_dhcp_edge_before_deletion( context, network_id) LOG.debug("Delete the DHCP service for network %s", network_id) self.edge_manager.delete_dhcp_edge_service(context, network_id) else: # Update address group and delete the DHCP port only self._update_dhcp_address(context, network_id) def _is_overlapping_reserved_subnets(self, subnet): """Return True if the subnet overlaps with reserved subnets. 
For the V plugin we have a limitation that we should not use some reserved ranges like: 169.254.128.0/17 and 169.254.1.0/24 """ # translate the given subnet to a range object data = subnet['subnet'] if data['cidr'] not in (constants.ATTR_NOT_SPECIFIED, None): reserved_subnets = list(nsxv_constants.RESERVED_IPS) reserved_subnets.append(cfg.CONF.nsxv.vdr_transit_network) return edge_utils.is_overlapping_reserved_subnets(data['cidr'], reserved_subnets) return False def _get_dhcp_ip_addr_from_subnet(self, context, subnet_id): dhcp_port_filters = {'fixed_ips': {'subnet_id': [subnet_id]}, 'device_owner': [constants.DEVICE_OWNER_DHCP]} dhcp_ports = self.get_ports(context, filters=dhcp_port_filters) if dhcp_ports and dhcp_ports[0].get('fixed_ips'): return dhcp_ports[0]['fixed_ips'][0]['ip_address'] def is_dhcp_metadata(self, context, subnet_id): try: subnet = self.get_subnet(context, subnet_id) except n_exc.SubnetNotFound: LOG.debug("subnet %s not found to determine its dhcp meta", subnet_id) return False return bool(subnet['enable_dhcp'] and self.metadata_proxy_handler) def create_subnet_bulk(self, context, subnets): collection = "subnets" items = subnets[collection] new_subnets = [] for item in items: try: s = self.create_subnet(context, item) new_subnets.append(s) except Exception as e: LOG.error('Unable to create bulk subnets. Failed to ' 'create item %(item)s. Rolling back. ' 'Error: %(e)s', {'item': item, 'e': e}) for subnet in new_subnets: s_id = subnet['id'] try: self.delete_subnet(context, s_id) except Exception: LOG.error('Unable to delete subnet %s', s_id) raise return new_subnets def base_create_subnet(self, context, subnet): with locking.LockManager.get_lock('neutron-base-subnet'): return super(NsxVPluginV2, self).create_subnet(context, subnet) def create_subnet(self, context, subnet): """Create subnet on nsx_v provider network. 
If the subnet is created with DHCP enabled, and the network which the subnet is attached is not bound to an DHCP Edge, nsx_v will create the Edge and make sure the network is bound to the Edge """ self._validate_host_routes_input(subnet) if subnet['subnet']['enable_dhcp']: self._validate_external_subnet(context, subnet['subnet']['network_id']) data = subnet['subnet'] if (data.get('ip_version') == 6 or (data['cidr'] not in (constants.ATTR_NOT_SPECIFIED, None) and netaddr.IPNetwork(data['cidr']).version == 6)): err_msg = _("No support for DHCP for IPv6") raise n_exc.InvalidInput(error_message=err_msg) if self._is_overlapping_reserved_subnets(subnet): err_msg = _("The requested subnet contains reserved IP's") raise n_exc.InvalidInput(error_message=err_msg) with locking.LockManager.get_lock(subnet['subnet']['network_id']): s = self.base_create_subnet(context, subnet) self._extension_manager.process_create_subnet( context, subnet['subnet'], s) if s['enable_dhcp']: try: self._process_subnet_ext_attr_create( session=context.session, subnet_db=s, subnet_req=data) self._update_dhcp_service_with_subnet(context, s) except Exception: with excutils.save_and_reraise_exception(): self.base_delete_subnet(context, s['id']) return s def _process_subnet_ext_attr_create(self, session, subnet_db, subnet_req): # Verify if dns search domain/dhcp mtu for subnet are configured dns_search_domain = subnet_req.get( ext_dns_search_domain.DNS_SEARCH_DOMAIN) dhcp_mtu = subnet_req.get( ext_dhcp_mtu.DHCP_MTU) if (not validators.is_attr_set(dns_search_domain) and not validators.is_attr_set(dhcp_mtu)): return if not validators.is_attr_set(dns_search_domain): dns_search_domain = None if not validators.is_attr_set(dhcp_mtu): dhcp_mtu = None sub_binding = nsxv_db.get_nsxv_subnet_ext_attributes( session=session, subnet_id=subnet_db['id']) # Create a subnet extensions for subnet if it does not exist if not sub_binding: nsxv_db.add_nsxv_subnet_ext_attributes( session=session, subnet_id=subnet_db['id'], 
dns_search_domain=dns_search_domain, dhcp_mtu=dhcp_mtu) # Else update only if a new values for subnet extensions are provided elif (sub_binding.dns_search_domain != dns_search_domain or sub_binding.dhcp_mtu != dhcp_mtu): nsxv_db.update_nsxv_subnet_ext_attributes( session=session, subnet_id=subnet_db['id'], dns_search_domain=dns_search_domain, dhcp_mtu=dhcp_mtu) subnet_db['dns_search_domain'] = dns_search_domain subnet_db['dhcp_mtu'] = dhcp_mtu def _process_subnet_ext_attr_update(self, session, subnet_db, subnet_req): update_dhcp_config = False # Update extended attributes for subnet if (ext_dns_search_domain.DNS_SEARCH_DOMAIN in subnet_req or ext_dhcp_mtu.DHCP_MTU in subnet_req): self._process_subnet_ext_attr_create(session, subnet_db, subnet_req) update_dhcp_config = True return update_dhcp_config def _update_routers_on_gateway_change(self, context, subnet_id, new_gateway): """Update all relevant router edges that the nexthop changed.""" port_filters = {'device_owner': [l3_db.DEVICE_OWNER_ROUTER_GW], 'fixed_ips': {'subnet_id': [subnet_id]}} intf_ports = self.get_ports(context.elevated(), filters=port_filters) router_ids = [port['device_id'] for port in intf_ports] for router_id in router_ids: router_driver = self._find_router_driver(context, router_id) router_driver._update_nexthop(context, router_id, new_gateway) def update_subnet(self, context, id, subnet): # Lock the subnet so that no other conflicting action can occur on # the same subnet with locking.LockManager.get_lock('subnet-%s' % id): return self._safe_update_subnet(context, id, subnet) def _safe_update_subnet(self, context, id, subnet): s = subnet['subnet'] orig = self._get_subnet(context, id) gateway_ip = orig['gateway_ip'] enable_dhcp = orig['enable_dhcp'] orig_host_routes = orig['routes'] self._validate_external_subnet(context, orig['network_id']) self._validate_host_routes_input(subnet, orig_enable_dhcp=enable_dhcp, orig_host_routes=orig_host_routes) subnet = super(NsxVPluginV2, 
self).update_subnet(context, id, subnet) self._extension_manager.process_update_subnet(context, s, subnet) update_dhcp_config = self._process_subnet_ext_attr_update( context.session, subnet, s) if (gateway_ip != subnet['gateway_ip'] or update_dhcp_config or set(orig['dns_nameservers']) != set(subnet['dns_nameservers']) or orig_host_routes != subnet['host_routes'] or enable_dhcp and not subnet['enable_dhcp']): # Need to ensure that all of the subnet attributes will be reloaded # when creating the edge bindings. Without adding this the original # subnet details are provided. context.session.expire_all() # Update the edge network_id = subnet['network_id'] self.edge_manager.update_dhcp_edge_bindings(context, network_id) # also update routers that use this subnet as their gateway if gateway_ip != subnet['gateway_ip']: self._update_routers_on_gateway_change(context, id, subnet['gateway_ip']) if enable_dhcp != subnet['enable_dhcp']: self._update_subnet_dhcp_status(subnet, context) return subnet @staticmethod @resource_extend.extends([subnet_def.COLLECTION_NAME]) def _extend_subnet_dict_extended_attributes(subnet_res, subnet_db): subnet_attr = subnet_db.get('nsxv_subnet_attributes') if subnet_attr: subnet_res['dns_search_domain'] = subnet_attr.dns_search_domain subnet_res['dhcp_mtu'] = subnet_attr.dhcp_mtu def _is_subnet_gw_a_vdr(self, context, subnet): filters = {'fixed_ips': {'subnet_id': [subnet['id']], 'ip_address': [subnet['gateway_ip']]}} ports = self.get_ports(context, filters=filters) if ports and ports[0].get('device_id'): rtr_id = ports[0].get('device_id') rtr = self.get_router(context, rtr_id) if rtr and rtr.get('distributed'): return rtr_id def _update_subnet_dhcp_status(self, subnet, context): network_id = subnet['network_id'] if subnet['enable_dhcp']: # Check if the network has one related dhcp edge resource_id = (vcns_const.DHCP_EDGE_PREFIX + network_id)[:36] edge_binding = nsxv_db.get_nsxv_router_binding(context.session, resource_id) if edge_binding: # 
Create DHCP port port_dict = {'name': '', 'admin_state_up': True, 'network_id': network_id, 'tenant_id': subnet['tenant_id'], 'fixed_ips': [{'subnet_id': subnet['id']}], 'device_owner': constants.DEVICE_OWNER_DHCP, 'device_id': n_utils.get_dhcp_agent_device_id( network_id, 'nsxv'), 'mac_address': constants.ATTR_NOT_SPECIFIED } self.create_port(context, {'port': port_dict}) # First time binding network with dhcp edge else: with locking.LockManager.get_lock(subnet['network_id']): self._update_dhcp_service_with_subnet(context, subnet) return else: # delete dhcp port filters = {'fixed_ips': {'subnet_id': [subnet['id']]}} ports = self.get_ports(context, filters=filters) for port in ports: if port["device_owner"] == constants.DEVICE_OWNER_DHCP: self.ipam.delete_port(context, port['id']) # Delete the DHCP edge service network_id = subnet['network_id'] filters = {'network_id': [network_id]} subnets = self.get_subnets(context, filters=filters) cleaup_edge = True for s in subnets: if s['enable_dhcp']: cleaup_edge = False if cleaup_edge: self._cleanup_dhcp_edge_before_deletion( context, network_id) LOG.debug("Delete the DHCP service for network %s", network_id) self.edge_manager.delete_dhcp_edge_service(context, network_id) return self._update_dhcp_address(context, network_id) def _get_conflict_network_ids_by_overlapping(self, context, subnets): with locking.LockManager.get_lock('nsx-networking'): conflict_network_ids = [] subnet_ids = [subnet['id'] for subnet in subnets] conflict_set = netaddr.IPSet( [subnet['cidr'] for subnet in subnets]) subnets_qry = context.session.query(models_v2.Subnet).all() subnets_all = [subnet for subnet in subnets_qry if subnet['id'] not in subnet_ids] for subnet in subnets_all: cidr_set = netaddr.IPSet([subnet['cidr']]) if cidr_set & conflict_set: conflict_network_ids.append(subnet['network_id']) return conflict_network_ids def _get_conflicting_networks_for_subnet(self, context, subnet): """Return a list if networks IDs conflicting with requested 
subnet The requested subnet cannot be placed on the same DHCP edge as the conflicting networks. A network will be conflicting with the current subnet if: 1. overlapping ips 2. provider networks with different physical network 3. flat provider network with any other flat network 4. if not share_edges_between_tenants: networks of different tenants """ subnet_net = subnet['network_id'] subnet_tenant = subnet['tenant_id'] # The DHCP for network with different physical network can not be used # The flat network should be located in different DHCP conflicting_networks = [] all_networks = self.get_networks(context.elevated(), fields=['id', 'tenant_id']) phy_net = nsxv_db.get_network_bindings(context.session, subnet_net) if phy_net: binding_type = phy_net[0]['binding_type'] phy_uuid = phy_net[0]['phy_uuid'] for net_id in all_networks: p_net = nsxv_db.get_network_bindings(context.session, net_id['id']) if (p_net and binding_type == p_net[0]['binding_type'] and binding_type == c_utils.NsxVNetworkTypes.FLAT): conflicting_networks.append(net_id['id']) elif (p_net and phy_uuid != p_net[0]['phy_uuid']): conflicting_networks.append(net_id['id']) # get conflicting networks of other tenants if not cfg.CONF.nsxv.share_edges_between_tenants: for another_net in all_networks: if (another_net['id'] != subnet_net and another_net['tenant_id'] != subnet_tenant): conflicting_networks.append(another_net['id']) # get all of the subnets on the network, there may be more than one filters = {'network_id': [subnet_net]} subnets = super(NsxVPluginV2, self).get_subnets(context.elevated(), filters=filters) # Query all networks with overlap subnet if cfg.CONF.allow_overlapping_ips: conflicting_networks.extend( self._get_conflict_network_ids_by_overlapping( context.elevated(), subnets)) conflicting_networks = list(set(conflicting_networks)) return conflicting_networks def _get_edge_id_by_rtr_id(self, context, rtr_id): binding = nsxv_db.get_nsxv_router_binding( context.session, rtr_id) if binding: 
return binding['edge_id'] def _get_edge_id_and_az_by_rtr_id(self, context, rtr_id): binding = nsxv_db.get_nsxv_router_binding( context.session, rtr_id) if binding: return binding['edge_id'], binding['availability_zone'] return None, None def _update_dhcp_service_new_edge(self, context, resource_id): edge_id, az_name = self._get_edge_id_and_az_by_rtr_id( context, resource_id) if edge_id: with locking.LockManager.get_lock(str(edge_id)): if self.metadata_proxy_handler: LOG.debug('Update metadata for resource %s az=%s', resource_id, az_name) md_proxy = self.get_metadata_proxy_handler(az_name) if md_proxy: md_proxy.configure_router_edge(context, resource_id) self.setup_dhcp_edge_fw_rules(context, self, resource_id) def _update_dhcp_service_with_subnet(self, context, subnet): network_id = subnet['network_id'] # Create DHCP port port_dict = {'name': '', 'admin_state_up': True, 'network_id': network_id, 'tenant_id': subnet['tenant_id'], 'fixed_ips': [{'subnet_id': subnet['id']}], 'device_owner': constants.DEVICE_OWNER_DHCP, 'device_id': n_utils.get_dhcp_agent_device_id( network_id, 'nsxv'), 'mac_address': constants.ATTR_NOT_SPECIFIED } self.create_port(context, {'port': port_dict}) try: self.edge_manager.create_dhcp_edge_service(context, network_id, subnet) # Create all dhcp ports within the network self._update_dhcp_address(context, network_id) except Exception: with excutils.save_and_reraise_exception(): LOG.exception("Failed to update DHCP for subnet %s", subnet['id']) def setup_dhcp_edge_fw_rules(self, context, plugin, router_id): rules = [] loose_ver = version.LooseVersion(self.nsx_v.vcns.get_version()) if loose_ver < version.LooseVersion('6.3.2'): # For these versions the raw icmp rule will not work due to # backend bug. 
Workaround: use applications, but since # application ids can change, we look them up by application name try: application_ids = plugin.nsx_v.get_icmp_echo_application_ids() rules = [{"name": "ICMPPing", "enabled": True, "action": "allow", "application": { "applicationId": application_ids}}] except Exception as e: LOG.error( 'Could not find ICMP Echo application. Exception %s', e) else: # For newer versions, we can use the raw icmp rule rules = [{"name": "ICMPPing", "enabled": True, "action": "allow", "protocol": "icmp", "icmp_type": 8}] if plugin.metadata_proxy_handler: rules += nsx_v_md_proxy.get_router_fw_rules() try: edge_utils.update_firewall(plugin.nsx_v, context, router_id, {'firewall_rule_list': rules}, allow_external=False) except Exception as e: # On failure, log that we couldn't configure the firewall on the # Edge appliance. This won't break the DHCP functionality LOG.error( 'Could not set up DHCP Edge firewall. Exception %s', e) def _create_network_dhcp_address_group(self, context, network_id): """Create dhcp address group for subnets attached to the network.""" filters = {'network_id': [network_id], 'device_owner': [constants.DEVICE_OWNER_DHCP]} ports = self.get_ports(context, filters=filters) filters = {'network_id': [network_id], 'enable_dhcp': [True]} subnets = self.get_subnets(context, filters=filters) address_groups = [] for subnet in subnets: address_group = {} ip_found = False for port in ports: fixed_ips = port['fixed_ips'] for fip in fixed_ips: s_id = fip['subnet_id'] ip_addr = fip['ip_address'] if s_id == subnet['id'] and self._is_valid_ip(ip_addr): address_group['primaryAddress'] = ip_addr ip_found = True break if ip_found: net = netaddr.IPNetwork(subnet['cidr']) address_group['subnetPrefixLength'] = str(net.prefixlen) address_groups.append(address_group) LOG.debug("Update the DHCP address group to %s", address_groups) return address_groups def _validate_router_size(self, router): # Check if router-size is specified. 
router-size can only be specified # for an exclusive non-distributed router; else raise a BadRequest # exception. r = router['router'] if validators.is_attr_set(r.get(ROUTER_SIZE)): if r.get('router_type') == constants.SHARED: msg = _("Cannot specify router-size for shared router") raise n_exc.BadRequest(resource="router", msg=msg) elif r.get('distributed') is True: msg = _("Cannot specify router-size for distributed router") raise n_exc.BadRequest(resource="router", msg=msg) else: if r.get('router_type') == nsxv_constants.EXCLUSIVE: r[ROUTER_SIZE] = cfg.CONF.nsxv.exclusive_router_appliance_size def _get_router_flavor_profile(self, context, flavor_id): flv_plugin = directory.get_plugin(plugin_const.FLAVORS) if not flv_plugin: msg = _("Flavors plugin not found") raise n_exc.BadRequest(resource="router", msg=msg) # Will raise FlavorNotFound if doesn't exist fl_db = flavors_plugin.FlavorsPlugin.get_flavor( flv_plugin, context, flavor_id) if fl_db['service_type'] != plugin_const.L3: raise n_exc.InvalidFlavorServiceType( service_type=fl_db['service_type']) if not fl_db['enabled']: raise flav_exc.FlavorDisabled() # get the profile (Currently only 1 is supported, so take the first) if not fl_db['service_profiles']: return profile_id = fl_db['service_profiles'][0] return flavors_plugin.FlavorsPlugin.get_service_profile( flv_plugin, context, profile_id) def _get_flavor_metainfo_from_profile(self, flavor_id, flavor_profile): if not flavor_profile: return {} metainfo_string = flavor_profile.get('metainfo').replace("'", "\"") try: metainfo = jsonutils.loads(metainfo_string) if not isinstance(metainfo, dict): LOG.warning("Skipping router flavor %(flavor)s metainfo " "[%(metainfo)s]: expected a dictionary", {'flavor': flavor_id, 'metainfo': metainfo_string}) metainfo = {} except ValueError as e: LOG.warning("Error reading router flavor %(flavor)s metainfo " "[%(metainfo)s]: %(error)s", {'flavor': flavor_id, 'metainfo': metainfo_string, 'error': e}) metainfo = {} return metainfo 
    def get_flavor_metainfo(self, context, flavor_id):
        """Retrieve metainfo from first profile of specified flavor"""
        flavor_profile = self._get_router_flavor_profile(context, flavor_id)
        return self._get_flavor_metainfo_from_profile(flavor_id,
                                                      flavor_profile)

    def _get_router_config_from_flavor(self, context, router):
        """Validate the router flavor and initialize router data

        Validate that the flavor is legit, and that contradicting
        configuration does not exist.
        Also update the router data to reflect the selected flavor.
        """
        # No-op when the router was created without a flavor
        if not validators.is_attr_set(router.get('flavor_id')):
            return
        metainfo = self.get_flavor_metainfo(context, router['flavor_id'])

        # Go over the attributes of the metainfo
        allowed_keys = [ROUTER_SIZE, 'router_type', 'distributed',
                        az_def.AZ_HINTS]
        # This info will be used later on
        # and is not part of standard router config
        future_use_keys = ['syslog']
        for k, v in metainfo.items():
            if k in allowed_keys:
                # special case for availability zones hints which are an array
                if k == az_def.AZ_HINTS:
                    if not isinstance(v, list):
                        v = [v]
                    # The default az hints is an empty array
                    # Reject a user-supplied value when the flavor profile
                    # already defines one
                    if (validators.is_attr_set(router.get(k)) and
                        len(router[k]) > 0):
                        msg = (_("Cannot specify %s if the flavor profile "
                                 "defines it") % k)
                        raise n_exc.BadRequest(resource="router", msg=msg)
                elif validators.is_attr_set(router.get(k)) and router[k] != v:
                    msg = _("Cannot specify %s if the flavor defines it") % k
                    raise n_exc.BadRequest(resource="router", msg=msg)
                # Legal value
                router[k] = v
            elif k in future_use_keys:
                pass
            else:
                LOG.warning("Skipping router flavor metainfo [%(k)s:%(v)s]"
                            ":unsupported field",
                            {'k': k, 'v': v})

    def create_router(self, context, router, allow_metadata=True):
        # Apply flavor config, decide the router type/size and validate
        # the AZ hints before creating anything
        r = router['router']
        self._get_router_config_from_flavor(context, r)
        self._decide_router_type(context, r)
        self._validate_router_size(router)
        self._validate_availability_zones_in_obj(context, 'router', r)
        # First extract the gateway info in case of updating
        # gateway before edge is deployed.
        # TODO(berlin): admin_state_up and routes update
        gw_info = self._extract_external_gw(context, router)
        # Create the neutron DB router first
        lrouter = super(NsxVPluginV2, self).create_router(context, router)
        with db_api.CONTEXT_WRITER.using(context):
            router_db = self._get_router(context, lrouter['id'])
            self._process_extra_attr_router_create(context, router_db, r)
            self._process_nsx_router_create(context, router_db, r)
            self._process_router_flavor_create(context, router_db, r)
        try:
            router_driver = self._get_router_driver(context, router_db)
        except Exception:
            LOG.exception("Failed to create router %s", router)
            # Roll back the neutron DB router before re-raising
            with excutils.save_and_reraise_exception():
                self.delete_router(context, lrouter['id'])
        with db_api.CONTEXT_READER.using(context):
            lrouter = super(NsxVPluginV2, self).get_router(context,
                                                           lrouter['id'])
        try:
            # Only exclusive routers carry an appliance size
            if router_driver.get_type() == nsxv_constants.EXCLUSIVE:
                router_driver.create_router(
                    context, lrouter,
                    appliance_size=r.get(ROUTER_SIZE),
                    allow_metadata=(allow_metadata and
                                    self.metadata_proxy_handler))
            else:
                router_driver.create_router(
                    context, lrouter,
                    allow_metadata=(allow_metadata and
                                    self.metadata_proxy_handler))
            if gw_info != constants.ATTR_NOT_SPECIFIED and gw_info:
                self._update_router_gw_info(
                    context, lrouter['id'], gw_info)
        except Exception:
            LOG.exception("Failed to create router %s", router)
            # Roll back the router (DB + backend) before re-raising
            with excutils.save_and_reraise_exception():
                self.delete_router(context, lrouter['id'])

        # re-read the router with the updated data, and return it
        with db_api.CONTEXT_READER.using(context):
            return self.get_router(context, lrouter['id'])

    def _validate_router_migration(self, context, router_id,
                                   new_router_type, router):
        """Validate that a router can be migrated to the new router type.

        Raises InvalidInput when migrating to a shared router that would
        keep static routes, a load balancer, or FWaaS attached.
        """
        if new_router_type == 'shared':
            # shared router cannot have static routes
            # verify that the original router did not have static routes
            err_msg = _('Unable to create a shared router with static routes')
            routes = self._get_extra_routes_by_router_id(context, router_id)
            if len(routes) > 0:
                raise n_exc.InvalidInput(error_message=err_msg)

            # verify that the updated router does not have static routes
            if (validators.is_attr_set(router.get("routes")) and
                len(router['routes']) > 0):
                raise n_exc.InvalidInput(error_message=err_msg)

            # shared router cannot be attached to a loadbalancer
            edge_id = self._get_edge_id_by_rtr_id(context, router_id)
            if edge_id:
                lb_bind = nsxv_db.get_nsxv_lbaas_loadbalancer_binding_by_edge(
                    context.session, edge_id)
                if lb_bind:
                    err_msg = _('Unable to create a shared router with a load '
                                'balancer')
                    raise n_exc.InvalidInput(error_message=err_msg)

            # shared router cannot be attached to a fwaas
            if (self.fwaas_callbacks and
                self.fwaas_callbacks.should_apply_firewall_to_router(
                    context, router, router_id)):
                err_msg = _('Unable to create a shared router with FWaaS')
                raise n_exc.InvalidInput(error_message=err_msg)

    def update_router(self, context, router_id, router):
        # Serialize updates per router to avoid concurrent edge operations
        with locking.LockManager.get_lock('router-%s' % router_id):
            return self._safe_update_router(context, router_id, router)

    def _safe_update_router(self, context, router_id, router):
        """Update a router, handling router-type migration if requested.

        Called under the per-router lock taken by update_router.
        """
        # Validate that the gateway information is relevant
        gw_info = self._extract_external_gw(context, router, is_extract=False)

        # Toggling the distributed flag is not supported
        if 'distributed' in router['router']:
            r = self.get_router(context, router_id)
            if r['distributed'] != router['router']['distributed']:
                err_msg = _('Unable to update distributed mode')
                raise n_exc.InvalidInput(error_message=err_msg)

        # Toggling router type is supported only for non-distributed router
        elif 'router_type' in router['router']:
            r = self.get_router(context, router_id)
            if r.get('router_type') != router['router']['router_type']:
                if r["distributed"]:
                    err_msg = _('Unable to update distributed mode')
                    raise n_exc.InvalidInput(error_message=err_msg)
                else:
                    # should migrate the router because its type changed
                    new_router_type = router['router']['router_type']
                    self._validate_router_size(router)
                    self._validate_router_migration(
                        context, router_id, new_router_type, r)

                    # remove the router from the old pool, and free resources
                    old_router_driver = \
                        self._router_managers.get_tenant_router_driver(
                            context, r['router_type'])
                    old_router_driver.detach_router(context, router_id,
                                                    router)

                    # update the router-type
                    with db_api.CONTEXT_WRITER.using(context):
                        router_db = self._get_router(context, router_id)
                        self._process_nsx_router_create(
                            context, router_db, router['router'])

                    # update availability zone
                    router['router']['availability_zone_hints'] = r.get(
                        'availability_zone_hints')

                    # add the router to the new pool
                    appliance_size = router['router'].get(ROUTER_SIZE)
                    new_router_driver = \
                        self._router_managers.get_tenant_router_driver(
                            context, new_router_type)
                    new_router_driver.attach_router(
                        context, router_id, router,
                        appliance_size=appliance_size)
                    # continue to update the router with the new driver
                    # but remove the router-size that was already updated
                    router['router'].pop(ROUTER_SIZE, None)

        # When SNAT is disabled, validate address scopes of all router
        # interfaces against the gateway network
        if (validators.is_attr_set(gw_info) and
            not gw_info.get('enable_snat', cfg.CONF.enable_snat_by_default)):
            router_ports = self._get_router_interfaces(context, router_id)
            for port in router_ports:
                for fip in port['fixed_ips']:
                    self._validate_address_scope_for_router_interface(
                        context.elevated(), router_id, gw_info['network_id'],
                        fip['subnet_id'])

        router_driver = self._find_router_driver(context, router_id)
        return router_driver.update_router(context, router_id, router)

    def _check_router_in_use(self, context, router_id):
        """Raise if the router still has floating IPs or interface ports,
        or if it is an NSX internal router.
        """
        with db_api.CONTEXT_READER.using(context):
            # Ensure that the router is not used
            router_filter = {'router_id': [router_id]}
            fips = self.get_floatingips_count(context.elevated(),
                                              filters=router_filter)
            if fips:
                raise l3_exc.RouterInUse(router_id=router_id)

            device_filter = {'device_id': [router_id],
                             'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF]}
            ports = self.get_ports_count(context.elevated(),
                                         filters=device_filter)
            if ports:
                raise l3_exc.RouterInUse(router_id=router_id)

            if nsxv_db.get_nsxv_internal_edge_by_router(
                context.elevated().session, router_id):
                msg = _("Cannot delete internal router %s") % router_id
                raise n_exc.InvalidInput(error_message=msg)

    def delete_router(self, context, id):
        self._check_router_in_use(context, id)
        router_driver = self._find_router_driver(context, id)
        # Clear vdr's gw relative components if the router has gw info
        if router_driver.get_type() == "distributed":
            router = self.get_router(context, id)
            if router.get(l3_apidef.EXTERNAL_GW_INFO):
                try:
                    router_driver._update_router_gw_info(context, id, {})
                except Exception as e:
                    # Do not fail router deletion
                    LOG.error("Failed to remove router %(rtr)s GW info before "
                              "deletion: %(e)s",
                              {'e': e, 'rtr': id})
        # Delete from the neutron DB first, then from the backend
        super(NsxVPluginV2, self).delete_router(context, id)
        router_driver.delete_router(context, id)

    def get_availability_zone_name_by_edge(self, context, edge_id):
        # Return the AZ recorded for the edge, or the default AZ name
        az_name = nsxv_db.get_edge_availability_zone(
            context.session, edge_id)
        if az_name:
            return az_name
        # fallback
        return nsx_az.DEFAULT_NAME

    def get_network_availability_zones(self, net_db):
        context = n_context.get_admin_context()
        return self._get_network_availability_zones(context, net_db)

    def _get_network_availability_zones(self, context, net_db):
        """Return availability zones which a network belongs to.

        Return only the actual az the dhcp edge is deployed on.
        If there is no edge - the availability zones list is empty.
        """
        resource_id = (vcns_const.DHCP_EDGE_PREFIX + net_db["id"])[:36]
        dhcp_edge_binding = nsxv_db.get_nsxv_router_binding(
            context.session, resource_id)
        if dhcp_edge_binding:
            return [dhcp_edge_binding['availability_zone']]
        return []

    def get_router_availability_zones(self, router):
        """Return availability zones which a router belongs to.

        Return only the actual az the router edge is deployed on.
        If there is no edge - the availability zones list is empty.
        """
        context = n_context.get_admin_context()
        binding = nsxv_db.get_nsxv_router_binding(
            context.session, router['id'])
        if binding:
            return [binding['availability_zone']]
        return []

    def _process_router_flavor_create(self, context, router_db, r):
        """Update the router DB structure with the flavor ID upon creation"""
        if validators.is_attr_set(r.get('flavor_id')):
            router_db.flavor_id = r['flavor_id']

    @staticmethod
    @resource_extend.extends([l3_apidef.ROUTERS])
    def add_flavor_id(router_res, router_db):
        # Extend router API responses with the flavor id from the DB
        router_res['flavor_id'] = router_db['flavor_id']

    def get_router(self, context, id, fields=None):
        """Return a router, hiding router_type for distributed routers and
        adding the appliance size for exclusive routers.
        """
        router = super(NsxVPluginV2, self).get_router(context, id, fields)
        if router.get("distributed") and 'router_type' in router:
            del router['router_type']
        if router.get("router_type") == nsxv_constants.EXCLUSIVE:
            binding = nsxv_db.get_nsxv_router_binding(context.session,
                                                      router.get("id"))
            if binding:
                router[ROUTER_SIZE] = binding.get("appliance_size")
            else:
                LOG.error("No binding for router %s", id)
        return router

    def _get_external_attachment_info(self, context, router):
        """Return (ip address, set of subnet cidrs, nexthop) of the router
        gateway port; each element is None when not available.
        """
        gw_port = router.gw_port
        ipaddress = None
        netmask = None
        nexthop = None

        if gw_port:
            # TODO(berlin): we can only support gw port with one fixed ip at
            # present.
            if gw_port.get('fixed_ips'):
                ipaddress = gw_port['fixed_ips'][0]['ip_address']
                subnet_id = gw_port['fixed_ips'][0]['subnet_id']
                subnet = self.get_subnet(context.elevated(), subnet_id)
                nexthop = subnet['gateway_ip']

            network_id = gw_port.get('network_id')
            if network_id:
                ext_net = self._get_network(context, network_id)
                if not ext_net.external:
                    msg = (_("Network '%s' is not a valid external "
                             "network") % network_id)
                    raise n_exc.BadRequest(resource='router', msg=msg)
                if ext_net.subnets:
                    netmask = set([str(ext_subnet.cidr)
                                   for ext_subnet in ext_net.subnets])

        return (ipaddress, netmask, nexthop)

    def _add_network_info_for_routes(self, context, routes, ports):
        # Annotate each route (in place) with the network id of the port
        # whose subnet contains the nexthop, and mark external networks
        for route in routes:
            for port in ports:
                for ip in port['fixed_ips']:
                    subnet = self.get_subnet(context.elevated(),
                                             ip['subnet_id'])
                    if netaddr.all_matching_cidrs(
                        route['nexthop'], [subnet['cidr']]):
                        net = self.get_network(context.elevated(),
                                               subnet['network_id'])
                        route['network_id'] = net['id']
                        if net.get(extnet_apidef.EXTERNAL):
                            route['external'] = True

    def _prepare_edge_extra_routes(self, context, router_id):
        # Build the static-route list for the edge, annotated with
        # network info for each nexthop
        routes = self._get_extra_routes_by_router_id(context, router_id)
        filters = {'device_id': [router_id]}
        ports = self.get_ports(context.elevated(), filters)
        self._add_network_info_for_routes(context, routes, ports)
        return routes

    def _update_routes(self, context, router_id, nexthop):
        # Push the router's static routes to the backend edge
        routes = self._prepare_edge_extra_routes(context, router_id)
        edge_utils.update_routes(self.nsx_v, context, router_id,
                                 routes, nexthop)

    def _update_current_gw_port(self, context, router_id, router, ext_ips):
        """Override this function in order not to call plugins' update_port
        since the actual backend work was already done by the router driver,
        and it may cause a deadlock.
        """
        port_data = {'fixed_ips': ext_ips}
        updated_port = super(NsxVPluginV2, self).update_port(
            context, router.gw_port['id'], {'port': port_data})
        self._extension_manager.process_update_port(
            context, port_data, updated_port)
        registry.publish(resources.ROUTER_GATEWAY,
                         events.AFTER_UPDATE,
                         self._update_current_gw_port,
                         payload=events.DBEventPayload(
                             context, resource_id=router_id,
                             states=(router,),
                             metadata={
                                 'network_id': router.gw_port.network_id,
                                 'updated_port': updated_port
                             }))
        context.session.expire(router.gw_port)

    def _update_router_gw_info(self, context, router_id, info,
                               is_routes_update=False,
                               force_update=False):
        """Update the router gateway info via the router-type driver."""
        with db_api.CONTEXT_WRITER.using(context):
            # use the reader context a this might be called from create_router
            router_db = self._get_router(context, router_id)
            router_driver = self._get_router_driver(context, router_db)
            if info:
                try:
                    ext_ips = info.get('external_fixed_ips')
                    network_id = info.get('network_id')
                    org_enable_snat = router_db.enable_snat
                    # Ensure that a router cannot have SNAT disabled if there
                    # are floating IP's assigned
                    if ('enable_snat' in info and
                        org_enable_snat != info.get('enable_snat') and
                        info.get('enable_snat') is False and
                        self.router_gw_port_has_floating_ips(context,
                                                             router_id)):
                        msg = _("Unable to set SNAT disabled. Floating IPs "
                                "assigned")
                        raise n_exc.InvalidInput(error_message=msg)

                    # for multiple external subnets support, we need to set gw
                    # port first on subnet which has gateway. If can't get one
                    # subnet with gateway or allocate one available ip from
                    # subnet, we would just enter normal logic and admin
                    # should exactly know what he did.
                    # No explicit fixed IPs and no current gw port: try to
                    # pick a subnet that has a gateway IP
                    if (not ext_ips and network_id and
                        (not router_db.gw_port or
                         not router_db.gw_port.get('fixed_ips'))):
                        net_id_filter = {'network_id': [network_id]}
                        subnets = self.get_subnets(context,
                                                   filters=net_id_filter)
                        fixed_subnet = True
                        if len(subnets) <= 1:
                            fixed_subnet = False
                        else:
                            # SLAAC/auto-address subnets cannot be fixed
                            for subnet in subnets:
                                if ipv6_utils.is_auto_address_subnet(subnet):
                                    fixed_subnet = False
                        if fixed_subnet:
                            for subnet in subnets:
                                if not subnet['gateway_ip']:
                                    continue
                                try:
                                    info['external_fixed_ips'] = [{
                                        'subnet_id': subnet['id']}]
                                    return router_driver._update_router_gw_info(
                                        context, router_id, info,
                                        is_routes_update=is_routes_update)
                                except n_exc.IpAddressGenerationFailure:
                                    # Try the next subnet
                                    del info['external_fixed_ips']
                                    LOG.warning("Cannot get one subnet with gateway "
                                                "to allocate one available gw ip")
                    router_driver._update_router_gw_info(
                        context, router_id, info,
                        is_routes_update=is_routes_update,
                        force_update=force_update)
                except vsh_exc.VcnsApiException:
                    # Backend failure: clear the gw info before re-raising
                    with excutils.save_and_reraise_exception():
                        LOG.error("Failed to update gw_info %(info)s on "
                                  "router %(router_id)s",
                                  {'info': str(info),
                                   'router_id': router_id})
                        router_driver._update_router_gw_info(
                            context, router_id, {},
                            is_routes_update=is_routes_update,
                            force_update=force_update)
            else:
                router_driver._update_router_gw_info(
                    context, router_id, info,
                    is_routes_update=is_routes_update,
                    force_update=force_update)

    def _get_internal_network_ids_by_router(self, context, router_id):
        # Return the distinct network ids of the router interface ports
        ports_qry = context.session.query(models_v2.Port)
        intf_ports = ports_qry.filter_by(
            device_id=router_id,
            device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF).all()
        intf_net_ids = list(set([port['network_id'] for port in intf_ports]))
        return intf_net_ids

    def _get_address_groups(self, context, router_id, network_id):
        """Build NSX address groups (primary address + prefix length) for
        the router interface ports on the given network.
        """
        address_groups = []
        ports = self._get_router_interface_ports_by_network(
            context, router_id, network_id)
        for port in ports:
            address_group = {}
            gateway_ip = port['fixed_ips'][0]['ip_address']
            subnet = self.get_subnet(context,
                                     port['fixed_ips'][0]['subnet_id'])
            prefixlen = str(netaddr.IPNetwork(subnet['cidr']).prefixlen)
            address_group['primaryAddress'] = gateway_ip
            address_group['subnetPrefixLength'] = prefixlen
            address_groups.append(address_group)
        return address_groups

    def _get_dnat_rules(self, context, router):
        # DNAT rules: one per floating IP that is associated with a port
        fip_qry = context.session.query(l3_db_models.FloatingIP)
        fip_db = fip_qry.filter_by(router_id=router['id']).all()
        dnat = [{'dst': fip.floating_ip_address,
                 'translated': fip.fixed_ip_address}
                for fip in fip_db if fip.fixed_port_id]
        return dnat

    def _get_nat_rules(self, context, router):
        """Return (snat, dnat) rule lists for the router edge."""
        snat = []
        dnat = self._get_dnat_rules(context, router)

        gw_port = router.gw_port
        if gw_port and gw_port.get('fixed_ips') and router.enable_snat:
            snat_ip = gw_port['fixed_ips'][0]['ip_address']
            subnets = self._load_router_subnet_cidrs_from_db(
                context.elevated(), router['id'])
            gw_address_scope = self._get_network_address_scope(
                context.elevated(), gw_port['network_id'])
            for subnet in subnets:
                # Do not build NAT rules for v6
                if subnet.get('ip_version') == 6:
                    continue
                # if the subnets address scope is the same as the gateways:
                # no need for SNAT
                subnet_address_scope = self._get_subnetpool_address_scope(
                    context.elevated(), subnet['subnetpool_id'])
                if (gw_address_scope and
                    gw_address_scope == subnet_address_scope):
                    LOG.info("No need for SNAT rule for router %(router)s "
                             "and subnet %(subnet)s because they use the "
                             "same address scope %(addr_scope)s.",
                             {'router': router['id'],
                              'subnet': subnet['id'],
                              'addr_scope': gw_address_scope})
                    continue
                snat.append(self._get_default_nat_rule(
                    context, router['id'], subnet, snat_ip))
        return (snat, dnat)

    def _get_default_nat_rule(self, context, router_id, subnet, snat_ip):
        """Build an SNAT rule for the subnet; pin it to the external vnic
        unless the router AZ binds floating IPs to all interfaces.
        """
        binding = nsxv_db.get_nsxv_router_binding(context.session, router_id)
        bind_to_all = False
        if binding:
            azs = nsx_az.NsxVAvailabilityZones()
            az = azs.get_availability_zone(binding['availability_zone'])
            bind_to_all = az.bind_floatingip_to_all_interfaces
        rule = {'src': subnet['cidr'], 'translated': snat_ip}
        if not bind_to_all:
            rule['vnic_index'] = vcns_const.EXTERNAL_VNIC_INDEX
        return rule

    def _get_nosnat_subnets_fw_rules(self, context, router, subnets=None):
        """Open edge firewall holes for nosnat subnets to do static routes."""
        no_snat_fw_rules = []
        gw_port = router.gw_port
        if gw_port and not router.enable_snat:
            subnet_cidrs = self._find_router_subnets_cidrs(context.elevated(),
                                                           router['id'],
                                                           subnets)
            if subnet_cidrs:
                no_snat_fw_rules.append({
                    'name': NO_SNAT_RULE_NAME,
                    'action': 'allow',
                    'enabled': True,
                    'source_vnic_groups': ["external"],
                    'destination_ip_address': subnet_cidrs})
        return no_snat_fw_rules

    def _get_allocation_pools_fw_rule(self, context, router, subnets=None):
        """Get the firewall rule for the default gateway address pool

        Return the firewall rule that should be added in order to allow
        not SNAT-ed traffic to external gateway with the same address scope as
        the interfaces
        """
        gw_port = router.gw_port
        if not gw_port or not router.enable_snat:
            return

        gw_address_scope = self._get_network_address_scope(
            context.elevated(), gw_port['network_id'])
        if gw_address_scope is None:
            return

        if not subnets:
            subnets = self._load_router_subnet_cidrs_from_db(
                context.elevated(), router['id'])
        no_nat_cidrs = []
        for subnet in subnets:
            # if the subnets address scope is the same as the gateways:
            # we should add it to the rule
            subnet_address_scope = self._get_subnetpool_address_scope(
                context.elevated(), subnet['subnetpool_id'])
            if (gw_address_scope == subnet_address_scope):
                no_nat_cidrs.append(subnet['cidr'])
        if no_nat_cidrs:
            return {'name': ALLOCATION_POOL_RULE_NAME,
                    'action': 'allow',
                    'enabled': True,
                    'source_vnic_groups': ["external"],
                    'destination_ip_address': no_nat_cidrs}

    def _get_dnat_fw_rule(self, context, router):
        # Get FW rule to open dnat firewall flows
        dnat_rules = self._get_dnat_rules(context, router)
        dnat_cidrs = [rule['dst'] for rule in dnat_rules]
        if dnat_cidrs:
            return {
                'name': DNAT_RULE_NAME,
                'action': 'allow',
                'enabled': True,
                'destination_ip_address': dnat_cidrs}

    def _get_subnet_fw_rules(self, context, router, subnets=None):
        # Get FW rule/s to open subnets firewall flows and static routes
        # relative flows
        fw_rules = []
        subnet_cidrs_per_ads = self._find_router_subnets_cidrs_per_addr_scope(
            context.elevated(), router['id'], subnets=subnets)
        routes = self._get_extra_routes_by_router_id(context, router['id'])
        routes_dest = [route['destination'] for route in routes]
        for subnet_cidrs in subnet_cidrs_per_ads:
            # create a rule to allow east-west traffic between subnets on this
            # address scope
            # Also add the static routes to each address scope
            ips = subnet_cidrs + routes_dest
            fw_rules.append({
                'name': SUBNET_RULE_NAME,
                'action': 'allow',
                'enabled': True,
                'source_ip_address': ips,
                'destination_ip_address': ips})
        return fw_rules

    def _update_nat_rules(self, context, router, router_id=None):
        # Recompute and push SNAT/DNAT rules to the backend edge
        snat, dnat = self._get_nat_rules(context, router)
        if not router_id:
            router_id = router['id']
        edge_utils.update_nat_rules(
            self.nsx_v, context, router_id, snat, dnat)

    def recalculate_snat_rules_for_router(self, context, router, subnets):
        """Recalculate router snat rules for specific subnets.
        Invoked when subnetpool address scope changes.
        """
        # Recalculate all nat rules for all subnets of the router
        router_db = self._get_router(context, router['id'])
        self._update_nat_rules(context, router_db)

    def recalculate_fw_rules_for_router(self, context, router, subnets):
        """Recalculate router fw rules for specific subnets.
        Invoked when subnetpool address scope changes.
        """
        # Recalculate all fw rules for all subnets of the router
        router_db = self._get_router(context, router['id'])
        self._update_subnets_and_dnat_firewall(context, router_db)

    def _check_intf_number_of_router(self, context, router_id):
        # Enforce the NSXv per-edge interface limit
        intf_ports = self._get_port_by_device_id(
            context, router_id, l3_db.DEVICE_OWNER_ROUTER_INTF)
        if len(intf_ports) >= (vcns_const.MAX_INTF_NUM):
            err_msg = _("Interfaces number on router: %(router_id)s "
                        "has reached the maximum %(number)d which NSXv can "
                        "support. Please use vdr if you want to add unlimited "
                        "interfaces") % {'router_id': router_id,
                                         'number': vcns_const.MAX_INTF_NUM}
            raise nsx_exc.ServiceOverQuota(overs="router-interface-add",
                                           err_msg=err_msg)

    def _update_router_admin_state(self, context, router_id,
                                   router_type, admin_state):
        # Collecting all router interfaces and updating the connection status
        # for each one to reflect the router admin-state-up status.
        intf_net_ids = (
            self._get_internal_network_ids_by_router(context, router_id))
        edge_id = self._get_edge_id_by_rtr_id(context, router_id)
        if not edge_id:
            LOG.warning("Cannot update router %s admin state: no edge id "
                        "found", router_id)
        # NOTE(review): execution continues even when no edge id was found,
        # so get_lock() below is taken with edge_id=None - this looks like a
        # missing early return; confirm intended behavior.
        with locking.LockManager.get_lock(edge_id):
            for network_id in intf_net_ids:
                address_groups = (
                    self._get_address_groups(context, router_id, network_id))
                update_args = (self.nsx_v, context, router_id, network_id,
                               address_groups, admin_state)
                if router_type == 'distributed':
                    edge_utils.update_vdr_internal_interface(*update_args)
                else:
                    edge_utils.update_internal_interface(*update_args)

    def _get_interface_info(self, context, interface_info):
        """Resolve (network_id, subnet_id) from an add/remove-interface
        request that carries either a port_id or a subnet_id.
        """
        is_port, is_sub = self._validate_interface_info(interface_info)
        if is_port:
            port = self._check_router_port(context,
                                           interface_info['port_id'], '')
            subnet_id = port['fixed_ips'][0]['subnet_id']
            net_id = port['network_id']
        elif is_sub:
            subnet_id = interface_info['subnet_id']
            net_id = self.get_subnet(
                context, subnet_id)['network_id']
        return net_id, subnet_id

    def add_router_interface(self, context, router_id, interface_info):
        router = self.get_router(context, router_id)
        net_id, subnet_id = self._get_interface_info(context, interface_info)
        network = self.get_network(context.elevated(), net_id)

        # Do not support external subnet/port as a router interface
        if network.get(extnet_apidef.EXTERNAL):
            msg = _("cannot add an external subnet/port as a router interface")
            raise n_exc.InvalidInput(error_message=msg)

        # With SNAT disabled the interface subnet must share the gateway's
        # address scope
        snat_disabled = (router[l3_apidef.EXTERNAL_GW_INFO] and
                         not router[l3_apidef.EXTERNAL_GW_INFO]['enable_snat'])
        if snat_disabled and subnet_id:
            gw_network_id = router[l3_apidef.EXTERNAL_GW_INFO]['network_id']
            self._validate_address_scope_for_router_interface(
                context.elevated(), router_id, gw_network_id, subnet_id)

        router_driver = self._find_router_driver(context, router_id)
        try:
            return router_driver.add_router_interface(
                context, router_id, interface_info)
        except vsh_exc.VcnsApiException as e:
            LOG.error("Failed to add interface_info %(info)s on "
                      "router %(router_id)s: %(e)s",
                      {'info': str(interface_info),
                       'router_id': router_id,
                       'e': e.message})
            try:
                router_driver.remove_router_interface(
                    context, router_id, interface_info)
            except Exception:
                # Rollback may fail if creation failed too early
                pass
            raise nsx_exc.NsxPluginException(err_msg=e.message)

    def remove_router_interface(self, context, router_id, interface_info):
        # Get the router interface port id
        if self.fwaas_callbacks:
            port_id = interface_info.get('port_id')
            if not port_id:
                # Look the port up by its subnet
                subnet_id = interface_info['subnet_id']
                subnet = self._get_subnet(context, subnet_id)
                rport_qry = context.session.query(models_v2.Port)
                ports = rport_qry.filter_by(
                    device_id=router_id,
                    device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF,
                    network_id=subnet['network_id'])
                for p in ports:
                    if p['fixed_ips'][0]['subnet_id'] == subnet_id:
                        port_id = p['id']
                        break

        router_driver = self._find_router_driver(context, router_id)
        result = router_driver.remove_router_interface(
            context, router_id, interface_info)

        # inform the FWaaS that interface port was removed
        if self.fwaas_callbacks and port_id:
            self.fwaas_callbacks.delete_port(context, port_id)

        return result

    def _get_floatingips_by_router(self, context, router_id):
        # Floating IP addresses of the router that are bound to a port
        fip_qry = context.session.query(l3_db_models.FloatingIP)
        fip_db = fip_qry.filter_by(router_id=router_id).all()
        return [fip.floating_ip_address
                for fip in fip_db
                if fip.fixed_port_id]

    def _update_external_interface(self, context, router, router_id=None):
        """Push the router external interface config (gw address, netmasks
        and floating IPs as secondary addresses) to the backend edge.
        """
        ext_net_id = router.gw_port_id and router.gw_port.network_id
        addr, mask, nexthop = self._get_external_attachment_info(
            context, router)
        secondary = self._get_floatingips_by_router(context, router['id'])
        if not router_id:
            router_id = router['id']
        self.edge_manager.update_external_interface(
            self.nsx_v, context, router_id, ext_net_id,
            addr, mask, secondary)

    def _set_floatingip_status(self, context, floatingip_db, status=None):
        # Default status: ACTIVE when associated with a router, else DOWN
        if not status:
            status = (constants.FLOATINGIP_STATUS_ACTIVE
                      if floatingip_db.get('router_id')
                      else constants.FLOATINGIP_STATUS_DOWN)
        if floatingip_db['status'] != status:
            floatingip_db['status'] = status
            self.update_floatingip_status(context, floatingip_db['id'],
                                          status)

    def _update_edge_router(self, context, router_id):
        # Delegate the edge refresh to the router-type driver
        router_driver = self._find_router_driver(context, router_id)
        router_driver._update_edge_router(context, router_id)

    def create_floatingip(self, context, floatingip):
        fip_db = super(NsxVPluginV2, self).create_floatingip(
            context, floatingip)
        router_id = fip_db['router_id']
        if router_id:
            try:
                self._update_edge_router(context, router_id)
            except Exception:
                # Roll back the DB floating IP before re-raising
                with excutils.save_and_reraise_exception():
                    LOG.exception("Failed to update edge router")
                    super(NsxVPluginV2, self).delete_floatingip(context,
                                                                fip_db['id'])
        self._set_floatingip_status(context, fip_db)
        return fip_db

    def update_floatingip(self, context, id, floatingip):
        old_fip = self._get_floatingip(context, id)
        old_router_id = old_fip.router_id
        old_port_id = old_fip.fixed_port_id
        fip_db = super(NsxVPluginV2, self).update_floatingip(
            context, id, floatingip)
        router_id = fip_db.get('router_id')
        try:
            # Update old router's nat rules if old_router_id is not None.
            if old_router_id:
                self._update_edge_router(context, old_router_id)
            # Update current router's nat rules if router_id is not None.
            if router_id:
                self._update_edge_router(context, router_id)
        except Exception:
            # Restore the previous port association before re-raising
            with excutils.save_and_reraise_exception():
                LOG.exception("Failed to update edge router")
                super(NsxVPluginV2, self).update_floatingip(
                    context, id, {'floatingip': {'port_id': old_port_id}})
        self._set_floatingip_status(context, fip_db)
        return fip_db

    def delete_floatingip(self, context, id):
        fip_db = self._get_floatingip(context, id)
        router_id = None
        # Only an associated floating IP requires an edge update
        if fip_db.fixed_port_id:
            router_id = fip_db.router_id
        super(NsxVPluginV2, self).delete_floatingip(context, id)
        if router_id:
            self._update_edge_router(context, router_id)

    def disassociate_floatingips(self, context, port_id):
        router_id = None
        try:
            fip_qry = context.session.query(l3_db_models.FloatingIP)
            fip_db = fip_qry.filter_by(fixed_port_id=port_id)
            for fip in fip_db:
                if fip.router_id:
                    router_id = fip.router_id
                    break
        except sa_exc.NoResultFound:
            # NOTE(review): filter_by() iteration does not raise
            # NoResultFound - this handler looks unreachable; confirm.
            router_id = None
        super(NsxVPluginV2, self).disassociate_floatingips(context, port_id)
        if router_id:
            self._update_edge_router(context, router_id)

    def _update_subnets_and_dnat_firewall(self, context, router_db,
                                          router_id=None):
        """Update the router edge firewall with all the relevant rules.

        router_db is the neutron router structure
        router_id is the id of the actual router that will be updated on
        the NSX (in case of distributed router it can be plr or tlr)
        This is just a wrapper of update_router_firewall
        """
        if not router_id:
            router_id = router_db['id']
        self.update_router_firewall(context, router_id, router_db)

    def update_router_firewall(self, context, router_id, router_db):
        """Recreate all rules in the router edge firewall

        router_db is the neutron router structure
        router_id is the id of the actual router that will be updated on
        the NSX (in case of distributed router it can be plr or tlr)
        """
        fw_rules = []
        distributed = False
        if router_db:
            nsx_attr = router_db.get('nsx_attributes', {})
            distributed = (
                nsx_attr.get('distributed', False) if nsx_attr else False)
        edge_id = self._get_edge_id_by_rtr_id(context, router_id)

        # Add FW rule/s to open subnets firewall flows and static routes
        # relative flows
        subnets = self._load_router_subnet_cidrs_from_db(context.elevated(),
                                                         router_id)
        subnet_rules = self._get_subnet_fw_rules(context, router_db,
                                                 subnets=subnets)
        if subnet_rules:
            fw_rules.extend(subnet_rules)

        # If metadata service is enabled, block access to inter-edge network
        if self.metadata_proxy_handler and not distributed:
            fw_rules += nsx_v_md_proxy.get_router_fw_rules()

        # Add FWaaS rules if FWaaS is enabled
        if (self.fwaas_callbacks and
            self.fwaas_callbacks.should_apply_firewall_to_router(
                context, router_db, router_id)):
            fwaas_rules = self.fwaas_callbacks.get_fwaas_rules_for_router(
                context, router_db['id'], router_db, edge_id)
            if fwaas_rules:
                fw_rules += fwaas_rules

        # The rules added from here forward are relevant only for interface
        # ports without fwaas firewall group
        # To allow this traffic on interfaces with firewall group, the user
        # should add specific rules.
        # Add rule to open DNAT-ed traffic
        dnat_rule = self._get_dnat_fw_rule(context, router_db)
        if dnat_rule:
            fw_rules.append(dnat_rule)

        # Add rule for not NAT-ed allocation pools
        alloc_pool_rule = self._get_allocation_pools_fw_rule(
            context, router_db, subnets=subnets)
        if alloc_pool_rule:
            fw_rules.append(alloc_pool_rule)

        # Add no-snat rules
        nosnat_fw_rules = self._get_nosnat_subnets_fw_rules(
            context, router_db, subnets=subnets)
        fw_rules.extend(nosnat_fw_rules)

        # Add VPN rules from the VPN plugin driver, if loaded
        vpn_plugin = directory.get_plugin(plugin_const.VPN)
        if vpn_plugin:
            vpn_driver = vpn_plugin.drivers[vpn_plugin.default_provider]
            vpn_rules = (
                vpn_driver._generate_ipsecvpn_firewall_rules(
                    self.plugin_type(), context, edge_id=edge_id))
            fw_rules.extend(vpn_rules)

        # Get the load balancer rules in case they are refreshed
        # (relevant only for older LB that are still on the router edge)
        lb_rules = nsxv_db.get_nsxv_lbaas_loadbalancer_binding_by_edge(
            context.session, edge_id)
        for rule in lb_rules:
            vsm_rule = self.nsx_v.vcns.get_firewall_rule(
                edge_id, rule['edge_fw_rule_id'])[1]
            lb_fw_rule = {
                'action': edge_firewall_driver.FWAAS_ALLOW,
                'enabled': vsm_rule['enabled'],
                'destination_ip_address': vsm_rule['destination']['ipAddress'],
                'name': vsm_rule['name'],
                'ruleId': vsm_rule['ruleId']
            }
            fw_rules.append(lb_fw_rule)

        fw = {'firewall_rule_list': fw_rules}
        try:
            edge_utils.update_firewall(self.nsx_v, context, router_id, fw)
        except vsh_exc.ResourceNotFound:
            LOG.error("Failed to update firewall for router %s",
                      router_id)

    def _delete_nsx_security_group(self, nsx_sg_id, nsx_policy):
        """Helper method to delete nsx security group."""
        if nsx_sg_id is not None:
            if nsx_policy:
                # First remove this security group from the NSX policy,
                # Or else the delete will fail
                try:
                    with locking.LockManager.get_lock(
                        'neutron-security-policy-' + str(nsx_policy)):
                        self.nsx_sg_utils.del_nsx_security_group_from_policy(
                            nsx_policy, nsx_sg_id)
                except Exception as e:
                    # Best effort: log and continue with the deletion
                    LOG.warning("Failed to remove nsx security group "
                                "%(id)s from policy %(pol)s : %(e)s",
                                {'id': nsx_sg_id, 'pol': nsx_policy,
                                 'e': e})
            self.nsx_v.vcns.delete_security_group(nsx_sg_id)

    # Security group handling section #
    def _delete_section(self, section_uri):
        """Helper method to delete nsx rule section."""
        if section_uri is not None:
            self.nsx_v.vcns.delete_section(section_uri)

    def _get_section_uri(self, session, security_group_id):
        # Return the NSX fw section id mapped to the security group,
        # or None when there is no mapping
        mapping = nsxv_db.get_nsx_section(session, security_group_id)
        if mapping is not None:
            return mapping['ip_section_id']

    def _create_fw_section_for_security_group(self, context,
                                              securitygroup, nsx_sg_id):
        """Create an NSX firewall section for the security group rules and
        persist the section/rule mappings in the neutron DB.
        """
        logging = (cfg.CONF.nsxv.log_security_groups_allowed_traffic or
                   securitygroup[sg_logging.LOGGING])
        # Provider security groups get deny rules
        action = 'deny' if securitygroup[provider_sg.PROVIDER] else 'allow'
        section_name = self.nsx_sg_utils.get_nsx_section_name(securitygroup)
        nsx_rules = []
        # Translate Neutron rules to NSXv fw rules and construct the fw
        # section
        for rule in securitygroup['security_group_rules']:
            nsx_rule = self._create_nsx_rule(
                context, rule, nsx_sg_id, logged=logging, action=action)
            nsx_rules.append(nsx_rule)
        section = self.nsx_sg_utils.get_section_with_rules(
            section_name, nsx_rules)

        # Execute REST API for creating the section
        h, c = self.nsx_v.vcns.create_section(
            'ip', self.nsx_sg_utils.to_xml_string(section),
            insert_top=securitygroup[provider_sg.PROVIDER],
            insert_before=self.default_section)
        rule_pairs = self.nsx_sg_utils.get_rule_id_pair_from_section(c)

        # Add database associations for fw section and rules
        nsxv_db.add_neutron_nsx_section_mapping(
            context.session, securitygroup['id'], h['location'])
        for pair in rule_pairs:
            # Save nsx rule id in the DB for future access
            nsxv_db.add_neutron_nsx_rule_mapping(
                context.session, pair['neutron_id'], pair['nsx_id'])

    def _create_nsx_security_group(self, context, securitygroup):
        """Create the backend NSX security group and save its moref mapping.

        Returns the new NSX security group id.
        """
        nsx_sg_name = self.nsx_sg_utils.get_nsx_sg_name(securitygroup)
        # NSX security-group config
        sg_dict = {"securitygroup":
                   {"name": nsx_sg_name,
                    "description": securitygroup['description']}}
        # Create the nsx security group
        h, nsx_sg_id = self.nsx_v.vcns.create_security_group(sg_dict)

        # Save moref in the DB for future access
        nsx_db.add_neutron_nsx_security_group_mapping(
            context.session, securitygroup['id'], nsx_sg_id)
        return nsx_sg_id

    def _process_security_group_create_backend_resources(self, context,
                                                         securitygroup):
        """Create the NSX backend resources for a new security group:
        the NSX security group and, unless policies are used, a firewall
        section for its rules.
        """
        nsx_sg_id = self._create_nsx_security_group(context, securitygroup)
        policy = securitygroup.get(sg_policy.POLICY)
        if self._use_nsx_policies and policy:
            # When using policies - no rules should be created.
            # just add the security group to the policy on the backend.
            self._update_nsx_security_group_policies(
                policy, None, nsx_sg_id)
        else:
            try:
                self._create_fw_section_for_security_group(
                    context, securitygroup, nsx_sg_id)
            except Exception:
                # Roll back the backend security group before re-raising
                with excutils.save_and_reraise_exception():
                    self._delete_nsx_security_group(nsx_sg_id, policy)

        if not securitygroup[provider_sg.PROVIDER]:
            # Add Security Group to the Security Groups container in order to
            # apply the default block rule.
            # This is relevant for policies security groups too.
            # provider security-groups should not have a default blocking
            # rule.
            self._add_member_to_security_group(self.sg_container_id,
                                               nsx_sg_id)

    def _validate_security_group(self, context, security_group, default_sg,
                                 id=None):
        # (continues past this chunk)
        if self._use_nsx_policies:
            new_policy = None
            sg_with_policy = False
            if not id:
                # called from create_security_group
                # must have a policy:
                if not security_group.get(sg_policy.POLICY):
                    if default_sg:
                        # For default sg the default policy will be used
                        security_group[sg_policy.POLICY] = (
                            cfg.CONF.nsxv.default_policy_id)
                    elif not cfg.CONF.nsxv.allow_tenant_rules_with_policy:
                        if context.is_admin:
                            msg = _('A security group must be assigned to a '
                                    'policy')
                        else:
                            msg = _('Creation of security group is not '
                                    'allowed')
                        raise n_exc.InvalidInput(error_message=msg)

                new_policy = security_group.get(sg_policy.POLICY)
                sg_with_policy = True if new_policy else False
            else:
                # called from update_security_group.
# Check if the existing security group has policy or not sg_with_policy = self._is_policy_security_group(context, id) if sg_policy.POLICY in security_group: new_policy = security_group[sg_policy.POLICY] if sg_with_policy and not new_policy: # cannot remove a policy from an existing sg msg = (_('Security group %s must be assigned to a ' 'policy') % id) raise n_exc.InvalidInput(error_message=msg) if not sg_with_policy and new_policy: # cannot add a policy to a non-policy security group msg = (_('Cannot add policy to an existing security ' 'group %s') % id) raise n_exc.InvalidInput(error_message=msg) # validate that the new policy exists (and not hidden) by using the # plugin getter that raises an exception if it fails. if new_policy: try: policy_obj = self.get_nsx_policy(context, new_policy) except n_exc.ObjectNotFound: msg = _('Policy %s was not found on the NSX') % new_policy raise n_exc.InvalidInput(error_message=msg) # Do not support logging with policy if sg_with_policy and security_group.get(sg_logging.LOGGING): msg = _('Cannot support logging when using NSX policies') raise n_exc.InvalidInput(error_message=msg) # Use the NSX policy description as the description of this # security group if the description was not set by the user # and the security group is new or policy was updated # if the nsx policy has not description - use its name if new_policy and not security_group.get('description'): security_group['description'] = ( policy_obj.get('description') or policy_obj.get('name'))[:db_const.DESCRIPTION_FIELD_SIZE] else: # must not have a policy: if security_group.get(sg_policy.POLICY): msg = _('The security group cannot be assigned to a policy') raise n_exc.InvalidInput(error_message=msg) def create_security_group(self, context, security_group, default_sg=False): """Create a security group.""" sg_data = security_group['security_group'] sg_id = sg_data["id"] = str(uuidutils.generate_uuid()) self._validate_security_group(context, sg_data, default_sg) with 
db_api.CONTEXT_WRITER.using(context): is_provider = True if sg_data.get(provider_sg.PROVIDER) else False is_policy = True if sg_data.get(sg_policy.POLICY) else False if is_provider or is_policy: new_sg = self.create_security_group_without_rules( context, security_group, default_sg, is_provider) else: new_sg = super(NsxVPluginV2, self).create_security_group( context, security_group, default_sg) self._process_security_group_properties_create( context, new_sg, sg_data, default_sg) try: self._process_security_group_create_backend_resources( context, new_sg) except Exception: # Couldn't create backend resources, rolling back neutron db # changes. with excutils.save_and_reraise_exception(): # Delete security-group and its associations from database, # Only admin can delete the default security-group if default_sg: context = context.elevated() super(NsxVPluginV2, self).delete_security_group(context, sg_id) LOG.exception('Failed to create security group') return new_sg def _update_security_group_with_policy(self, updated_group, sg_data, nsx_sg_id): """Handle security group update when using NSX policies Remove the security group from the old policies, and apply on the new policies """ # Verify that the policy was not removed from the security group if (sg_policy.POLICY in updated_group and not updated_group[sg_policy.POLICY]): msg = _('It is not allowed to remove the policy from security ' 'group %s') % nsx_sg_id raise n_exc.InvalidInput(error_message=msg) if (updated_group.get(sg_policy.POLICY) and updated_group[sg_policy.POLICY] != sg_data[sg_policy.POLICY]): new_policy = updated_group[sg_policy.POLICY] old_policy = sg_data[sg_policy.POLICY] self._update_nsx_security_group_policies( new_policy, old_policy, nsx_sg_id) def _update_nsx_security_group_policies(self, new_policy, old_policy, nsx_sg_id): # update the NSX security group to use this policy if old_policy: with locking.LockManager.get_lock( 'neutron-security-policy-' + str(old_policy)): 
self.nsx_sg_utils.del_nsx_security_group_from_policy( old_policy, nsx_sg_id) with locking.LockManager.get_lock( 'neutron-security-policy-' + str(new_policy)): self.nsx_sg_utils.add_nsx_security_group_to_policy( new_policy, nsx_sg_id) def update_security_group(self, context, id, security_group): s = security_group['security_group'] self._validate_security_group(context, s, False, id=id) self._prevent_non_admin_edit_provider_sg(context, id) nsx_sg_id = nsx_db.get_nsx_security_group_id(context.session, id, moref=True) section_uri = self._get_section_uri(context.session, id) section_needs_update = False sg_data = super(NsxVPluginV2, self).update_security_group( context, id, security_group) # Reflect security-group name or description changes in the backend, if set(['name', 'description']) & set(s.keys()): nsx_sg_name = self.nsx_sg_utils.get_nsx_sg_name(sg_data) section_name = self.nsx_sg_utils.get_nsx_section_name(sg_data) self.nsx_v.vcns.update_security_group( nsx_sg_id, nsx_sg_name, sg_data['description']) # security groups with NSX policy - update the backend policy attached # to the security group if (self._use_nsx_policies and self._is_policy_security_group(context, id)): if sg_policy.POLICY in sg_data: self._update_security_group_with_policy(s, sg_data, nsx_sg_id) # The rest of the update are not relevant to policies security # groups as there is no matching section self._process_security_group_properties_update( context, sg_data, s) return sg_data with locking.LockManager.get_lock('rule-update-%s' % id): # Get the backend section matching this security group h, c = self.nsx_v.vcns.get_section(section_uri) section = self.nsx_sg_utils.parse_section(c) # dfw section name needs to be updated if the sg name was modified if 'name' in s.keys(): section.attrib['name'] = section_name section_needs_update = True # Update the dfw section if security-group logging option has # changed. 
log_all_rules = cfg.CONF.nsxv.log_security_groups_allowed_traffic self._process_security_group_properties_update(context, sg_data, s) if not log_all_rules and context.is_admin: section_needs_update |= ( self.nsx_sg_utils.set_rules_logged_option( section, sg_data[sg_logging.LOGGING])) if section_needs_update: # update the section with all the modifications self.nsx_v.vcns.update_section( section_uri, self.nsx_sg_utils.to_xml_string(section), h) return sg_data def delete_security_group(self, context, id, delete_base=True): """Delete a security group.""" self._prevent_non_admin_edit_provider_sg(context, id) self._prevent_non_admin_delete_policy_sg(context, id) policy = self._get_security_group_policy(context, id) try: # Find nsx rule sections section_uri = self._get_section_uri(context.session, id) # Find nsx security group nsx_sg_id = nsx_db.get_nsx_security_group_id(context.session, id, moref=True) if delete_base: # Delete neutron security group super(NsxVPluginV2, self).delete_security_group(context, id) # Delete nsx rule sections self._delete_section(section_uri) # Delete nsx security group self._delete_nsx_security_group(nsx_sg_id, policy) except Exception: with excutils.save_and_reraise_exception(): LOG.exception("Failed to delete security group") def _translate_nsx_protocols(self, protocol): # The NSX only excepts '58' as icmp-v6 if (protocol == constants.PROTO_NAME_IPV6_ICMP or protocol == constants.PROTO_NAME_IPV6_ICMP_LEGACY): return str(constants.PROTO_NUM_IPV6_ICMP) # Some protocols are not supported and should be used as a number if protocol in UNSUPPORTED_RULE_NAMED_PROTOCOLS: return str(self._get_ip_proto_number(protocol)) return protocol def _create_nsx_rule(self, context, rule, nsx_sg_id=None, logged=False, action='allow'): src = None dest = None port = None protocol = None icmptype = None icmpcode = None flags = {} if nsx_sg_id is None: # Find nsx security group for neutron security group nsx_sg_id = nsx_db.get_nsx_security_group_id( context.session, 
rule['security_group_id'], moref=True) # Find the remote nsx security group id, which might be the current # one. In case of the default security-group, the associated # nsx-security-group wasn't written to the database yet. if rule['remote_group_id'] == rule['security_group_id']: remote_nsx_sg_id = nsx_sg_id else: remote_nsx_sg_id = nsx_db.get_nsx_security_group_id( context.session, rule['remote_group_id'], moref=True) # Get source and destination containers from rule if rule['direction'] == 'ingress': if rule.get(secgroup_rule_local_ip_prefix.LOCAL_IP_PREFIX): dest = self.nsx_sg_utils.get_remote_container( None, rule[secgroup_rule_local_ip_prefix.LOCAL_IP_PREFIX]) src = self.nsx_sg_utils.get_remote_container( remote_nsx_sg_id, rule['remote_ip_prefix']) dest = dest or self.nsx_sg_utils.get_container(nsx_sg_id) flags['direction'] = 'in' else: dest = self.nsx_sg_utils.get_remote_container( remote_nsx_sg_id, rule['remote_ip_prefix']) src = self.nsx_sg_utils.get_container(nsx_sg_id) flags['direction'] = 'out' protocol = self._translate_nsx_protocols(rule.get('protocol')) if rule['port_range_min'] is not None: if protocol == '1' or protocol == '58' or protocol == 'icmp': icmptype = str(rule['port_range_min']) if rule['port_range_max'] is not None: icmpcode = str(rule['port_range_max']) else: port = str(rule['port_range_min']) if rule['port_range_max'] != rule['port_range_min']: port = port + '-' + str(rule['port_range_max']) # Get the neutron rule id to use as name in nsxv rule name = rule.get('id') services = [(protocol, port, icmptype, icmpcode)] if protocol else [] flags['ethertype'] = rule.get('ethertype') # Add rule in nsx rule section nsx_rule = self.nsx_sg_utils.get_rule_config( applied_to_ids=[nsx_sg_id], name=name, source=src, destination=dest, services=services, flags=flags, action=action, logged=logged, tag='Project_%s' % rule['tenant_id'], notes=rule.get('description')) return nsx_rule def create_security_group_rule(self, context, security_group_rule, 
create_base=True): """Create a single security group rule.""" bulk_rule = {'security_group_rules': [security_group_rule]} return self.create_security_group_rule_bulk( context, bulk_rule, create_base=create_base)[0] def _validate_security_group_rules(self, context, rules): for rule in rules['security_group_rules']: r = rule.get('security_group_rule') port_based_proto = (self._get_ip_proto_number(r['protocol']) in PROTOCOLS_SUPPORTING_PORTS) if (not port_based_proto and (r['port_range_min'] is not None or r['port_range_max'] is not None)): msg = (_("Port values not valid for " "protocol: %s") % r['protocol']) raise n_exc.BadRequest(resource='security_group_rule', msg=msg) return super(NsxVPluginV2, self)._validate_security_group_rules( context, rules) def create_security_group_rule_bulk(self, context, security_group_rules, create_base=True): """Create security group rules. :param security_group_rules: list of rules to create """ sg_rules = security_group_rules['security_group_rules'] sg_id = sg_rules[0]['security_group_rule']['security_group_id'] self._prevent_non_admin_edit_provider_sg(context, sg_id) ruleids = set() nsx_rules = [] self._validate_security_group_rules(context, security_group_rules) if self._is_policy_security_group(context, sg_id): # If policies are/were enabled - creating rules is forbidden msg = (_('Cannot create rules for security group %s with' ' a policy') % sg_id) raise n_exc.InvalidInput(error_message=msg) with locking.LockManager.get_lock('rule-update-%s' % sg_id): # Querying DB for associated dfw section id section_uri = self._get_section_uri(context.session, sg_id) logging = self._is_security_group_logged(context, sg_id) provider = self._is_provider_security_group(context, sg_id) log_all_rules = cfg.CONF.nsxv.log_security_groups_allowed_traffic # Translating Neutron rules to Nsx DFW rules for r in sg_rules: rule = r['security_group_rule'] if not self._check_local_ip_prefix(context, rule): rule[secgroup_rule_local_ip_prefix.LOCAL_IP_PREFIX] 
= None self._fix_sg_rule_dict_ips(rule) rule['id'] = rule.get('id') or uuidutils.generate_uuid() ruleids.add(rule['id']) nsx_rules.append( self._create_nsx_rule(context, rule, logged=log_all_rules or logging, action='deny' if provider else 'allow') ) _h, _c = self.nsx_v.vcns.get_section(section_uri) section = self.nsx_sg_utils.parse_section(_c) self.nsx_sg_utils.fix_existing_section_rules(section) self.nsx_sg_utils.extend_section_with_rules(section, nsx_rules) try: h, c = self.nsx_v.vcns.update_section( section_uri, self.nsx_sg_utils.to_xml_string(section), _h) except vsh_exc.RequestBad as e: # Raise the original reason of the failure details = et.fromstring(e.response).find('details') raise n_exc.BadRequest( resource='security_group_rule', msg=details.text if details is not None else "Unknown") rule_pairs = self.nsx_sg_utils.get_rule_id_pair_from_section(c) try: # Save new rules in Database, including mappings between Nsx rules # and Neutron security-groups rules with db_api.CONTEXT_WRITER.using(context): if create_base: new_rule_list = super( NsxVPluginV2, self).create_security_group_rule_bulk_native( context, security_group_rules) for i, r in enumerate(sg_rules): self._process_security_group_rule_properties( context, new_rule_list[i], r['security_group_rule']) else: new_rule_list = sg_rules for pair in rule_pairs: neutron_rule_id = pair['neutron_id'] nsx_rule_id = pair['nsx_id'] if neutron_rule_id in ruleids: nsxv_db.add_neutron_nsx_rule_mapping( context.session, neutron_rule_id, nsx_rule_id) except Exception: with excutils.save_and_reraise_exception(): for nsx_rule_id in [p['nsx_id'] for p in rule_pairs if p['neutron_id'] in ruleids]: with locking.LockManager.get_lock('rule-update-%s' % sg_id): self.nsx_v.vcns.remove_rule_from_section( section_uri, nsx_rule_id) LOG.exception("Failed to create security group rule") return new_rule_list def delete_security_group_rule(self, context, id, delete_base=True): """Delete a security group rule.""" rule_db = 
self._get_security_group_rule(context, id) security_group_id = rule_db['security_group_id'] self._prevent_non_admin_edit_provider_sg(context, security_group_id) # Get the nsx rule from neutron DB and delete it nsx_rule_id = nsxv_db.get_nsx_rule_id(context.session, id) section_uri = self._get_section_uri( context.session, security_group_id) try: if nsx_rule_id and section_uri: with locking.LockManager.get_lock('rule-update-%s' % security_group_id): self.nsx_v.vcns.remove_rule_from_section( section_uri, nsx_rule_id) except vsh_exc.ResourceNotFound: LOG.debug("Security group rule %(id)s deleted, backend " "nsx-rule %(nsx_rule_id)s doesn't exist.", {'id': id, 'nsx_rule_id': nsx_rule_id}) if delete_base: obj_reg.load_class('SecurityGroupRule').delete_objects( context, id=id) def _remove_vnic_from_spoofguard_policy(self, session, net_id, vnic_id): policy_id = nsxv_db.get_spoofguard_policy_id(session, net_id) self.nsx_v.vcns.inactivate_vnic_assigned_addresses(policy_id, vnic_id) def _update_vnic_assigned_addresses(self, session, port, vnic_id): sg_policy_id = nsxv_db.get_spoofguard_policy_id( session, port['network_id']) if not sg_policy_id: LOG.warning("Spoofguard not defined for network %s", port['network_id']) return mac_addr = port['mac_address'] approved_addrs = [addr['ip_address'] for addr in port['fixed_ips']] # add in the address pair approved_addrs.extend( addr['ip_address'] for addr in port[addr_apidef.ADDRESS_PAIRS]) # add the IPv6 link-local address if there is an IPv6 address if any([netaddr.valid_ipv6(address) for address in approved_addrs]): lla = str(netutils.get_ipv6_addr_by_EUI64( constants.IPv6_LLA_PREFIX, mac_addr)) approved_addrs.append(lla) try: self.nsx_v.vcns.approve_assigned_addresses( sg_policy_id, vnic_id, mac_addr, approved_addrs) except vsh_exc.AlreadyExists: # Entry already configured on the NSX pass try: self.nsx_v.vcns.publish_assigned_addresses(sg_policy_id, vnic_id) except Exception as e: LOG.warning("Failed to publish entry for port 
%(port)s " "for vnic %(vnic)s: %(exc)s", {'port': port['id'], 'vnic': vnic_id, 'exc': str(e)}) def _is_compute_port(self, port): try: if (port['device_id'] and uuidutils.is_uuid_like(port['device_id']) and port['device_owner'].startswith('compute:')): return True except (KeyError, AttributeError): pass return False def _is_valid_ip(self, ip_addr): return netaddr.valid_ipv4(ip_addr) or netaddr.valid_ipv6(ip_addr) def _ensure_lock_operations(self): try: self.nsx_v.vcns.edges_lock_operation() except Exception: LOG.info("Unable to set manager lock operation") def _aggregate_publishing(self): try: self.nsx_v.vcns.configure_aggregate_publishing() except Exception: LOG.info("Unable to configure aggregate publishing") def _configure_reservations(self): ver = self.nsx_v.vcns.get_version() if version.LooseVersion(ver) < version.LooseVersion('6.2.3'): LOG.debug("Skipping reservation configuration. " "Not supported by version - %s.", ver) return try: self.nsx_v.vcns.configure_reservations() except Exception: LOG.info("Unable to configure edge reservations") def _validate_config(self): self.existing_dvs = self.nsx_v.vcns.get_dvs_list() if (cfg.CONF.nsxv.dvs_id and not self.nsx_v.vcns.validate_dvs(cfg.CONF.nsxv.dvs_id, dvs_list=self.existing_dvs)): raise nsx_exc.NsxResourceNotFound( res_name='dvs_id', res_id=cfg.CONF.nsxv.dvs_id) for dvs_id in self._availability_zones_data.get_additional_dvs_ids(): if not self.nsx_v.vcns.validate_dvs(dvs_id, dvs_list=self.existing_dvs): raise nsx_exc.NsxAZResourceNotFound( res_name='dvs_id', res_id=dvs_id) # validate network-vlan dvs ID's for dvs_id in self._network_vlans: if not self.nsx_v.vcns.validate_dvs(dvs_id, dvs_list=self.existing_dvs): raise nsx_exc.NsxResourceNotFound(res_name='dvs_id', res_id=dvs_id) # Validate the global & per-AZ validate_datacenter_moid if not self.nsx_v.vcns.validate_datacenter_moid( cfg.CONF.nsxv.datacenter_moid, during_init=True): raise nsx_exc.NsxResourceNotFound( res_name='datacenter_moid', 
res_id=cfg.CONF.nsxv.datacenter_moid) for dc in self._availability_zones_data.get_additional_datacenter(): if not self.nsx_v.vcns.validate_datacenter_moid( dc, during_init=True): raise nsx_exc.NsxAZResourceNotFound( res_name='datacenter_moid', res_id=dc) # Validate the global & per-AZ external_network if not self.nsx_v.vcns.validate_network( cfg.CONF.nsxv.external_network, during_init=True): raise nsx_exc.NsxResourceNotFound( res_name='external_network', res_id=cfg.CONF.nsxv.external_network) for ext_net in self._availability_zones_data.get_additional_ext_net(): if not self.nsx_v.vcns.validate_network( ext_net, during_init=True): raise nsx_exc.NsxAZResourceNotFound( res_name='external_network', res_id=ext_net) # Validate the global & per-AZ vdn_scope_id if not self.nsx_v.vcns.validate_vdn_scope(cfg.CONF.nsxv.vdn_scope_id): raise nsx_exc.NsxResourceNotFound( res_name='vdn_scope_id', res_id=cfg.CONF.nsxv.vdn_scope_id) for vdns in self._availability_zones_data.get_additional_vdn_scope(): if not self.nsx_v.vcns.validate_vdn_scope(vdns): raise nsx_exc.NsxAZResourceNotFound( res_name='vdn_scope_id', res_id=vdns) # Validate the global & per-AZ mgt_net_moid if (cfg.CONF.nsxv.mgt_net_moid and not self.nsx_v.vcns.validate_network( cfg.CONF.nsxv.mgt_net_moid)): raise nsx_exc.NsxResourceNotFound( res_name='mgt_net_moid', res_id=cfg.CONF.nsxv.mgt_net_moid) for mgmt_net in self._availability_zones_data.get_additional_mgt_net(): if not self.nsx_v.vcns.validate_network(mgmt_net): raise nsx_exc.NsxAZResourceNotFound( res_name='mgt_net_moid', res_id=mgmt_net) ver = self.nsx_v.vcns.get_version() if version.LooseVersion(ver) < version.LooseVersion('6.2.0'): LOG.warning("Skipping validations. 
Not supported by version.") return # Validate the host_groups for each AZ if cfg.CONF.nsxv.use_dvs_features: azs = self.get_azs_list() for az in azs: if az.edge_host_groups and az.edge_ha: if len(az.edge_host_groups) < 2: error = _("edge_host_groups must have at least 2 " "names") raise nsx_exc.NsxPluginException(err_msg=error) if (not az.ha_placement_random and len(az.edge_host_groups) > 2): LOG.warning("Availability zone %(az)s has %(count)s " "hostgroups. only the first 2 will be " "used until ha_placement_random is " "enabled", {'az': az.name, 'count': len(az.edge_host_groups)}) self._vcm.validate_host_groups(az.resource_pool, az.edge_host_groups) # Validations below only supported by 6.2.0 and above inventory = [(cfg.CONF.nsxv.resource_pool_id, 'resource_pool_id'), (cfg.CONF.nsxv.datastore_id, 'datastore_id'), (cfg.CONF.nsxv.ha_datastore_id, 'ha_datastore_id'), ] # Treat the cluster list for cluster in cfg.CONF.nsxv.cluster_moid: inventory.append((cluster, 'cluster_moid')) # Add the availability zones resources az_resources = self._availability_zones_data.get_inventory() for res in az_resources: inventory.append((res, 'availability_zone ' + res)) if cfg.CONF.nsxv.use_nsx_policies: # if use_nsx_policies=True, the default policy must be defined if not cfg.CONF.nsxv.default_policy_id: error = _("default_policy_id must be defined") raise nsx_exc.NsxPluginException(err_msg=error) inventory.append((cfg.CONF.nsxv.default_policy_id, 'default_policy_id')) for moref, field in inventory: if moref and not self.nsx_v.vcns.validate_inventory(moref): error = _("Configured %s not found") % field raise nsx_exc.NsxPluginException(err_msg=error) if cfg.CONF.nsxv.vdr_transit_network: edge_utils.validate_vdr_transit_network() # Validate configuration connectivity per AZ self._availability_zones_data.validate_connectivity(self.nsx_v.vcns) def _nsx_policy_is_hidden(self, policy): for attrib in policy.get('extendedAttributes', []): if (attrib['name'].lower() == 'ishidden' and 
attrib['value'].lower() == 'true'): return True return False def _nsx_policy_to_dict(self, policy): return {'id': policy['objectId'], 'name': policy.get('name'), 'description': policy.get('description')} def get_nsx_policy(self, context, id, fields=None): try: policy = self.nsx_v.vcns.get_security_policy(id, return_xml=False) except vsh_exc.ResourceNotFound: # no such policy on backend raise n_exc.ObjectNotFound(id=id) if self._nsx_policy_is_hidden(policy): # This is an hidden policy raise n_exc.ObjectNotFound(id=id) return self._nsx_policy_to_dict(policy) def get_nsx_policies(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): policies = self.nsx_v.vcns.get_security_policies() results = [] for policy in policies.get('policies', []): if not self._nsx_policy_is_hidden(policy): results.append(self._nsx_policy_to_dict(policy)) return results def _get_appservice_id(self, name): return self.nsx_v.vcns.get_application_id(name) def service_router_has_loadbalancers(self, context, router_id): # This api is used by Octavia to verify that a router can be deleted # Currently the V plugin does not support this return False ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.206254 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v/vshield/0000755000175000017500000000000000000000000023644 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v/vshield/__init__.py0000644000175000017500000000000000000000000025743 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.206254 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v/vshield/common/0000755000175000017500000000000000000000000025134 
5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v/vshield/common/VcnsApiClient.py0000644000175000017500000001375600000000000030224 0ustar00coreycorey00000000000000# Copyright 2013 VMware, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import base64 import os import xml.etree.ElementTree as et from oslo_context import context as context_utils from oslo_serialization import jsonutils import requests import six from vmware_nsx.plugins.nsx_v.vshield.common import exceptions def _xmldump(obj): """Sort of improved xml creation method. This converts the dict to xml with following assumptions: Keys starting with _(underscore) are to be used as attributes and not element keys starting with @ so that dict can be made. Keys starting with __(double underscore) are to be skipped and its value is processed. The keys are not part of any xml schema. """ config = "" attr = "" if isinstance(obj, dict): for key, value in six.iteritems(obj): if key.startswith('__'): # Skip the key and evaluate it's value. 
a, x = _xmldump(value) config += x elif key.startswith('_'): attr += ' %s="%s"' % (key[1:], value) else: a, x = _xmldump(value) if key.startswith('@'): cfg = "%s" % (x) else: cfg = "<%s%s>%s" % (key, a, x, key) config += cfg elif isinstance(obj, list): for value in obj: a, x = _xmldump(value) attr += a config += x else: config = obj return attr, config def xmldumps(obj): attr, xml = _xmldump(obj) return xml class VcnsApiHelper(object): errors = { 303: exceptions.ResourceRedirect, 400: exceptions.RequestBad, 403: exceptions.Forbidden, 404: exceptions.ResourceNotFound, 409: exceptions.ServiceConflict, 415: exceptions.MediaTypeUnsupport, 503: exceptions.ServiceUnavailable } nsx_errors = { # firewall rule doesn't exists for deletion. 100046: exceptions.ResourceNotFound, 100029: exceptions.ResourceNotFound, } def __init__(self, address, user, password, format='json', ca_file=None, insecure=True, timeout=None): # pylint: disable=deprecated-method encode_fn = base64.encodestring if six.PY2 else base64.encodebytes self.authToken = encode_fn(six.b("%s:%s" % (user, password))) self.user = user self.passwd = password self.address = address self.format = format self.timeout = timeout if format == 'json': self.encode = jsonutils.dumps else: self.encode = xmldumps if insecure: self.verify_cert = False else: if ca_file: self.verify_cert = ca_file else: self.verify_cert = True self._session = None self._pid = None @property def session(self): if self._session is None or self._pid != os.getpid(): self._pid = os.getpid() self._session = requests.Session() return self._session def _get_nsx_errorcode(self, content): try: if self.format == 'xml': error = et.fromstring(content).find('errorCode') errcode = error is not None and int(error.text) else: # json error = jsonutils.loads(content) errcode = int(error.get('errorCode')) return errcode except (TypeError, ValueError, et.ParseError): # We won't assume that integer error-code value is guaranteed. 
return None def _get_request_id(self): ctx = context_utils.get_current() if ctx: return ctx.__dict__.get('request_id') def request(self, method, uri, params=None, headers=None, encodeparams=True, timeout=None): uri = self.address + uri if timeout is None: timeout = self.timeout if headers is None: headers = {} auth_token = self.authToken.decode('ascii').strip() headers['Accept'] = 'application/' + self.format headers['Authorization'] = 'Basic ' + auth_token headers['Content-Type'] = 'application/' + self.format request_id = self._get_request_id() if request_id: headers['TicketNumber'] = request_id if params: if encodeparams is True: data = self.encode(params) else: data = params else: data = None try: response = self.session.request(method, uri, verify=self.verify_cert, data=data, headers=headers, timeout=timeout) except requests.exceptions.Timeout: raise exceptions.ResourceTimedOut(uri=uri) status = response.status_code if 200 <= status < 300: return response.headers, response.text nsx_errcode = self._get_nsx_errorcode(response.text) if nsx_errcode in self.nsx_errors: cls = self.nsx_errors[nsx_errcode] elif status in self.errors: cls = self.errors[status] else: cls = exceptions.VcnsApiException raise cls(uri=uri, status=status, header=response.headers, response=response.text) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v/vshield/common/__init__.py0000644000175000017500000000000000000000000027233 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v/vshield/common/constants.py0000644000175000017500000000532700000000000027531 0ustar00coreycorey00000000000000# Copyright 2013 OpenStack Foundation. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from vmware_nsx.common import nsxv_constants EDGE_ID = 'edge_id' ROUTER_ID = 'router_id' DHCP_EDGE_PREFIX = 'dhcp-' PLR_EDGE_PREFIX = 'plr-' BACKUP_ROUTER_PREFIX = 'backup-' EDGE_NAME_LEN = 20 # Interface EXTERNAL_VNIC_INDEX = 0 INTERNAL_VNIC_INDEX = 1 EXTERNAL_VNIC_NAME = "external" INTERNAL_VNIC_NAME = "internal" MAX_VNIC_NUM = 10 # we can add at most 8 interfaces on service edge. Other two interfaces # are used for metadata and external network access. 
MAX_INTF_NUM = 8 MAX_TUNNEL_NUM = (cfg.CONF.nsxv.maximum_tunnels_per_vnic if (cfg.CONF.nsxv.maximum_tunnels_per_vnic < 110 and cfg.CONF.nsxv.maximum_tunnels_per_vnic > 0) else 10) # SNAT rule location PREPEND = 0 APPEND = -1 # error code NSX_ERROR_ALREADY_EXISTS = 210 VCNS_ERROR_CODE_EDGE_NOT_RUNNING = 10013 NSX_ERROR_DHCP_OVERLAPPING_IP = 12501 NSX_ERROR_DHCP_DUPLICATE_HOSTNAME = 12504 NSX_ERROR_DHCP_DUPLICATE_MAC = 12518 NSX_ERROR_IPAM_ALLOCATE_ALL_USED = 120051 NSX_ERROR_IPAM_ALLOCATE_IP_USED = 120056 NSX_ERROR_ALREADY_HAS_SG_POLICY = 120508 SUFFIX_LENGTH = 8 #Edge size SERVICE_SIZE_MAPPING = { 'router': nsxv_constants.COMPACT, 'dhcp': nsxv_constants.COMPACT, 'lb': nsxv_constants.COMPACT } ALLOWED_EDGE_SIZES = (nsxv_constants.COMPACT, nsxv_constants.LARGE, nsxv_constants.XLARGE, nsxv_constants.QUADLARGE) #Edge type ALLOWED_EDGE_TYPES = (nsxv_constants.SERVICE_EDGE, nsxv_constants.VDR_EDGE) SUPPORTED_DHCP_OPTIONS = { 'interface-mtu': 'option26', 'tftp-server-name': 'option66', 'bootfile-name': 'option67', 'classless-static-route': 'option121', 'tftp-server-address': 'option150', 'tftp-server': 'option150', 'server-ip-address': 'option150', } # router status by number class RouterStatus(object): ROUTER_STATUS_ACTIVE = 0 ROUTER_STATUS_DOWN = 1 ROUTER_STATUS_PENDING_CREATE = 2 ROUTER_STATUS_PENDING_DELETE = 3 ROUTER_STATUS_ERROR = 4 class InternalEdgePurposes(object): INTER_EDGE_PURPOSE = 'inter_edge_net' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v/vshield/common/exceptions.py0000644000175000017500000000427600000000000027700 0ustar00coreycorey00000000000000# Copyright 2013 VMware, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import exceptions from vmware_nsx._i18n import _ class VcnsException(exceptions.NeutronException): pass class VcnsGeneralException(VcnsException): def __init__(self, message): self.message = message super(VcnsGeneralException, self).__init__() class VcnsBadRequest(exceptions.BadRequest): pass class VcnsNotFound(exceptions.NotFound): message = _('%(resource)s not found: %(msg)s') class VcnsApiException(VcnsException): message = _("An unknown exception %(status)s occurred: %(response)s.") def __init__(self, **kwargs): super(VcnsApiException, self).__init__(**kwargs) self.status = kwargs.get('status') self.header = kwargs.get('header') self.response = kwargs.get('response') class ResourceRedirect(VcnsApiException): message = _("Resource %(uri)s has been redirected") class RequestBad(VcnsApiException): message = _("Request %(uri)s is Bad, response %(response)s") class Forbidden(VcnsApiException): message = _("Forbidden: %(uri)s") class ResourceNotFound(VcnsApiException): message = _("Resource %(uri)s not found") class ResourceTimedOut(VcnsApiException): message = _("Resource %(uri)s timed out") class MediaTypeUnsupport(VcnsApiException): message = _("Media Type %(uri)s is not supported") class ServiceUnavailable(VcnsApiException): message = _("Service Unavailable: %(uri)s") class ServiceConflict(VcnsApiException): message = _("Concurrent object access error: %(uri)s") class AlreadyExists(VcnsApiException): message = _("Resource %(resource)s already exists") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v/vshield/edge_appliance_driver.py0000644000175000017500000010354300000000000030517 0ustar00coreycorey00000000000000# Copyright 2013 VMware, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from distutils import version import random import time from neutron_lib import constants as lib_const from neutron_lib import context as q_context from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import excutils from sqlalchemy.orm import exc as sa_exc from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsxv_exc from vmware_nsx.common import nsxv_constants from vmware_nsx.common import utils from vmware_nsx.db import nsxv_db from vmware_nsx.plugins.nsx_v.vshield.common import constants from vmware_nsx.plugins.nsx_v.vshield.common import exceptions from vmware_nsx.plugins.nsx_v.vshield import edge_utils from vmware_nsx.plugins.nsx_v.vshield.tasks import ( constants as task_constants) from vmware_nsx.plugins.nsx_v.vshield.tasks import tasks LOG = logging.getLogger(__name__) class EdgeApplianceDriver(object): def __init__(self): super(EdgeApplianceDriver, self).__init__() # store the last task per edge that has the latest config self.updated_task = { 'nat': {}, 'route': {}, } random.seed() def _assemble_edge(self, name, appliance_size="compact", deployment_container_id=None, datacenter_moid=None, enable_aesni=True, dist=False, 
enable_fips=False, remote_access=False, edge_ha=False): edge = { 'name': name, 'fqdn': None, 'enableAesni': enable_aesni, 'enableFips': enable_fips, 'featureConfigs': { 'features': [ { 'featureType': 'firewall_4.0', 'globalConfig': { 'tcpTimeoutEstablished': 7200 } } ] }, 'cliSettings': { 'remoteAccess': remote_access }, 'autoConfiguration': { 'enabled': False, 'rulePriority': 'high' }, 'appliances': { 'applianceSize': appliance_size }, } if not dist: edge['type'] = "gatewayServices" edge['vnics'] = {'vnics': []} else: edge['type'] = "distributedRouter" edge['interfaces'] = {'interfaces': []} if deployment_container_id: edge['appliances']['deploymentContainerId'] = ( deployment_container_id) if datacenter_moid: edge['datacenterMoid'] = datacenter_moid if not dist and edge_ha: self._enable_high_availability(edge) return edge def _select_datastores(self, availability_zone): primary_ds = availability_zone.datastore_id secondary_ds = availability_zone.ha_datastore_id if availability_zone.ha_placement_random: # we want to switch primary and secondary datastores # half of the times, to balance it if random.random() > 0.5: primary_ds = availability_zone.ha_datastore_id secondary_ds = availability_zone.datastore_id return primary_ds, secondary_ds def _assemble_edge_appliances(self, availability_zone): appliances = [] if availability_zone.ha_datastore_id and availability_zone.edge_ha: # create appliance with HA primary_ds, secondary_ds = self._select_datastores( availability_zone) appliances.append(self._assemble_edge_appliance( availability_zone.resource_pool, primary_ds)) appliances.append(self._assemble_edge_appliance( availability_zone.resource_pool, secondary_ds)) elif availability_zone.datastore_id: # Single datastore appliances.append(self._assemble_edge_appliance( availability_zone.resource_pool, availability_zone.datastore_id)) return appliances def _assemble_edge_appliance(self, resource_pool_id, datastore_id): appliance = {} if resource_pool_id: 
appliance['resourcePoolId'] = resource_pool_id if datastore_id: appliance['datastoreId'] = datastore_id return appliance def _assemble_edge_vnic(self, name, index, portgroup_id, tunnel_index=-1, primary_address=None, subnet_mask=None, secondary=None, type="internal", enable_proxy_arp=False, enable_send_redirects=True, is_connected=True, mtu=1500, address_groups=None): vnic = { 'index': index, 'name': name, 'type': type, 'portgroupId': portgroup_id, 'mtu': mtu, 'enableProxyArp': enable_proxy_arp, 'enableSendRedirects': enable_send_redirects, 'isConnected': is_connected } if address_groups is None: address_groups = [] if not address_groups: if primary_address and subnet_mask: address_group = { 'primaryAddress': primary_address, 'subnetMask': subnet_mask } if secondary: address_group['secondaryAddresses'] = { 'ipAddress': secondary, 'type': 'secondary_addresses' } vnic['addressGroups'] = { 'addressGroups': [address_group] } else: vnic['subInterfaces'] = {'subInterfaces': address_groups} else: if tunnel_index < 0: vnic['addressGroups'] = {'addressGroups': address_groups} else: vnic['subInterfaces'] = {'subInterfaces': address_groups} return vnic def _assemble_vdr_interface(self, portgroup_id, primary_address=None, subnet_mask=None, secondary=None, type="internal", is_connected=True, mtu=1500, address_groups=None): interface = { 'type': type, 'connectedToId': portgroup_id, 'mtu': mtu, 'isConnected': is_connected } if address_groups is None: address_groups = [] if not address_groups: if primary_address and subnet_mask: address_group = { 'primaryAddress': primary_address, 'subnetMask': subnet_mask } if secondary: address_group['secondaryAddresses'] = { 'ipAddress': secondary, 'type': 'secondary_addresses' } interface['addressGroups'] = { 'addressGroups': [address_group] } else: interface['addressGroups'] = {'addressGroups': address_groups} interfaces = {'interfaces': [interface]} return interfaces def _edge_status_to_level(self, status): if status == 'GREEN': status_level 
= constants.RouterStatus.ROUTER_STATUS_ACTIVE elif status in ('GREY', 'YELLOW'): status_level = constants.RouterStatus.ROUTER_STATUS_DOWN else: status_level = constants.RouterStatus.ROUTER_STATUS_ERROR return status_level def _enable_loadbalancer(self, edge): if (not edge.get('featureConfigs') or not edge['featureConfigs'].get('features')): edge['featureConfigs'] = {'features': []} edge['featureConfigs']['features'].append( {'featureType': 'loadbalancer_4.0', 'enabled': True}) def _enable_high_availability(self, edge): if (not edge.get('featureConfigs') or not edge['featureConfigs'].get('features')): edge['featureConfigs'] = {'features': []} edge['featureConfigs']['features'].append( {'featureType': 'highavailability_4.0', 'enabled': True}) def get_edge_status(self, edge_id): try: response = self.vcns.get_edge_status(edge_id)[1] status_level = self._edge_status_to_level( response['edgeStatus']) except exceptions.VcnsApiException as e: LOG.error("VCNS: Failed to get edge %(edge_id)s status: " "Reason: %(reason)s", {'edge_id': edge_id, 'reason': e.response}) status_level = constants.RouterStatus.ROUTER_STATUS_ERROR try: desc = jsonutils.loads(e.response) if desc.get('errorCode') == ( constants.VCNS_ERROR_CODE_EDGE_NOT_RUNNING): status_level = constants.RouterStatus.ROUTER_STATUS_DOWN except ValueError: LOG.error('Error code not present. 
%s', e.response) return status_level def get_interface(self, edge_id, vnic_index): # get vnic interface address groups try: return self.vcns.query_interface(edge_id, vnic_index) except exceptions.VcnsApiException: with excutils.save_and_reraise_exception(): LOG.exception("NSXv: Failed to query vnic %s", vnic_index) def update_interface(self, router_id, edge_id, index, network, tunnel_index=-1, address=None, netmask=None, secondary=None, is_connected=True, address_groups=None): LOG.debug("VCNS: update vnic %(index)d: %(addr)s %(netmask)s", { 'index': index, 'addr': address, 'netmask': netmask}) if index == constants.EXTERNAL_VNIC_INDEX: name = constants.EXTERNAL_VNIC_NAME intf_type = 'uplink' else: name = constants.INTERNAL_VNIC_NAME + str(index) if tunnel_index < 0: intf_type = 'internal' else: intf_type = 'trunk' config = self._assemble_edge_vnic( name, index, network, tunnel_index, address, netmask, secondary, type=intf_type, address_groups=address_groups, is_connected=is_connected) self.vcns.update_interface(edge_id, config) def add_vdr_internal_interface(self, edge_id, network, address=None, netmask=None, secondary=None, address_groups=None, type="internal", is_connected=True): LOG.debug("Add VDR interface on edge: %s", edge_id) if address_groups is None: address_groups = [] interface_req = ( self._assemble_vdr_interface(network, address, netmask, secondary, address_groups=address_groups, is_connected=is_connected, type=type)) self.vcns.add_vdr_internal_interface(edge_id, interface_req) header, response = self.vcns.get_edge_interfaces(edge_id) for interface in response['interfaces']: if interface['connectedToId'] == network: vnic_index = int(interface['index']) return vnic_index def update_vdr_internal_interface(self, edge_id, index, network, address_groups=None, is_connected=True): if not address_groups: address_groups = [] interface = { 'type': 'internal', 'connectedToId': network, 'mtu': 1500, 'isConnected': is_connected, 'addressGroups': {'addressGroup': 
address_groups} } interface_req = {'interface': interface} try: header, response = self.vcns.update_vdr_internal_interface( edge_id, index, interface_req) except exceptions.VcnsApiException: with excutils.save_and_reraise_exception(): LOG.exception("Failed to update vdr interface on edge: " "%s", edge_id) def delete_vdr_internal_interface(self, edge_id, interface_index): LOG.debug("Delete VDR interface on edge: %s", edge_id) try: header, response = self.vcns.delete_vdr_internal_interface( edge_id, interface_index) except exceptions.VcnsApiException: with excutils.save_and_reraise_exception(): LOG.exception("Failed to delete vdr interface on edge: " "%s", edge_id) def delete_interface(self, router_id, edge_id, index): LOG.debug("Deleting vnic %(vnic_index)s: on edge %(edge_id)s", {'vnic_index': index, 'edge_id': edge_id}) try: self.vcns.delete_interface(edge_id, index) except exceptions.ResourceNotFound: LOG.error('Failed to delete vnic %(vnic_index)s on edge ' '%(edge_id)s: edge was not found', {'vnic_index': index, 'edge_id': edge_id}) except exceptions.VcnsApiException: with excutils.save_and_reraise_exception(): LOG.exception("Failed to delete vnic %(vnic_index)s: " "on edge %(edge_id)s", {'vnic_index': index, 'edge_id': edge_id}) LOG.debug("Deletion complete vnic %(vnic_index)s: on edge %(edge_id)s", {'vnic_index': index, 'edge_id': edge_id}) def deploy_edge(self, context, router_id, name, internal_network, dist=False, loadbalancer_enable=True, appliance_size=nsxv_constants.LARGE, availability_zone=None, deploy_metadata=False): edge_name = name edge = self._assemble_edge( edge_name, datacenter_moid=availability_zone.datacenter_moid, deployment_container_id=self.deployment_container_id, appliance_size=appliance_size, remote_access=False, dist=dist, edge_ha=availability_zone.edge_ha) appliances = self._assemble_edge_appliances(availability_zone) if appliances: edge['appliances']['appliances'] = appliances if not dist: vnic_external = self._assemble_edge_vnic( 
constants.EXTERNAL_VNIC_NAME, constants.EXTERNAL_VNIC_INDEX, availability_zone.external_network, type="uplink") edge['vnics']['vnics'].append(vnic_external) else: edge['mgmtInterface'] = { 'connectedToId': availability_zone.external_network, 'name': "mgmtInterface"} if internal_network: vnic_inside = self._assemble_edge_vnic( constants.INTERNAL_VNIC_NAME, constants.INTERNAL_VNIC_INDEX, internal_network, edge_utils.get_vdr_transit_network_plr_address(), edge_utils.get_vdr_transit_network_netmask(), type="internal") edge['vnics']['vnics'].append(vnic_inside) # If default login credentials for Edge are set, configure accordingly if (cfg.CONF.nsxv.edge_appliance_user and cfg.CONF.nsxv.edge_appliance_password): edge['cliSettings'].update({ 'userName': cfg.CONF.nsxv.edge_appliance_user, 'password': cfg.CONF.nsxv.edge_appliance_password}) if not dist and loadbalancer_enable: self._enable_loadbalancer(edge) edge_id = None try: header = self.vcns.deploy_edge(edge)[0] edge_id = header.get('location', '/').split('/')[-1] if edge_id: nsxv_db.update_nsxv_router_binding( context.session, router_id, edge_id=edge_id) if not dist: # Init Edge vnic binding nsxv_db.init_edge_vnic_binding( context.session, edge_id) else: if router_id: nsxv_db.update_nsxv_router_binding( context.session, router_id, status=lib_const.ERROR) error = _('Failed to deploy edge') raise nsxv_exc.NsxPluginException(err_msg=error) self.callbacks.complete_edge_creation( context, edge_id, name, router_id, dist, True, availability_zone=availability_zone, deploy_metadata=deploy_metadata) except exceptions.VcnsApiException: self.callbacks.complete_edge_creation( context, edge_id, name, router_id, dist, False, availability_zone=availability_zone) with excutils.save_and_reraise_exception(): LOG.exception("NSXv: deploy edge failed.") return edge_id def update_edge(self, context, router_id, edge_id, name, internal_network, dist=False, loadbalancer_enable=True, appliance_size=nsxv_constants.LARGE, set_errors=False, 
availability_zone=None): """Update edge name.""" edge = self._assemble_edge( name, datacenter_moid=availability_zone.datacenter_moid, deployment_container_id=self.deployment_container_id, appliance_size=appliance_size, remote_access=False, dist=dist, edge_ha=availability_zone.edge_ha) edge['id'] = edge_id appliances = self._assemble_edge_appliances(availability_zone) if appliances: edge['appliances']['appliances'] = appliances if not dist: vnic_external = self._assemble_edge_vnic( constants.EXTERNAL_VNIC_NAME, constants.EXTERNAL_VNIC_INDEX, availability_zone.external_network, type="uplink") edge['vnics']['vnics'].append(vnic_external) else: edge['mgmtInterface'] = { 'connectedToId': availability_zone.external_network, 'name': "mgmtInterface"} if internal_network: internal_vnic = self._assemble_edge_vnic( constants.INTERNAL_VNIC_NAME, constants.INTERNAL_VNIC_INDEX, internal_network, edge_utils.get_vdr_transit_network_plr_address(), edge_utils.get_vdr_transit_network_netmask(), type="internal") edge['vnics']['vnics'].append(internal_vnic) if not dist and loadbalancer_enable: self._enable_loadbalancer(edge) try: self.vcns.update_edge(edge_id, edge) self.callbacks.complete_edge_update( context, edge_id, router_id, True, set_errors) except exceptions.VcnsApiException as e: LOG.error("Failed to update edge: %s", e.response) self.callbacks.complete_edge_update( context, edge_id, router_id, False, set_errors) return False return True def rename_edge(self, edge_id, name): """rename edge.""" try: # First get the current edge structure # [0] is the status, [1] is the body edge = self.vcns.get_edge(edge_id)[1] if edge['name'] == name: LOG.debug('Edge %s is already named %s', edge_id, name) return # remove some data that will make the update fail edge_utils.remove_irrelevant_keys_from_edge_request(edge) # set the new name in the request edge['name'] = name # update the edge self.vcns.update_edge(edge_id, edge) except exceptions.VcnsApiException as e: LOG.error("Failed to rename 
edge: %s", e.response) def resize_edge(self, edge_id, size): """update the size of a router edge.""" try: # First get the current edge structure # [0] is the status, [1] is the body edge = self.vcns.get_edge(edge_id)[1] if edge.get('appliances'): if edge['appliances']['applianceSize'] == size: LOG.debug('Edge %s is already with size %s', edge_id, size) return ver = self.vcns.get_version() if version.LooseVersion(ver) < version.LooseVersion('6.2.3'): # remove some data that will make the update fail edge_utils.remove_irrelevant_keys_from_edge_request(edge) # set the new size in the request edge['appliances']['applianceSize'] = size # update the edge self.vcns.update_edge(edge_id, edge) except exceptions.VcnsApiException as e: LOG.error("Failed to resize edge: %s", e.response) def delete_edge(self, context, router_id, edge_id, dist=False): LOG.debug("Deleting edge %s", edge_id) if context is None: context = q_context.get_admin_context() try: LOG.debug("Deleting router binding %s", router_id) nsxv_db.delete_nsxv_router_binding(context.session, router_id) if not dist: LOG.debug("Deleting vnic bindings for edge %s", edge_id) nsxv_db.clean_edge_vnic_binding(context.session, edge_id) except sa_exc.NoResultFound: LOG.warning("Router Binding for %s not found", router_id) if edge_id: try: self.vcns.delete_edge(edge_id) return True except exceptions.ResourceNotFound: return True except exceptions.VcnsApiException as e: LOG.exception("VCNS: Failed to delete %(edge_id)s:\n" "%(response)s", {'edge_id': edge_id, 'response': e.response}) return False except Exception: LOG.exception("VCNS: Failed to delete %s", edge_id) return False def _assemble_nat_rule(self, action, original_address, translated_address, vnic_index=None, enabled=True, protocol='any', original_port='any', translated_port='any'): nat_rule = {} nat_rule['action'] = action if vnic_index is not None: nat_rule['vnic'] = vnic_index nat_rule['originalAddress'] = original_address nat_rule['translatedAddress'] = 
translated_address nat_rule['enabled'] = enabled nat_rule['protocol'] = protocol nat_rule['originalPort'] = original_port nat_rule['translatedPort'] = translated_port return nat_rule def get_nat_config(self, edge_id): try: return self.vcns.get_nat_config(edge_id)[1] except exceptions.VcnsApiException as e: LOG.exception("VCNS: Failed to get nat config:\n%s", e.response) raise e def update_nat_rules(self, edge_id, snats, dnats, indices=None): LOG.debug("VCNS: update nat rule\n" "SNAT:%(snat)s\n" "DNAT:%(dnat)s\n" "INDICES: %(index)s\n", { 'snat': snats, 'dnat': dnats, 'index': indices}) nat_rules = [] for dnat in dnats: vnic_index = None if 'vnic_index' in dnat: vnic_index = dnat['vnic_index'] if vnic_index or not indices: # we are adding a predefined index or # adding to all interfaces nat_rules.append(self._assemble_nat_rule( 'dnat', dnat['dst'], dnat['translated'], vnic_index=vnic_index )) nat_rules.append(self._assemble_nat_rule( 'snat', dnat['translated'], dnat['dst'], vnic_index=vnic_index )) else: for index in indices: nat_rules.append(self._assemble_nat_rule( 'dnat', dnat['dst'], dnat['translated'], vnic_index=index )) nat_rules.append(self._assemble_nat_rule( 'snat', dnat['translated'], dnat['dst'], vnic_index=index )) for snat in snats: vnic_index = None if 'vnic_index' in snat: vnic_index = snat['vnic_index'] if vnic_index or not indices: # we are adding a predefined index # or adding to all interfaces nat_rules.append(self._assemble_nat_rule( 'snat', snat['src'], snat['translated'], vnic_index=vnic_index )) else: for index in indices: nat_rules.append(self._assemble_nat_rule( 'snat', snat['src'], snat['translated'], vnic_index=index )) nat = { 'featureType': 'nat', 'rules': { 'natRulesDtos': nat_rules } } try: self.vcns.update_nat_config(edge_id, nat) return True except exceptions.VcnsApiException as e: LOG.exception("VCNS: Failed to create snat rule:\n%s", e.response) return False def update_routes(self, edge_id, gateway, routes): if gateway: gateway = 
gateway.split('/')[0] static_routes = [] for route in routes: if route.get('vnic_index') is None: static_routes.append({ "description": "", "vnic": constants.INTERNAL_VNIC_INDEX, "network": route['cidr'], "nextHop": route['nexthop'] }) else: static_routes.append({ "description": "", "vnic": route['vnic_index'], "network": route['cidr'], "nextHop": route['nexthop'] }) request = { "staticRoutes": { "staticRoutes": static_routes } } if gateway: request["defaultRoute"] = { "description": "default-gateway", "gatewayAddress": gateway } try: self.vcns.update_routes(edge_id, request) return True except exceptions.VcnsApiException as e: LOG.exception("VCNS: Failed to update routes:\n%s", e.response) return False def create_lswitch(self, name, tz_config, tags=None, port_isolation=False, replication_mode="service"): lsconfig = { 'display_name': utils.check_and_truncate(name), "tags": tags or [], "type": "LogicalSwitchConfig", "_schema": "/ws.v1/schema/LogicalSwitchConfig", "transport_zones": tz_config } if port_isolation is bool: lsconfig["port_isolation_enabled"] = port_isolation if replication_mode: lsconfig["replication_mode"] = replication_mode response = self.vcns.create_lswitch(lsconfig)[1] return response def delete_lswitch(self, lswitch_id): self.vcns.delete_lswitch(lswitch_id) def get_loadbalancer_config(self, edge_id): try: header, response = self.vcns.get_loadbalancer_config( edge_id) except exceptions.VcnsApiException: with excutils.save_and_reraise_exception(): LOG.exception("Failed to get service config") return response def enable_service_loadbalancer(self, edge_id): config = self.get_loadbalancer_config( edge_id) if not config['enabled']: config['enabled'] = True try: self.vcns.enable_service_loadbalancer(edge_id, config) except exceptions.VcnsApiException: with excutils.save_and_reraise_exception(): LOG.exception("Failed to enable loadbalancer " "service config") def _delete_port_group(self, task): try: self.vcns.delete_port_group( task.userdata['dvs_id'], 
task.userdata['port_group_id']) except Exception as e: LOG.error('Unable to delete %(pg)s exception %(ex)s', {'pg': task.userdata['port_group_id'], 'ex': e}) return task_constants.TaskStatus.ERROR return task_constants.TaskStatus.COMPLETED def _retry_task(self, task): delay = 0.5 max_retries = max(cfg.CONF.nsxv.retries, 1) args = task.userdata.get('args', []) kwargs = task.userdata.get('kwargs', {}) retry_number = task.userdata['retry_number'] retry_command = task.userdata['retry_command'] try: retry_command(*args, **kwargs) except Exception as exc: LOG.debug("Task %(name)s retry %(retry)s failed %(exc)s", {'name': task.name, 'exc': exc, 'retry': retry_number}) retry_number += 1 if retry_number > max_retries: with excutils.save_and_reraise_exception(): LOG.exception("Failed to %s", task.name) else: task.userdata['retry_number'] = retry_number # Sleep twice as long as the previous retry tts = (2 ** (retry_number - 1)) * delay time.sleep(min(tts, 60)) return task_constants.TaskStatus.PENDING LOG.info("Task %(name)s completed.", {'name': task.name}) return task_constants.TaskStatus.COMPLETED def delete_port_group(self, dvs_id, port_group_id): task_name = 'delete-port-group-%s-%s' % (port_group_id, dvs_id) userdata = {'retry_number': 1, 'retry_command': self.vcns.delete_port_group, 'args': [dvs_id, port_group_id]} task = tasks.Task(task_name, port_group_id, self._retry_task, status_callback=self._retry_task, userdata=userdata) self.task_manager.add(task) def delete_virtual_wire(self, vw_id): task_name = 'delete-virtualwire-%s' % vw_id userdata = {'retry_number': 1, 'retry_command': self.vcns.delete_virtual_wire, 'args': [vw_id]} task = tasks.Task(task_name, vw_id, self._retry_task, status_callback=self._retry_task, userdata=userdata) self.task_manager.add(task) def create_bridge(self, device_name, bridge): try: self.vcns.create_bridge(device_name, bridge) except exceptions.VcnsApiException: with excutils.save_and_reraise_exception(): LOG.exception("Failed to create 
bridge in the %s", device_name) def delete_bridge(self, device_name): try: self.vcns.delete_bridge(device_name) except exceptions.VcnsApiException: LOG.exception("Failed to delete bridge in the %s", device_name) def update_edge_ha(self, edge_id): ha_request = { 'featureType': "highavailability_4.0", 'enabled': True} self.vcns.enable_ha(edge_id, ha_request) def update_edge_syslog(self, edge_id, syslog_config, router_id): if 'server_ip' not in syslog_config: LOG.warning("Server IP missing in syslog config for %s", router_id) return protocol = syslog_config.get('protocol', 'tcp') if protocol not in ['tcp', 'udp']: LOG.warning("Invalid protocol in syslog config for %s", router_id) return loglevel = syslog_config.get('log_level') if loglevel and loglevel not in edge_utils.SUPPORTED_EDGE_LOG_LEVELS: LOG.warning("Invalid loglevel in syslog config for %s", router_id) return server_ip = syslog_config['server_ip'] request = {'featureType': 'syslog', 'protocol': protocol, 'serverAddresses': {'ipAddress': [server_ip], 'type': 'IpAddressesDto'}} # edge allows up to 2 syslog servers if 'server2_ip' in syslog_config: request['serverAddresses']['ipAddress'].append( syslog_config['server2_ip']) self.vcns.update_edge_syslog(edge_id, request) # update log level for routing in separate API call if loglevel: edge_utils.update_edge_loglevel(self.vcns, edge_id, 'routing', loglevel) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v/vshield/edge_dynamic_routing_driver.py0000644000175000017500000002151400000000000031753 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time from oslo_config import cfg from oslo_log import log as logging from vmware_nsx.common import locking LOG = logging.getLogger(__name__) class EdgeDynamicRoutingDriver(object): """Edge driver API to implement the dynamic routing""" def __init__(self): # it will be initialized at subclass self.vcns = None self.ecmp_wait_time = cfg.CONF.nsxv.ecmp_wait_time def _prepare_bgp_config(self, bgp_config): bgp_config.setdefault('enabled', False) bgp_config.setdefault('bgpNeighbours', {'bgpNeighbours': []}) bgp_config.setdefault('redistribution', {'rules': {'rules': []}}) curr_neighbours = [{'bgpNeighbour': nbr} for nbr in bgp_config['bgpNeighbours']['bgpNeighbours']] bgp_config['bgpNeighbours'] = curr_neighbours for nbr in curr_neighbours: bgp_filters = [{'bgpFilter': bf} for bf in nbr['bgpNeighbour']['bgpFilters']['bgpFilters']] nbr['bgpNeighbour']['bgpFilters'] = bgp_filters redistribution_rules = [{'rule': rule} for rule in bgp_config['redistribution']['rules']['rules']] bgp_config['redistribution']['rules'] = redistribution_rules def _get_routing_config(self, edge_id): h, config = self.vcns.get_edge_routing_config(edge_id) # Backend complains when adding this in the request. config.pop('featureType') config.pop('ospf') global_config = config['routingGlobalConfig'] bgp_config = config.get('bgp', {}) self._prepare_bgp_config(bgp_config) global_config.setdefault('ipPrefixes', {'ipPrefixes': []}) curr_prefixes = [{'ipPrefix': prx} for prx in global_config['ipPrefixes']['ipPrefixes']] global_config['ipPrefixes'] = curr_prefixes # Don't change any static routes. 
static_routing = config.get('staticRouting', {}) static_routes = static_routing.get('staticRoutes', {}) current_routes = [{'route': route} for route in static_routes.get('staticRoutes', [])] static_routing['staticRoutes'] = current_routes return {'routing': config} def _update_routing_config(self, edge_id, **kwargs): routing_config = self._get_routing_config(edge_id) global_config = routing_config['routing']['routingGlobalConfig'] current_prefixes = global_config['ipPrefixes'] global_config['ecmp'] = True if 'router_id' in kwargs: global_config['routerId'] = kwargs['router_id'] current_prefixes[:] = [p for p in current_prefixes if p['ipPrefix']['name'] not in kwargs.get('prefixes_to_remove', [])] # Avoid adding duplicate rules when shared router relocation current_prefixes.extend([p for p in kwargs.get('prefixes_to_add', []) if p not in current_prefixes]) self.vcns.update_edge_routing_config(edge_id, routing_config) def _reset_routing_global_config(self, edge_id): routing_config = self._get_routing_config(edge_id) global_config = routing_config['routing']['routingGlobalConfig'] global_config['ecmp'] = False global_config.pop('routerId') global_config.pop('ipPrefixes') self.vcns.update_edge_routing_config(edge_id, routing_config) def get_routing_bgp_config(self, edge_id): h, config = self.vcns.get_bgp_routing_config(edge_id) bgp_config = config if config else {} self._prepare_bgp_config(bgp_config) return {'bgp': bgp_config} def _update_bgp_routing_config(self, edge_id, **kwargs): bgp_config = self.get_routing_bgp_config(edge_id) curr_neighbours = bgp_config['bgp']['bgpNeighbours'] curr_rules = bgp_config['bgp']['redistribution']['rules'] bgp_config['bgp']['enabled'] = True if 'default_originate' in kwargs: bgp_config['bgp']['defaultOriginate'] = kwargs['default_originate'] if 'local_as' in kwargs: bgp_config['bgp']['localAS'] = kwargs['local_as'] if 'enabled' in kwargs: bgp_config['bgp']['redistribution']['enabled'] = kwargs['enabled'] curr_rules[:] = [rule for 
rule in curr_rules if rule['rule'].get('prefixName') not in kwargs.get('rules_to_remove', [])] # Avoid adding duplicate rules when shared router relocation curr_rules_prefixes = [r['rule'].get('prefixName') for r in curr_rules] curr_rules.extend([r for r in kwargs.get('rules_to_add', []) if r['rule'].get('prefixName') not in curr_rules_prefixes]) neighbours_to_remove = [nbr['bgpNeighbour']['ipAddress'] for nbr in kwargs.get('neighbours_to_remove', [])] curr_neighbours[:] = [nbr for nbr in curr_neighbours if nbr['bgpNeighbour']['ipAddress'] not in neighbours_to_remove] curr_neighbours.extend(kwargs.get('neighbours_to_add', [])) self.vcns.update_bgp_dynamic_routing(edge_id, bgp_config) def add_bgp_speaker_config(self, edge_id, prot_router_id, local_as, enabled, bgp_neighbours, prefixes, redistribution_rules, default_originate=False): with locking.LockManager.get_lock(str(edge_id)): self._update_routing_config(edge_id, router_id=prot_router_id, prefixes_to_add=prefixes) if self.ecmp_wait_time > 0: time.sleep(self.ecmp_wait_time) self._update_bgp_routing_config( edge_id, enabled=enabled, local_as=local_as, neighbours_to_add=bgp_neighbours, prefixes_to_add=prefixes, rules_to_add=redistribution_rules, default_originate=default_originate) def delete_bgp_speaker_config(self, edge_id): with locking.LockManager.get_lock(str(edge_id)): self.vcns.delete_bgp_routing_config(edge_id) self._reset_routing_global_config(edge_id) def add_bgp_neighbours(self, edge_id, bgp_neighbours): # Query the bgp config first and update the bgpNeighbour with locking.LockManager.get_lock(str(edge_id)): self._update_bgp_routing_config(edge_id, neighbours_to_add=bgp_neighbours) def remove_bgp_neighbours(self, edge_id, bgp_neighbours): with locking.LockManager.get_lock(str(edge_id)): self._update_bgp_routing_config( edge_id, neighbours_to_remove=bgp_neighbours) def update_bgp_neighbours(self, edge_id, neighbours_to_add=None, neighbours_to_remove=None): with locking.LockManager.get_lock(str(edge_id)): 
self._update_bgp_routing_config( edge_id, neighbours_to_add=neighbours_to_add, neighbours_to_remove=neighbours_to_remove) def update_routing_redistribution(self, edge_id, enabled): with locking.LockManager.get_lock(str(edge_id)): self._update_bgp_routing_config(edge_id, enabled=enabled) def add_bgp_redistribution_rules(self, edge_id, prefixes, rules): with locking.LockManager.get_lock(str(edge_id)): self._update_routing_config(edge_id, prefixes_to_add=prefixes) self._update_bgp_routing_config(edge_id, rules_to_add=rules) LOG.debug("Added redistribution rules %s on edge %s", rules, edge_id) def remove_bgp_redistribution_rules(self, edge_id, prefixes): with locking.LockManager.get_lock(str(edge_id)): self._update_bgp_routing_config(edge_id, rules_to_remove=prefixes) self._update_routing_config(edge_id, prefixes_to_remove=prefixes) LOG.debug("Removed redistribution rules for prefixes %s on edge %s", prefixes, edge_id) def update_router_id(self, edge_id, router_id): with locking.LockManager.get_lock(str(edge_id)): self._update_routing_config(edge_id, router_id=router_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v/vshield/edge_firewall_driver.py0000644000175000017500000004365000000000000030372 0ustar00coreycorey00000000000000# Copyright 2013 VMware, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from oslo_utils import excutils from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.db import nsxv_db from vmware_nsx.plugins.nsx_v.vshield.common import ( exceptions as vcns_exc) LOG = logging.getLogger(__name__) VSE_FWAAS_ALLOW = "accept" VSE_FWAAS_DENY = "deny" VSE_FWAAS_REJECT = "reject" FWAAS_ALLOW = "allow" FWAAS_DENY = "deny" FWAAS_REJECT = "reject" FWAAS_ALLOW_EXT_RULE_NAME = 'Allow To External' class EdgeFirewallDriver(object): """Implementation of driver APIs for Edge Firewall feature configuration """ def __init__(self): super(EdgeFirewallDriver, self).__init__() self._icmp_echo_application_ids = None def _convert_firewall_action(self, action): if action == FWAAS_ALLOW: return VSE_FWAAS_ALLOW elif action == FWAAS_DENY: return VSE_FWAAS_DENY elif action == FWAAS_REJECT: return VSE_FWAAS_REJECT else: msg = _("Invalid action value %s in a firewall rule") % action raise vcns_exc.VcnsBadRequest(resource='firewall_rule', msg=msg) def _restore_firewall_action(self, action): if action == VSE_FWAAS_ALLOW: return FWAAS_ALLOW elif action == VSE_FWAAS_DENY: return FWAAS_DENY elif action == VSE_FWAAS_REJECT: return FWAAS_REJECT else: msg = (_("Invalid action value %s in " "a vshield firewall rule") % action) raise vcns_exc.VcnsBadRequest(resource='firewall_rule', msg=msg) def _get_port_range(self, min_port, max_port): if not min_port or min_port == 'any': return None if min_port == max_port: return str(min_port) else: return '%d:%d' % (min_port, max_port) def _get_ports_list_from_string(self, port_str): """Receives a string representation of the service ports, and return a list of integers Supported formats: Empty string - no ports "number" - a single port "num1:num2" - a range "num1,num2,num3" - a list """ if not port_str or port_str == 'any': return [] if ':' in port_str: min_port, sep, max_port = port_str.partition(":") return ["%s-%s" % (int(min_port.strip()), 
int(max_port.strip()))] if ',' in port_str: # remove duplications (using set) and empty/non numeric entries ports_set = set() for orig_port in port_str.split(','): port = orig_port.strip() if port and port.isdigit(): ports_set.add(int(port)) return sorted(list(ports_set)) else: return [int(port_str.strip())] def _convert_firewall_rule(self, rule, index=None): vcns_rule = { "action": self._convert_firewall_action(rule['action']), "enabled": rule.get('enabled', True)} if rule.get('name'): vcns_rule['name'] = rule['name'] if rule.get('description'): vcns_rule['description'] = rule['description'] if rule.get('source_ip_address'): vcns_rule['source'] = { "ipAddress": rule['source_ip_address'] } if rule.get('source_vnic_groups'): vcns_rule['source'] = { "vnicGroupId": rule['source_vnic_groups'] } if rule.get('destination_ip_address'): vcns_rule['destination'] = { "ipAddress": rule['destination_ip_address'] } if rule.get('destination_vnic_groups'): vcns_rule['destination'] = { "vnicGroupId": rule['destination_vnic_groups'] } if rule.get('application'): vcns_rule['application'] = rule['application'] service = {} if rule.get('source_port'): service['sourcePort'] = self._get_ports_list_from_string( rule['source_port']) if rule.get('destination_port'): service['port'] = self._get_ports_list_from_string( rule['destination_port']) if rule.get('protocol'): service['protocol'] = rule['protocol'] if rule['protocol'] == 'icmp': if rule.get('icmp_type'): service['icmpType'] = rule['icmp_type'] else: service['icmpType'] = 'any' if rule.get('ruleId'): vcns_rule['ruleId'] = rule.get('ruleId') if service: vcns_rule['application'] = { 'service': [service] } if rule.get('logged'): vcns_rule['loggingEnabled'] = rule['logged'] if index: vcns_rule['ruleTag'] = index return vcns_rule def _restore_firewall_rule(self, context, edge_id, rule): fw_rule = {} rule_binding = nsxv_db.get_nsxv_edge_firewallrule_binding_by_vseid( context.session, edge_id, rule['ruleId']) if rule_binding: fw_rule['id'] 
= rule_binding['rule_id'] fw_rule['ruleId'] = rule['ruleId'] if rule.get('source'): src = rule['source'] fw_rule['source_ip_address'] = src['ipAddress'] fw_rule['source_vnic_groups'] = src['vnicGroupId'] if rule.get('destination'): dest = rule['destination'] fw_rule['destination_ip_address'] = dest['ipAddress'] fw_rule['destination_vnic_groups'] = dest['vnicGroupId'] if 'application' in rule and 'service' in rule['application']: service = rule['application']['service'][0] fw_rule['protocol'] = service['protocol'] if service.get('sourcePort'): fw_rule['source_port'] = self._get_port_range( service['sourcePort'][0], service['sourcePort'][-1]) if service.get('destination_port'): fw_rule['destination_port'] = self._get_port_range( service['port'][0], service['port'][-1]) fw_rule['action'] = self._restore_firewall_action(rule['action']) fw_rule['enabled'] = rule['enabled'] if rule.get('name'): fw_rule['name'] = rule['name'] if rule.get('description'): fw_rule['description'] = rule['description'] if rule.get('loggingEnabled'): fw_rule['logged'] = rule['loggingEnabled'] return fw_rule def _convert_firewall(self, firewall, allow_external=False): ruleTag = 1 vcns_rules = [] for rule in firewall['firewall_rule_list']: tag = rule.get('ruleTag', ruleTag) vcns_rule = self._convert_firewall_rule(rule, tag) vcns_rules.append(vcns_rule) if not rule.get('ruleTag'): ruleTag += 1 if allow_external: # Add the allow-external rule with the latest tag vcns_rules.append({'name': FWAAS_ALLOW_EXT_RULE_NAME, 'action': "accept", 'enabled': True, 'destination': {'vnicGroupId': ["external"]}, 'ruleTag': ruleTag}) return { 'featureType': "firewall_4.0", 'globalConfig': {'tcpTimeoutEstablished': 7200}, 'firewallRules': { 'firewallRules': vcns_rules}} def _restore_firewall(self, context, edge_id, response): res = {} res['firewall_rule_list'] = [] for rule in response['firewallRules']['firewallRules']: if rule.get('ruleType') == 'default_policy': continue firewall_rule = 
self._restore_firewall_rule(context, edge_id, rule) res['firewall_rule_list'].append({'firewall_rule': firewall_rule}) return res def _get_firewall(self, edge_id): try: return self.vcns.get_firewall(edge_id)[1] except vcns_exc.VcnsApiException as e: LOG.exception("Failed to get firewall with edge " "id: %s", edge_id) raise e def _get_firewall_rule_next(self, context, edge_id, rule_vseid): # Return the firewall rule below 'rule_vseid' fw_cfg = self._get_firewall(edge_id) for i in range(len(fw_cfg['firewallRules']['firewallRules'])): rule_cur = fw_cfg['firewallRules']['firewallRules'][i] if str(rule_cur['ruleId']) == rule_vseid: if (i + 1) == len(fw_cfg['firewallRules']['firewallRules']): return None else: return fw_cfg['firewallRules']['firewallRules'][i + 1] def get_firewall_rule(self, context, id, edge_id): rule_map = nsxv_db.get_nsxv_edge_firewallrule_binding( context.session, id, edge_id) if rule_map is None: msg = _("No rule id:%s found in the edge_firewall_binding") % id LOG.error(msg) raise vcns_exc.VcnsNotFound( resource='vcns_firewall_rule_bindings', msg=msg) vcns_rule_id = rule_map.rule_vseid try: response = self.vcns.get_firewall_rule( edge_id, vcns_rule_id)[1] except vcns_exc.VcnsApiException as e: LOG.exception("Failed to get firewall rule: %(rule_id)s " "with edge_id: %(edge_id)s", { 'rule_id': id, 'edge_id': edge_id}) raise e return self._restore_firewall_rule(context, edge_id, response) def get_firewall(self, context, edge_id): response = self._get_firewall(edge_id) return self._restore_firewall(context, edge_id, response) def delete_firewall(self, context, edge_id): try: self.vcns.delete_firewall(edge_id) except vcns_exc.VcnsApiException as e: LOG.exception("Failed to delete firewall " "with edge_id:%s", edge_id) raise e nsxv_db.cleanup_nsxv_edge_firewallrule_binding( context.session, edge_id) def update_firewall_rule(self, context, id, edge_id, firewall_rule): rule_map = nsxv_db.get_nsxv_edge_firewallrule_binding( context.session, id, edge_id) 
vcns_rule_id = rule_map.rule_vseid fwr_req = self._convert_firewall_rule(firewall_rule) try: self.vcns.update_firewall_rule(edge_id, vcns_rule_id, fwr_req) except vcns_exc.VcnsApiException: with excutils.save_and_reraise_exception(): LOG.exception("Failed to update firewall rule: " "%(rule_id)s " "with edge_id: %(edge_id)s", {'rule_id': id, 'edge_id': edge_id}) def delete_firewall_rule(self, context, id, edge_id): rule_map = nsxv_db.get_nsxv_edge_firewallrule_binding( context.session, id, edge_id) vcns_rule_id = rule_map.rule_vseid try: self.vcns.delete_firewall_rule(edge_id, vcns_rule_id) except vcns_exc.VcnsApiException: with excutils.save_and_reraise_exception(): LOG.exception("Failed to delete firewall rule: " "%(rule_id)s " "with edge_id: %(edge_id)s", {'rule_id': id, 'edge_id': edge_id}) nsxv_db.delete_nsxv_edge_firewallrule_binding( context.session, id) def _add_rule_above(self, context, ref_rule_id, edge_id, firewall_rule): rule_map = nsxv_db.get_nsxv_edge_firewallrule_binding( context.session, ref_rule_id, edge_id) ref_vcns_rule_id = rule_map.rule_vseid fwr_req = self._convert_firewall_rule(firewall_rule) try: header = self.vcns.add_firewall_rule_above( edge_id, ref_vcns_rule_id, fwr_req)[0] except vcns_exc.VcnsApiException: with excutils.save_and_reraise_exception(): LOG.exception("Failed to add firewall rule above: " "%(rule_id)s with edge_id: %(edge_id)s", {'rule_id': ref_vcns_rule_id, 'edge_id': edge_id}) objuri = header['location'] fwr_vseid = objuri[objuri.rfind("/") + 1:] map_info = { 'rule_id': firewall_rule['id'], 'rule_vseid': fwr_vseid, 'edge_id': edge_id} nsxv_db.add_nsxv_edge_firewallrule_binding( context.session, map_info) def _add_rule_below(self, context, ref_rule_id, edge_id, firewall_rule): rule_map = nsxv_db.get_nsxv_edge_firewallrule_binding( context.session, ref_rule_id, edge_id) ref_vcns_rule_id = rule_map.rule_vseid fwr_vse_next = self._get_firewall_rule_next( context, edge_id, ref_vcns_rule_id) fwr_req = 
self._convert_firewall_rule(firewall_rule) if fwr_vse_next: ref_vcns_rule_id = fwr_vse_next['ruleId'] try: header = self.vcns.add_firewall_rule_above( edge_id, int(ref_vcns_rule_id), fwr_req)[0] except vcns_exc.VcnsApiException: with excutils.save_and_reraise_exception(): LOG.exception("Failed to add firewall rule above: " "%(rule_id)s with edge_id: %(edge_id)s", {'rule_id': ref_vcns_rule_id, 'edge_id': edge_id}) else: # append the rule at the bottom try: header = self.vcns.add_firewall_rule( edge_id, fwr_req)[0] except vcns_exc.VcnsApiException: with excutils.save_and_reraise_exception(): LOG.exception("Failed to append a firewall rule" "with edge_id: %s", edge_id) objuri = header['location'] fwr_vseid = objuri[objuri.rfind("/") + 1:] map_info = { 'rule_id': firewall_rule['id'], 'rule_vseid': fwr_vseid, 'edge_id': edge_id } nsxv_db.add_nsxv_edge_firewallrule_binding( context.session, map_info) def insert_rule(self, context, rule_info, edge_id, fwr): if rule_info.get('insert_before'): self._add_rule_above( context, rule_info['insert_before'], edge_id, fwr) elif rule_info.get('insert_after'): self._add_rule_below( context, rule_info['insert_after'], edge_id, fwr) else: msg = _("Can't execute insert rule operation " "without reference rule_id") raise vcns_exc.VcnsBadRequest(resource='firewall_rule', msg=msg) def update_firewall(self, edge_id, firewall, context, allow_external=True): config = self._convert_firewall(firewall, allow_external=allow_external) try: self.vcns.update_firewall(edge_id, config) except vcns_exc.VcnsApiException: with excutils.save_and_reraise_exception(): LOG.exception("Failed to update firewall " "with edge_id: %s", edge_id) vcns_fw_config = self._get_firewall(edge_id) nsxv_db.cleanup_nsxv_edge_firewallrule_binding( context.session, edge_id) self._create_rule_id_mapping( context, edge_id, firewall, vcns_fw_config) def _create_rule_id_mapping( self, context, edge_id, firewall, vcns_fw): for rule in vcns_fw['firewallRules']['firewallRules']: if 
rule.get('ruleTag'): index = rule['ruleTag'] - 1 # TODO(linb):a simple filter of the retrieved rules which may # be created by other operations unintentionally if index < len(firewall['firewall_rule_list']): rule_vseid = rule['ruleId'] rule_id = firewall['firewall_rule_list'][index].get('id') if rule_id: map_info = { 'rule_id': rule_id, 'rule_vseid': rule_vseid, 'edge_id': edge_id } nsxv_db.add_nsxv_edge_firewallrule_binding( context.session, map_info) def get_icmp_echo_application_ids(self): # check cached list first # (if backend version changes, neutron should be restarted) if self._icmp_echo_application_ids: return self._icmp_echo_application_ids self._icmp_echo_application_ids = self.get_application_ids( ['ICMP Echo', 'IPv6-ICMP Echo']) if not self._icmp_echo_application_ids: raise nsx_exc.NsxResourceNotFound( res_name='ICMP Echo', res_id='') return self._icmp_echo_application_ids def get_application_ids(self, application_names): results = self.vcns.list_applications() application_ids = [] for result in results: for name in application_names: if result['name'] == name: application_ids.append(result['objectId']) return application_ids ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v/vshield/edge_ipsecvpn_driver.py0000644000175000017500000001467500000000000030421 0ustar00coreycorey00000000000000# Copyright 2014 VMware, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from oslo_utils import excutils from vmware_nsx._i18n import _ from vmware_nsx.plugins.nsx_v.vshield.common import ( exceptions as vcns_exc) LOG = logging.getLogger(__name__) ENCRYPTION_ALGORITHM_MAP = { '3des': '3des', 'aes-128': 'aes', 'aes-256': 'aes256' } PFS_MAP = { 'group2': 'dh2', 'group5': 'dh5'} TRANSFORM_PROTOCOL_ALLOWED = ('esp',) ENCAPSULATION_MODE_ALLOWED = ('tunnel',) class EdgeIPsecVpnDriver(object): """Driver APIs for Edge IPsec VPN bulk configuration.""" def _check_ikepolicy_ipsecpolicy_allowed(self, ikepolicy, ipsecpolicy): """Check whether ikepolicy and ipsecpolicy are allowed on vshield edge. Some IPsec VPN configurations and features are configured by default or not supported on vshield edge. """ # Check validation of IKEPolicy. if ikepolicy['ike_version'] != 'v1': msg = _("Unsupported ike_version: %s! Only 'v1' ike version is " "supported on vshield Edge!" ) % ikepolicy['ike_version'] LOG.warning(msg) raise vcns_exc.VcnsBadRequest(resource='ikepolicy', msg=msg) # In VSE, Phase 1 and Phase 2 share the same encryption_algorithm # and authentication algorithms setting. At present, just record the # discrepancy error in log and take ipsecpolicy to do configuration. if (ikepolicy['auth_algorithm'] != ipsecpolicy['auth_algorithm'] or ikepolicy['encryption_algorithm'] != ipsecpolicy[ 'encryption_algorithm'] or ikepolicy['pfs'] != ipsecpolicy['pfs']): LOG.warning( "IKEPolicy and IPsecPolicy should have consistent " "auth_algorithm, encryption_algorithm and pfs for VSE!") # Check whether encryption_algorithm is allowed. encryption_algorithm = ENCRYPTION_ALGORITHM_MAP.get( ipsecpolicy.get('encryption_algorithm'), None) if not encryption_algorithm: msg = _("Unsupported encryption_algorithm: %s! '3des', " "'aes-128' and 'aes-256' are supported on VSE right now." ) % ipsecpolicy['encryption_algorithm'] LOG.warning(msg) raise vcns_exc.VcnsBadRequest(resource='ipsecpolicy', msg=msg) # Check whether pfs is allowed. 
if not PFS_MAP.get(ipsecpolicy['pfs']): msg = _("Unsupported pfs: %s! 'group2' and 'group5' " "are supported on VSE right now.") % ipsecpolicy['pfs'] LOG.warning(msg) raise vcns_exc.VcnsBadRequest(resource='ipsecpolicy', msg=msg) # Check whether transform protocol is allowed. if ipsecpolicy['transform_protocol'] not in TRANSFORM_PROTOCOL_ALLOWED: msg = _("Unsupported transform protocol: %s! 'esp' is supported " "by default on VSE right now." ) % ipsecpolicy['transform_protocol'] LOG.warning(msg) raise vcns_exc.VcnsBadRequest(resource='ipsecpolicy', msg=msg) # Check whether encapsulation mode is allowed. if ipsecpolicy['encapsulation_mode'] not in ENCAPSULATION_MODE_ALLOWED: msg = _("Unsupported encapsulation mode: %s! 'tunnel' is " "supported by default on VSE right now." ) % ipsecpolicy['encapsulation_mode'] LOG.warning(msg) raise vcns_exc.VcnsBadRequest(resource='ipsecpolicy', msg=msg) def _convert_ipsec_site(self, site, enablePfs=True): self._check_ikepolicy_ipsecpolicy_allowed( site['ikepolicy'], site['ipsecpolicy']) return { 'enabled': site['site'].get('admin_state_up'), 'enablePfs': enablePfs, 'dhGroup': PFS_MAP.get(site['ipsecpolicy']['pfs']), 'name': site['site'].get('name'), 'description': site['site'].get('description'), 'localId': site['external_ip'], 'localIp': site['external_ip'], 'peerId': site['site'].get('peer_id'), 'peerIp': site['site'].get('peer_address'), 'localSubnets': { 'subnets': [site['subnet'].get('cidr')]}, 'peerSubnets': { 'subnets': site['site'].get('peer_cidrs')}, 'authenticationMode': site['site'].get('auth_mode'), 'psk': site['site'].get('psk'), 'encryptionAlgorithm': ENCRYPTION_ALGORITHM_MAP.get( site['ipsecpolicy'].get('encryption_algorithm'))} def update_ipsec_config(self, edge_id, sites, enabled=True): ipsec_config = {'featureType': "ipsec_4.0", 'enabled': enabled} vse_sites = [self._convert_ipsec_site(site) for site in sites] ipsec_config['sites'] = {'sites': vse_sites} try: self.vcns.update_ipsec_config(edge_id, ipsec_config) 
except vcns_exc.VcnsApiException: with excutils.save_and_reraise_exception(): LOG.exception("Failed to update ipsec vpn " "configuration with edge_id: %s", edge_id) def delete_ipsec_config(self, edge_id): try: self.vcns.delete_ipsec_config(edge_id) except vcns_exc.ResourceNotFound: LOG.warning("IPsec config not found on edge: %s", edge_id) except vcns_exc.VcnsApiException: with excutils.save_and_reraise_exception(): LOG.exception("Failed to delete ipsec vpn configuration " "with edge_id: %s", edge_id) def get_ipsec_config(self, edge_id): return self.vcns.get_ipsec_config(edge_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v/vshield/edge_utils.py0000644000175000017500000036114600000000000026355 0ustar00coreycorey00000000000000# Copyright 2014 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from distutils import version import os import random import time import eventlet import netaddr from neutron_lib.api.definitions import extra_dhcp_opt as ext_edo from neutron_lib.api import validators from neutron_lib import constants from neutron_lib import context as q_context from neutron_lib import exceptions as n_exc from neutron_lib.exceptions import l3 as l3_exc from oslo_config import cfg from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import excutils from oslo_utils import timeutils from oslo_utils import uuidutils import six from six import moves from sqlalchemy import exc as db_base_exc from sqlalchemy.orm import exc as sa_exc from vmware_nsx._i18n import _ from vmware_nsx.common import config as conf from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import locking from vmware_nsx.common import nsxv_constants from vmware_nsx.common import utils as c_utils from vmware_nsx.db import db as nsx_db from vmware_nsx.db import nsxv_db from vmware_nsx.dvs import dvs from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az from vmware_nsx.plugins.nsx_v.vshield.common import ( constants as vcns_const) from vmware_nsx.plugins.nsx_v.vshield.common import exceptions as nsxapi_exc from vmware_nsx.plugins.nsx_v.vshield import vcns WORKER_POOL_SIZE = 8 RP_FILTER_PROPERTY_OFF_TEMPLATE = 'sysctl.net.ipv4.conf.%s.rp_filter=%s' MAX_EDGE_PENDING_SEC = 600 LOG = logging.getLogger(__name__) _uuid = uuidutils.generate_uuid SUPPORTED_EDGE_LOG_MODULES = ('routing', 'highavailability', 'dhcp', 'loadbalancer', 'dns') SUPPORTED_EDGE_LOG_LEVELS = ('none', 'debug', 'info', 'warning', 'error') def _get_vdr_transit_network_ipobj(): transit_net = cfg.CONF.nsxv.vdr_transit_network return netaddr.IPNetwork(transit_net) def get_vdr_transit_network_netmask(): ip = _get_vdr_transit_network_ipobj() return str(ip.netmask) def get_vdr_transit_network_tlr_address(): ip = 
_get_vdr_transit_network_ipobj() return str(ip[1]) def get_vdr_transit_network_plr_address(): ip = _get_vdr_transit_network_ipobj() # We need to ensure backwards compatibility. The original edge address # was "169.254.2.3" if conf.DEFAULT_VDR_TRANSIT_NETWORK == cfg.CONF.nsxv.vdr_transit_network: return conf.DEFAULT_PLR_ADDRESS else: return str(ip[2]) def validate_vdr_transit_network(): try: ip = _get_vdr_transit_network_ipobj() except Exception: raise n_exc.Invalid(_("Invalid VDR transit network")) if len(ip) < 4: raise n_exc.Invalid(_("VDR transit address range too small")) if is_overlapping_reserved_subnets(cfg.CONF.nsxv.vdr_transit_network, nsxv_constants.RESERVED_IPS): raise n_exc.Invalid(_("VDR transit network overlaps reserved subnet")) def is_overlapping_reserved_subnets(cidr, reserved_subnets): """Return True if the subnet overlaps with reserved subnets. For the V plugin we have a limitation that we should not use some reserved ranges like: 169.254.128.0/17 and 169.254.1.0/24 """ range = netaddr.IPNetwork(cidr) # Check each reserved subnet for intersection for reserved_subnet in reserved_subnets: # translate the reserved subnet to a range object reserved_range = netaddr.IPNetwork(reserved_subnet) # check if new subnet overlaps this reserved subnet if (range.first <= reserved_range.last and reserved_range.first <= range.last): return True return False def parse_backup_edge_pool_opt_per_az(az): """Parse edge pool opts per AZ and returns result.""" edge_pool_opts = az.backup_edge_pool res = [] for edge_pool_def in edge_pool_opts: split = edge_pool_def.split(':') try: (edge_type, edge_size, minimum_pooled_edges, maximum_pooled_edges) = split[:4] except ValueError: raise n_exc.Invalid(_("Invalid edge pool format for availability" " zone %s") % az.name) if edge_type not in vcns_const.ALLOWED_EDGE_TYPES: msg = (_("edge type '%(edge_type)s' is not allowed, " "allowed types: %(allowed)s for availability zone " "%(name)s") % {'edge_type': edge_type, 'allowed': 
vcns_const.ALLOWED_EDGE_TYPES, 'name': az.name}) LOG.error(msg) raise n_exc.Invalid(msg) edge_size = edge_size or nsxv_constants.COMPACT if edge_size not in vcns_const.ALLOWED_EDGE_SIZES: msg = (_("edge size '%(edge_size)s' is not allowed, " "allowed types: %(allowed)s for availability zone " "%(name)s") % {'edge_type': edge_size, 'allowed': vcns_const.ALLOWED_EDGE_SIZES, 'name': az.name}) LOG.error(msg) raise n_exc.Invalid(msg) res.append({'edge_type': edge_type, 'edge_size': edge_size, 'minimum_pooled_edges': int(minimum_pooled_edges), 'maximum_pooled_edges': int(maximum_pooled_edges)}) edge_pool_dicts = {} for edge_type in vcns_const.ALLOWED_EDGE_TYPES: edge_pool_dicts[edge_type] = {} for r in res: edge_pool_dict = edge_pool_dicts[r['edge_type']] if r['edge_size'] in edge_pool_dict.keys(): raise n_exc.Invalid(_("Duplicate edge pool configuration for " "availability zone %s") % az.name) else: edge_pool_dict[r['edge_size']] = { 'minimum_pooled_edges': r['minimum_pooled_edges'], 'maximum_pooled_edges': r['maximum_pooled_edges']} return edge_pool_dicts class EdgeManager(object): """Edge Appliance Management. EdgeManager provides a pool of edge appliances which we can use to support DHCP&metadata, L3&FIP and LB&FW&VPN services. 
""" def __init__(self, nsxv_manager, plugin): LOG.debug("Start Edge Manager initialization") self._worker_pool_pid = None self._worker_pool = None self.nsxv_manager = nsxv_manager self._availability_zones = nsx_az.NsxVAvailabilityZones() self.edge_pool_dicts = self._parse_backup_edge_pool_opt() self.nsxv_plugin = nsxv_manager.callbacks.plugin self.plugin = plugin self.per_interface_rp_filter = self._get_per_edge_rp_filter_state() self._check_backup_edge_pools() def _parse_backup_edge_pool_opt(self): """Parse edge pool opts for all availability zones.""" az_list = self._availability_zones.list_availability_zones_objects() az_pools = {} for az in az_list: az_pools[az.name] = parse_backup_edge_pool_opt_per_az(az) return az_pools def _get_az_pool(self, az_name): return self.edge_pool_dicts[az_name] def _get_worker_pool(self): if self._worker_pool_pid != os.getpid(): self._worker_pool_pid = os.getpid() self._worker_pool = eventlet.GreenPool(WORKER_POOL_SIZE) return self._worker_pool def _get_per_edge_rp_filter_state(self): ver = self.nsxv_manager.vcns.get_version() if version.LooseVersion(ver) < version.LooseVersion('6.2.0'): return False return True def _mark_router_bindings_status_error(self, context, edge_id, error_reason="backend error"): for binding in nsxv_db.get_nsxv_router_bindings_by_edge( context.session, edge_id): if binding['status'] == constants.ERROR: continue LOG.error('Mark router binding ERROR for resource ' '%(res_id)s on edge %(edge_id)s due to ' '%(reason)s', {'res_id': binding['router_id'], 'edge_id': edge_id, 'reason': error_reason}) nsxv_db.update_nsxv_router_binding( context.session, binding['router_id'], status=constants.ERROR) def _deploy_edge(self, context, lrouter, lswitch=None, appliance_size=nsxv_constants.COMPACT, edge_type=nsxv_constants.SERVICE_EDGE, availability_zone=None, deploy_metadata=False): """Create an edge for logical router support.""" if context is None: context = q_context.get_admin_context() # deploy edge return 
self.nsxv_manager.deploy_edge(context, lrouter['id'], lrouter['name'], internal_network=None, appliance_size=appliance_size, dist=(edge_type == nsxv_constants.VDR_EDGE), availability_zone=availability_zone, deploy_metadata=deploy_metadata) def _deploy_backup_edges_on_db(self, context, num, appliance_size=nsxv_constants.COMPACT, edge_type=nsxv_constants.SERVICE_EDGE, availability_zone=None): router_ids = [(vcns_const.BACKUP_ROUTER_PREFIX + _uuid())[:vcns_const.EDGE_NAME_LEN] for i in moves.range(num)] for router_id in router_ids: nsxv_db.add_nsxv_router_binding( context.session, router_id, None, None, constants.PENDING_CREATE, appliance_size=appliance_size, edge_type=edge_type, availability_zone=availability_zone.name) return router_ids def _deploy_backup_edges_at_backend( self, context, router_ids, appliance_size=nsxv_constants.COMPACT, edge_type=nsxv_constants.SERVICE_EDGE, availability_zone=None): eventlet.spawn_n(self._pool_creator, router_ids, appliance_size, edge_type, availability_zone) def _pool_creator(self, router_ids, appliance_size, edge_type, availability_zone): for router_id in router_ids: fake_router = { 'id': router_id, 'name': router_id} self._get_worker_pool().spawn_n( self._deploy_edge, None, fake_router, appliance_size=appliance_size, edge_type=edge_type, availability_zone=availability_zone) def _delete_edge(self, context, router_binding): if router_binding['status'] == constants.ERROR: LOG.warning("Start deleting %(router_id)s corresponding " "edge: %(edge_id)s due to status error", {'router_id': router_binding['router_id'], 'edge_id': router_binding['edge_id']}) nsxv_db.update_nsxv_router_binding( context.session, router_binding['router_id'], status=constants.PENDING_DELETE) self._get_worker_pool().spawn_n( self.nsxv_manager.delete_edge, None, router_binding['router_id'], router_binding['edge_id'], dist=(router_binding['edge_type'] == nsxv_constants.VDR_EDGE)) def _delete_backup_edges_on_db(self, context, backup_router_bindings): for binding in 
backup_router_bindings: try: nsxv_db.update_nsxv_router_binding( context.session, binding['router_id'], status=constants.PENDING_DELETE) except sa_exc.NoResultFound: LOG.debug("Router binding %s does not exist.", binding['router_id']) def _delete_backup_edges_at_backend(self, context, backup_router_bindings): for binding in backup_router_bindings: # delete edge LOG.debug("Start deleting extra edge: %s in pool", binding['edge_id']) self._get_worker_pool().spawn_n( self.nsxv_manager.delete_edge, None, binding['router_id'], binding['edge_id'], dist=(binding['edge_type'] == nsxv_constants.VDR_EDGE)) def _clean_all_error_edge_bindings(self, context, availability_zone): # Find all backup edges in error state & # backup edges which are in pending-XXX state for too long filters = {'status': [constants.PENDING_CREATE, constants.PENDING_UPDATE, constants.PENDING_DELETE], 'availability_zone': [availability_zone.name]} if cfg.CONF.nsxv.housekeeping_readonly: filters['status'].append(constants.ERROR) like_filters = {'router_id': vcns_const.BACKUP_ROUTER_PREFIX + "%"} router_bindings = nsxv_db.get_nsxv_router_bindings( context.session, filters=filters, like_filters=like_filters) # filter only the entries in error state or too long in pending state error_router_bindings = [] for binding in router_bindings: to_delete = False if binding.status == constants.ERROR: to_delete = True elif binding.status == constants.PENDING_CREATE: # Bindings migrated from older versions have no created_at # attribute which should also be deleted. if (not binding.created_at or timeutils.is_older_than( binding.created_at, MAX_EDGE_PENDING_SEC)): to_delete = True elif (binding.status == constants.PENDING_UPDATE or binding.status == constants.PENDING_DELETE): # Bindings migrated from older versions have no updated_at # attribute. We will not delete those for now, as it is risky # and fails lots of tests. 
if (binding.updated_at and timeutils.is_older_than( binding.updated_at, MAX_EDGE_PENDING_SEC)): to_delete = True if to_delete: LOG.warning("Going to delete Erroneous edge: %s", binding) error_router_bindings.append(binding) self._delete_backup_edges_on_db(context, error_router_bindings) self._delete_backup_edges_at_backend(context, error_router_bindings) def _get_backup_edge_bindings(self, context, appliance_size=nsxv_constants.COMPACT, edge_type=nsxv_constants.SERVICE_EDGE, db_update_lock=False, availability_zone=None): filters = {'appliance_size': [appliance_size], 'edge_type': [edge_type], 'availability_zone': [availability_zone.name], 'status': [constants.PENDING_CREATE, constants.PENDING_UPDATE, constants.ACTIVE]} like_filters = {'router_id': vcns_const.BACKUP_ROUTER_PREFIX + "%"} return nsxv_db.get_nsxv_router_bindings( context.session, filters=filters, like_filters=like_filters) def _check_backup_edge_pools(self): admin_ctx = q_context.get_admin_context() for az in self._availability_zones.list_availability_zones_objects(): self._clean_all_error_edge_bindings(admin_ctx, az) for edge_type, v in self._get_az_pool(az.name).items(): for edge_size in vcns_const.ALLOWED_EDGE_SIZES: if edge_size in v.keys(): edge_pool_range = v[edge_size] self._check_backup_edge_pool( edge_pool_range['minimum_pooled_edges'], edge_pool_range['maximum_pooled_edges'], appliance_size=edge_size, edge_type=edge_type, availability_zone=az) else: self._check_backup_edge_pool( 0, 0, appliance_size=edge_size, edge_type=edge_type, availability_zone=az) def _check_backup_edge_pool(self, minimum_pooled_edges, maximum_pooled_edges, appliance_size=nsxv_constants.COMPACT, edge_type=nsxv_constants.SERVICE_EDGE, availability_zone=None): """Check edge pool's status and return one available edge for use.""" admin_ctx = q_context.get_admin_context() backup_router_bindings = self._get_backup_edge_bindings( admin_ctx, appliance_size=appliance_size, edge_type=edge_type, db_update_lock=True, 
availability_zone=availability_zone) backup_num = len(backup_router_bindings) if backup_num > maximum_pooled_edges: self._delete_backup_edges_on_db( admin_ctx, backup_router_bindings[:backup_num - maximum_pooled_edges]) elif backup_num < minimum_pooled_edges: new_backup_num = backup_num router_ids = [] while (new_backup_num < minimum_pooled_edges): router_ids.extend( self._deploy_backup_edges_on_db( admin_ctx, 1, appliance_size=appliance_size, edge_type=edge_type, availability_zone=availability_zone)) new_backup_num = len( self._get_backup_edge_bindings( admin_ctx, appliance_size=appliance_size, edge_type=edge_type, db_update_lock=True, availability_zone=availability_zone)) if backup_num > maximum_pooled_edges: self._delete_backup_edges_at_backend( admin_ctx, backup_router_bindings[:backup_num - maximum_pooled_edges]) elif backup_num < minimum_pooled_edges: self._deploy_backup_edges_at_backend( admin_ctx, router_ids, appliance_size=appliance_size, edge_type=edge_type, availability_zone=availability_zone) def check_edge_active_at_backend(self, edge_id): try: status = self.nsxv_manager.get_edge_status(edge_id) return (status == vcns_const.RouterStatus.ROUTER_STATUS_ACTIVE) except Exception: return False def _get_available_router_binding(self, context, appliance_size=nsxv_constants.COMPACT, edge_type=nsxv_constants.SERVICE_EDGE, availability_zone=None): backup_router_bindings = self._get_backup_edge_bindings( context, appliance_size=appliance_size, edge_type=edge_type, availability_zone=availability_zone) while backup_router_bindings: router_binding = random.choice(backup_router_bindings) if (router_binding['status'] == constants.ACTIVE): if not self.check_edge_active_at_backend( router_binding['edge_id']): LOG.debug("Delete unavailable backup resource " "%(router_id)s with edge_id %(edge_id)s", {'router_id': router_binding['router_id'], 'edge_id': router_binding['edge_id']}) self._delete_edge(context, router_binding) else: LOG.debug("Get an available backup resource 
" "%(router_id)s with edge_id %(edge_id)s", {'router_id': router_binding['router_id'], 'edge_id': router_binding['edge_id']}) return router_binding backup_router_bindings.remove(router_binding) def _get_physical_provider_network(self, context, network_id, az_dvs): bindings = nsxv_db.get_network_bindings(context.session, network_id) # Set the return value as the availability zone DVS-ID of the # mgmt/edge cluster phys_net = az_dvs network_type = None if bindings: binding = bindings[0] network_type = binding['binding_type'] if (network_type == c_utils.NsxVNetworkTypes.VLAN and binding['phy_uuid'] != ''): if ',' not in binding['phy_uuid']: phys_net = binding['phy_uuid'] # Return user input physical network value for all network types # except VXLAN networks. The DVS-ID of the mgmt/edge cluster must # be returned for VXLAN network types. # We also validate that this binding starts with 'dvs'. If a admin # creates a provider portgroup then we need to use the default # configured DVS. elif (not network_type == c_utils.NsxVNetworkTypes.VXLAN and binding['phy_uuid'] != '' and binding['phy_uuid'].startswith('dvs')): phys_net = binding['phy_uuid'] return phys_net, network_type def _create_sub_interface(self, context, network_id, network_name, tunnel_index, address_groups, port_group_id=None): az = self.plugin.get_network_az_by_net_id(context, network_id) vcns_network_id = _retrieve_nsx_switch_id(context, network_id, az.name) if port_group_id is None: portgroup = {'vlanId': 0, 'networkName': network_name, 'networkBindingType': 'Static', 'networkType': 'Isolation'} config_spec = {'networkSpec': portgroup} dvs_id, network_type = self._get_physical_provider_network( context, network_id, az.dvs_id) pg, port_group_id = self.nsxv_manager.vcns.create_port_group( dvs_id, config_spec) # Ensure that the portgroup has the correct teaming self.plugin._update_network_teaming(dvs_id, None, port_group_id) interface = { 'name': _uuid(), 'tunnelId': tunnel_index, 'logicalSwitchId': 
vcns_network_id, 'isConnected': True } interface['addressGroups'] = {'addressGroups': address_groups} return port_group_id, interface def _getvnic_config(self, edge_id, vnic_index): _, vnic_config = self.nsxv_manager.get_interface(edge_id, vnic_index) return vnic_config def _delete_dhcp_internal_interface(self, context, edge_id, vnic_index, tunnel_index, network_id): """Delete the dhcp internal interface.""" LOG.debug("Query the vnic %s for DHCP Edge %s", vnic_index, edge_id) try: vnic_config = self._getvnic_config(edge_id, vnic_index) sub_interfaces = (vnic_config['subInterfaces']['subInterfaces'] if 'subInterfaces' in vnic_config else []) port_group_id = (vnic_config['portgroupId'] if 'portgroupId' in vnic_config else None) for sub_interface in sub_interfaces: if tunnel_index == sub_interface['tunnelId']: LOG.debug("Delete the tunnel %d on vnic %d", tunnel_index, vnic_index) (vnic_config['subInterfaces']['subInterfaces']. remove(sub_interface)) break # Clean the vnic if there is no sub-interface attached if len(sub_interfaces) == 0: header, _ = self.nsxv_manager.vcns.delete_interface(edge_id, vnic_index) if port_group_id: az = self.plugin.get_network_az_by_net_id( context, network_id) dvs_id, net_type = self._get_physical_provider_network( context, network_id, az.dvs_id) self.nsxv_manager.delete_port_group(dvs_id, port_group_id) else: self.nsxv_manager.vcns.update_interface(edge_id, vnic_config) except nsxapi_exc.VcnsApiException: LOG.exception('Failed to delete vnic %(vnic_index)d ' 'tunnel %(tunnel_index)d on edge %(edge_id)s ' 'for network %(net_id)s', {'vnic_index': vnic_index, 'tunnel_index': tunnel_index, 'net_id': network_id, 'edge_id': edge_id}) self._mark_router_bindings_status_error( context, edge_id, error_reason="delete dhcp internal interface failure") self._delete_dhcp_router_binding(context, network_id, edge_id) def _delete_dhcp_router_binding(self, context, network_id, edge_id): """Delete the router binding or clean the edge appliance.""" 
resource_id = (vcns_const.DHCP_EDGE_PREFIX + network_id)[:36] bindings = nsxv_db.get_nsxv_router_bindings_by_edge( context.session, edge_id) all_edge_dhcp_entries = [binding['router_id'] for binding in bindings if binding['router_id']. startswith(vcns_const.DHCP_EDGE_PREFIX)] for router_id in all_edge_dhcp_entries: if (router_id != resource_id): # There are additional networks on this DHCP edge. # just delete the binding one and not the edge itself nsxv_db.delete_nsxv_router_binding(context.session, resource_id) return az_name = bindings[0]['availability_zone'] if bindings else '' self._free_dhcp_edge_appliance(context, network_id, az_name) def _addr_groups_convert_to_ipset(self, address_groups): cidr_list = [] for addr_group in address_groups: cidr = "/".join([addr_group['primaryAddress'], addr_group['subnetPrefixLength']]) cidr_list.append(cidr) return netaddr.IPSet(cidr_list) def _update_dhcp_internal_interface(self, context, edge_id, vnic_index, tunnel_index, network_id, address_groups): """Update the dhcp internal interface: 1. Add a new vnic tunnel with the address groups 2. 
Update the address groups to an existing tunnel """ LOG.debug("Query the vnic %s for DHCP Edge %s", vnic_index, edge_id) h, vnic_config = self.nsxv_manager.get_interface(edge_id, vnic_index) sub_iface_dict = vnic_config.get('subInterfaces') port_group_id = vnic_config.get('portgroupId') new_tunnel_creation = True iface_list = [] # Update the sub interface address groups for specific tunnel if sub_iface_dict: sub_interfaces = sub_iface_dict.get('subInterfaces') addr_groups_ipset = self._addr_groups_convert_to_ipset( address_groups) for sb in sub_interfaces: if tunnel_index == sb['tunnelId']: new_tunnel_creation = False sb['addressGroups']['addressGroups'] = address_groups else: sb_ipset = self._addr_groups_convert_to_ipset( sb['addressGroups']['addressGroups']) if addr_groups_ipset & sb_ipset: ls_id = sb['logicalSwitchId'] net_ids = nsx_db.get_net_ids(context.session, ls_id) if net_ids: # Here should never happen, else one bug occurs LOG.error("net %(id)s on edge %(edge_id)s " "overlaps with new net %(net_id)s", {'id': net_ids[0], 'edge_id': edge_id, 'net_id': network_id}) raise nsx_exc.NsxPluginException( err_msg=(_("update dhcp interface for net %s " "failed") % network_id)) else: # Occurs when there are DB inconsistency sb["is_overlapped"] = True LOG.error("unexpected sub intf %(id)s on edge " "%(edge_id)s overlaps with new net " "%(net_id)s. 
we would update with " "deleting it for DB consistency", {'id': ls_id, 'edge_id': edge_id, 'net_id': network_id}) iface_list = [sub for sub in sub_interfaces if not sub.get('is_overlapped', False)] # The first DHCP service creation, not update if new_tunnel_creation: network_name_item = [edge_id, str(vnic_index), str(tunnel_index)] network_name = ('-'.join(network_name_item) + _uuid())[:36] port_group_id, iface = self._create_sub_interface( context, network_id, network_name, tunnel_index, address_groups, port_group_id) iface_list.append(iface) LOG.debug("Update the vnic %d for DHCP Edge %s", vnic_index, edge_id) self.nsxv_manager.update_interface('fake_router_id', edge_id, vnic_index, port_group_id, tunnel_index, address_groups=iface_list) @vcns.retry_upon_exception(db_base_exc.OperationalError, max_delay=10) def _allocate_edge_appliance(self, context, resource_id, name, appliance_size=nsxv_constants.COMPACT, dist=False, availability_zone=None, deploy_metadata=False): """Try to allocate one available edge from pool.""" edge_type = (nsxv_constants.VDR_EDGE if dist else nsxv_constants.SERVICE_EDGE) lrouter = {'id': resource_id, 'name': name} az_pool = self._get_az_pool(availability_zone.name) edge_pool_range = az_pool[edge_type].get(appliance_size) if edge_pool_range is None: nsxv_db.add_nsxv_router_binding( context.session, resource_id, None, None, constants.PENDING_CREATE, appliance_size=appliance_size, edge_type=edge_type, availability_zone=availability_zone.name) return self._deploy_edge(context, lrouter, appliance_size=appliance_size, edge_type=edge_type, availability_zone=availability_zone, deploy_metadata=deploy_metadata) with locking.LockManager.get_lock('nsx-edge-backup-pool'): self._clean_all_error_edge_bindings( context, availability_zone=availability_zone) available_router_binding = self._get_available_router_binding( context, appliance_size=appliance_size, edge_type=edge_type, availability_zone=availability_zone) if available_router_binding: # Update the 
status from ACTIVE to PENDING_UPDATE # in case of other threads select the same router binding nsxv_db.update_nsxv_router_binding( context.session, available_router_binding['router_id'], status=constants.PENDING_UPDATE) # Synchronously deploy an edge if no available edge in pool. if not available_router_binding: # store router-edge mapping binding nsxv_db.add_nsxv_router_binding( context.session, resource_id, None, None, constants.PENDING_CREATE, appliance_size=appliance_size, edge_type=edge_type, availability_zone=availability_zone.name) edge_id = self._deploy_edge(context, lrouter, appliance_size=appliance_size, edge_type=edge_type, availability_zone=availability_zone, deploy_metadata=deploy_metadata) else: LOG.debug("Select edge: %(edge_id)s from pool for %(name)s", {'edge_id': available_router_binding['edge_id'], 'name': name}) # select the first available edge in pool. nsxv_db.delete_nsxv_router_binding( context.session, available_router_binding['router_id']) nsxv_db.add_nsxv_router_binding( context.session, lrouter['id'], available_router_binding['edge_id'], None, constants.PENDING_CREATE, appliance_size=appliance_size, edge_type=edge_type, availability_zone=availability_zone.name) edge_id = available_router_binding['edge_id'] LOG.debug("Select edge: %(edge_id)s from pool for %(name)s", {'edge_id': edge_id, 'name': name}) with locking.LockManager.get_lock(str(edge_id)): self.nsxv_manager.callbacks.complete_edge_creation( context, edge_id, lrouter['name'], lrouter['id'], dist, True, availability_zone=availability_zone, deploy_metadata=deploy_metadata) try: self.nsxv_manager.rename_edge(edge_id, name) except nsxapi_exc.VcnsApiException as e: LOG.error("Failed to update edge: %s", e.response) self.nsxv_manager.callbacks.complete_edge_update( context, edge_id, resource_id, False, set_errors=True) backup_num = len(self._get_backup_edge_bindings( context, appliance_size=appliance_size, edge_type=edge_type, db_update_lock=True, availability_zone=availability_zone)) 
router_ids = self._deploy_backup_edges_on_db( context, edge_pool_range['minimum_pooled_edges'] - backup_num, appliance_size=appliance_size, edge_type=edge_type, availability_zone=availability_zone) self._deploy_backup_edges_at_backend( context, router_ids, appliance_size=appliance_size, edge_type=edge_type, availability_zone=availability_zone) return edge_id def _free_edge_appliance(self, context, router_id): """Try to collect one edge to pool.""" with locking.LockManager.get_lock('nsx-edge-backup-pool'): binding = nsxv_db.get_nsxv_router_binding(context.session, router_id) if not binding: LOG.warning("router binding for router: %s " "not found", router_id) return dist = (binding['edge_type'] == nsxv_constants.VDR_EDGE) edge_id = binding['edge_id'] availability_zone_name = nsxv_db.get_edge_availability_zone( context.session, edge_id) az_pool = self._get_az_pool(availability_zone_name) edge_pool_range = az_pool[binding['edge_type']].get( binding['appliance_size']) nsxv_db.delete_nsxv_router_binding( context.session, router_id) backup_router_id = (vcns_const.BACKUP_ROUTER_PREFIX + _uuid())[:vcns_const.EDGE_NAME_LEN] nsxv_db.add_nsxv_router_binding( context.session, backup_router_id, edge_id, None, constants.PENDING_UPDATE, appliance_size=binding['appliance_size'], edge_type=binding['edge_type'], availability_zone=availability_zone_name) router_id = backup_router_id if (binding['status'] == constants.ERROR or not self.check_edge_active_at_backend(edge_id) or not edge_pool_range): nsxv_db.update_nsxv_router_binding( context.session, router_id, status=constants.PENDING_DELETE) # delete edge self._get_worker_pool().spawn_n( self.nsxv_manager.delete_edge, None, router_id, edge_id, dist=dist) return availability_zone = self._availability_zones.get_availability_zone( availability_zone_name) self._clean_all_error_edge_bindings( context, availability_zone=availability_zone) backup_router_bindings = self._get_backup_edge_bindings( context, 
appliance_size=binding['appliance_size'], edge_type=binding['edge_type'], availability_zone=availability_zone) backup_num = len(backup_router_bindings) # collect the edge to pool if pool not full if backup_num < edge_pool_range['maximum_pooled_edges']: # change edge's name at backend update_result = self.nsxv_manager.update_edge( context, backup_router_id, edge_id, backup_router_id, None, appliance_size=binding['appliance_size'], dist=dist, availability_zone=availability_zone) # Clean all edge vnic bindings nsxv_db.clean_edge_vnic_binding(context.session, edge_id) # Refresh edge_vnic_bindings for centralized router if not dist and edge_id: nsxv_db.init_edge_vnic_binding(context.session, edge_id) if update_result: nsxv_db.update_nsxv_router_binding( context.session, backup_router_id, status=constants.ACTIVE) LOG.debug("Collect edge: %s to pool", edge_id) else: nsxv_db.update_nsxv_router_binding( context.session, router_id, status=constants.PENDING_DELETE) # delete edge self._get_worker_pool().spawn_n( self.nsxv_manager.delete_edge, None, router_id, edge_id, dist=dist) def _allocate_dhcp_edge_appliance(self, context, resource_id, availability_zone): resource_name = (vcns_const.DHCP_EDGE_PREFIX + _uuid())[:vcns_const.EDGE_NAME_LEN] self._allocate_edge_appliance( context, resource_id, resource_name, appliance_size=vcns_const.SERVICE_SIZE_MAPPING['dhcp'], availability_zone=availability_zone, deploy_metadata=True) def allocate_lb_edge_appliance( self, context, resource_id, availability_zone, appliance_size=vcns_const.SERVICE_SIZE_MAPPING['lb']): return self._allocate_edge_appliance( context, resource_id, resource_id, appliance_size=appliance_size, availability_zone=availability_zone) def _free_dhcp_edge_appliance(self, context, network_id, az_name): router_id = (vcns_const.DHCP_EDGE_PREFIX + network_id)[:36] # if there are still metadata ports on this edge - delete them now if self.plugin.metadata_proxy_handler: metadata_proxy_handler = 
self.plugin.get_metadata_proxy_handler( az_name) if metadata_proxy_handler: metadata_proxy_handler.cleanup_router_edge(context, router_id, warn=True) self._free_edge_appliance(context, router_id) def _build_lrouter_name(self, router_id, router_name): return ( router_name[:nsxv_constants.ROUTER_NAME_LENGTH - len(router_id)] + '-' + router_id) def update_syslog_by_flavor(self, context, router_id, flavor_id, edge_id): """Update syslog config on edge according to router flavor.""" syslog_config = self._get_syslog_config_from_flavor(context, router_id, flavor_id) if syslog_config: self.nsxv_manager.update_edge_syslog(edge_id, syslog_config, router_id) def create_lrouter( self, context, lrouter, lswitch=None, dist=False, appliance_size=vcns_const.SERVICE_SIZE_MAPPING['router'], availability_zone=None): """Create an edge for logical router support.""" router_name = self._build_lrouter_name(lrouter['id'], lrouter['name']) edge_id = self._allocate_edge_appliance( context, lrouter['id'], router_name, appliance_size=appliance_size, dist=dist, availability_zone=availability_zone) if lrouter.get('flavor_id'): self.update_syslog_by_flavor(context, lrouter['id'], lrouter['flavor_id'], edge_id) return edge_id def delete_lrouter(self, context, router_id, dist=False): self._free_edge_appliance(context, router_id) def rename_lrouter(self, context, router_id, new_name): binding = nsxv_db.get_nsxv_router_binding(context.session, router_id) if not binding or not binding['edge_id']: LOG.warning("router binding for router: %s " "not found", router_id) return edge_id = binding['edge_id'] with locking.LockManager.get_lock(str(edge_id)): router_name = self._build_lrouter_name(router_id, new_name) self.nsxv_manager.rename_edge(edge_id, router_name) def resize_lrouter(self, context, router_id, new_size): # get the router edge-id binding = nsxv_db.get_nsxv_router_binding(context.session, router_id) if not binding or not binding['edge_id']: LOG.warning("router binding for router: %s " "not 
found", router_id) return edge_id = binding['edge_id'] with locking.LockManager.get_lock(str(edge_id)): # update the router on backend self.nsxv_manager.resize_edge(edge_id, new_size) # update the DB nsxv_db.update_nsxv_router_binding( context.session, router_id, appliance_size=new_size) def update_dhcp_edge_bindings(self, context, network_id): """Reconfigure the DHCP to the edge.""" resource_id = (vcns_const.DHCP_EDGE_PREFIX + network_id)[:36] edge_binding = nsxv_db.get_nsxv_router_binding(context.session, resource_id) if not edge_binding: return with locking.LockManager.get_lock(str(edge_binding['edge_id'])): self.update_dhcp_service_config(context, edge_binding['edge_id']) def _add_dhcp_option(self, static_config, opt): if 'dhcpOptions' not in static_config: static_config['dhcpOptions'] = {} opt_name = opt['opt_name'] opt_val = opt['opt_value'] if opt_name in vcns_const.SUPPORTED_DHCP_OPTIONS: key = vcns_const.SUPPORTED_DHCP_OPTIONS[opt_name] if opt_name == 'classless-static-route': if 'option121' not in static_config['dhcpOptions']: static_config['dhcpOptions']['option121'] = { 'staticRoutes': []} opt121 = static_config['dhcpOptions']['option121'] net, ip = opt_val.split(',') opt121['staticRoutes'].append({'destinationSubnet': net, 'router': ip}) elif (opt_name == 'tftp-server-address' or opt_name == 'tftp-server'): if 'option150' not in static_config['dhcpOptions']: static_config['dhcpOptions']['option150'] = { 'tftpServers': []} opt150 = static_config['dhcpOptions']['option150'] opt150['tftpServers'].append(opt_val) else: static_config['dhcpOptions'][key] = opt_val else: if 'other' not in static_config['dhcpOptions']: static_config['dhcpOptions']['others'] = [] static_config['dhcpOptions']['others'].append( {'code': opt_name, 'value': opt_val}) def create_static_binding(self, context, port): """Create the DHCP Edge static binding configuration """ static_bindings = [] static_config = {} static_config['macAddress'] = port['mac_address'] 
static_config['hostname'] = port['id'] static_config['leaseTime'] = cfg.CONF.nsxv.dhcp_lease_time for fixed_ip in port['fixed_ips']: # Query the subnet to get gateway and DNS try: subnet_id = fixed_ip['subnet_id'] subnet = self.nsxv_plugin._get_subnet(context, subnet_id) except n_exc.SubnetNotFound: LOG.debug("No related subnet for port %s", port['id']) continue # Only configure if subnet has DHCP support if not subnet['enable_dhcp']: continue static_config['ipAddress'] = fixed_ip['ip_address'] # Set gateway for static binding static_config['defaultGateway'] = subnet['gateway_ip'] # set primary and secondary dns name_servers = [dns['address'] for dns in subnet['dns_nameservers']] # if no nameservers have been configured then use the ones # defined in the configuration name_servers = name_servers or cfg.CONF.nsxv.nameservers if len(name_servers) == 1: static_config['primaryNameServer'] = name_servers[0] elif len(name_servers) >= 2: static_config['primaryNameServer'] = name_servers[0] static_config['secondaryNameServer'] = name_servers[1] # Set search domain for static binding sub_binding = nsxv_db.get_nsxv_subnet_ext_attributes( context.session, subnet_id) dns_search_domain = None if sub_binding and sub_binding.dns_search_domain: dns_search_domain = sub_binding.dns_search_domain elif cfg.CONF.nsxv.dns_search_domain: dns_search_domain = cfg.CONF.nsxv.dns_search_domain if dns_search_domain: static_config['domainName'] = dns_search_domain if sub_binding and sub_binding.dhcp_mtu: static_config = self.add_mtu_on_static_binding( static_config, sub_binding.dhcp_mtu) self.handle_meta_static_route( context, subnet_id, [static_config]) for host_route in subnet['routes']: self.add_host_route_on_static_bindings( [static_config], host_route['destination'], host_route['nexthop']) dhcp_opts = port.get(ext_edo.EXTRADHCPOPTS) if dhcp_opts is not None: for opt in dhcp_opts: self._add_dhcp_option(static_config, opt) static_bindings.append(static_config) return static_bindings def 
add_host_route_on_static_bindings(self, static_bindings, dest_cidr, nexthop): """Add one host route on a bulk of static bindings config. We can add host route on VM via dhcp option121. this func can only works at NSXv version 6.2.3 or higher. """ for binding in static_bindings: if 'dhcpOptions' not in six.iterkeys(binding): binding['dhcpOptions'] = {} if 'option121' not in six.iterkeys(binding['dhcpOptions']): binding['dhcpOptions']['option121'] = {'staticRoutes': []} binding_opt121 = binding['dhcpOptions']['option121'] if 'staticRoutes' not in six.iterkeys(binding_opt121): binding_opt121['staticRoutes'] = [] binding_opt121['staticRoutes'].append({ 'destinationSubnet': dest_cidr, 'router': nexthop}) return static_bindings def add_mtu_on_static_binding(self, static_binding, mtu): """Add the pre-configured MTU to a static binding config. We can add the MTU via dhcp option26. This func can only works at NSXv version 6.2.3 or higher. """ if 'dhcpOptions' not in six.iterkeys(static_binding): static_binding['dhcpOptions'] = {} static_binding['dhcpOptions']['option26'] = mtu return static_binding def handle_meta_static_route(self, context, subnet_id, static_bindings): is_dhcp_option121 = self.nsxv_plugin.is_dhcp_metadata(context, subnet_id) if is_dhcp_option121: dhcp_ip = self.nsxv_plugin._get_dhcp_ip_addr_from_subnet( context, subnet_id) if dhcp_ip: self.add_host_route_on_static_bindings( static_bindings, '169.254.169.254/32', dhcp_ip) else: LOG.error("Failed to find the dhcp port on subnet " "%s to do metadata host route insertion", subnet_id) def update_dhcp_service_config(self, context, edge_id): """Reconfigure the DHCP to the edge.""" # Get all networks attached to the edge edge_vnic_bindings = nsxv_db.get_edge_vnic_bindings_by_edge( context.session, edge_id) dhcp_networks = [edge_vnic_binding.network_id for edge_vnic_binding in edge_vnic_bindings] subnets = self.nsxv_plugin.get_subnets( context.elevated(), filters={'network_id': dhcp_networks, 'enable_dhcp': 
[True]}) static_bindings = [] for subnet in subnets: ports = self.nsxv_plugin.get_ports( context.elevated(), filters={'network_id': [subnet['network_id']], 'fixed_ips': {'subnet_id': [subnet['id']]}}) inst_ports = [port for port in ports if port['device_owner'].startswith('compute')] for port in inst_ports: static_bindings.extend( self.create_static_binding( context.elevated(), port)) dhcp_request = { 'featureType': "dhcp_4.0", 'enabled': True, 'staticBindings': {'staticBindings': static_bindings}} self.nsxv_manager.vcns.reconfigure_dhcp_service( edge_id, dhcp_request) bindings_get = get_dhcp_binding_mappings(self.nsxv_manager, edge_id) # Refresh edge_dhcp_static_bindings attached to edge nsxv_db.clean_edge_dhcp_static_bindings_by_edge( context.session, edge_id) for mac_address, binding_id in bindings_get.items(): nsxv_db.create_edge_dhcp_static_binding(context.session, edge_id, mac_address, binding_id) def _get_random_available_edge(self, available_edge_ids): while available_edge_ids: # Randomly select an edge ID from the pool. new_id = random.choice(available_edge_ids) # Validate whether the edge exists on the backend. if not self.check_edge_active_at_backend(new_id): # Remove edge_id from available edges pool. available_edge_ids.remove(new_id) LOG.warning("Skipping edge: %s due to inactive status on " "the backend.", new_id) else: return new_id def _get_available_edges(self, context, network_id, conflicting_nets, availability_zone): if conflicting_nets is None: conflicting_nets = [] conflict_edge_ids = [] available_edge_ids = [] filters = {'availability_zone': [availability_zone.name]} router_bindings = nsxv_db.get_nsxv_router_bindings(context.session, filters=filters) all_dhcp_edges = {binding['router_id']: binding['edge_id'] for binding in router_bindings if (binding['router_id']. 
startswith(vcns_const.DHCP_EDGE_PREFIX) and binding['status'] == constants.ACTIVE)} # Special case if there is more than one subnet per exclusive DHCP # network if availability_zone.exclusive_dhcp_edge: router_id = (vcns_const.DHCP_EDGE_PREFIX + network_id)[:36] edge_id = all_dhcp_edges.get(router_id) if edge_id: LOG.info("Reusing the same DHCP edge for network %s", network_id) available_edge_ids.append(edge_id) return (conflict_edge_ids, available_edge_ids) if all_dhcp_edges: for dhcp_edge_id in set(all_dhcp_edges.values()): edge_vnic_bindings = nsxv_db.get_edge_vnic_bindings_by_edge( context.session, dhcp_edge_id) free_number = ((vcns_const.MAX_VNIC_NUM - 1) * vcns_const.MAX_TUNNEL_NUM - len(edge_vnic_bindings)) # metadata internal network will use one vnic or # exclusive_dhcp_edge is set for the AZ if (free_number <= (vcns_const.MAX_TUNNEL_NUM - 1) or availability_zone.exclusive_dhcp_edge): conflict_edge_ids.append(dhcp_edge_id) for net_id in conflicting_nets: router_id = (vcns_const.DHCP_EDGE_PREFIX + net_id)[:36] edge_id = all_dhcp_edges.get(router_id) if (edge_id and edge_id not in conflict_edge_ids): conflict_edge_ids.append(edge_id) for x in all_dhcp_edges.values(): if (x not in conflict_edge_ids and x not in available_edge_ids): available_edge_ids.append(x) return (conflict_edge_ids, available_edge_ids) def _get_used_edges(self, context, subnet, availability_zone): """Returns conflicting and available edges for the subnet.""" conflicting = self.plugin._get_conflicting_networks_for_subnet( context, subnet) return self._get_available_edges(context, subnet['network_id'], conflicting, availability_zone) def remove_network_from_dhcp_edge(self, context, network_id, edge_id): # If DHCP edge was created initially for this network, metadata port # Might use this network's DHCP router_id as device_id. 
Call the # following to validate this self.reconfigure_shared_edge_metadata_port( context, (vcns_const.DHCP_EDGE_PREFIX + network_id)[:36]) old_binding = nsxv_db.get_edge_vnic_binding( context.session, edge_id, network_id) if not old_binding: LOG.error("Remove network %(id)s failed since no binding " "found on edge %(edge_id)s", {'id': network_id, 'edge_id': edge_id}) self._delete_dhcp_router_binding(context, network_id, edge_id) return old_vnic_index = old_binding['vnic_index'] old_tunnel_index = old_binding['tunnel_index'] # Cut off the port group/virtual wire connection nsxv_db.free_edge_vnic_by_network(context.session, edge_id, network_id) try: # update dhcp service config on edge_id self.update_dhcp_service_config(context, edge_id) except nsxapi_exc.VcnsApiException: LOG.exception('Failed to delete vnic %(vnic_index)d ' 'tunnel %(tunnel_index)d on edge %(edge_id)s', {'vnic_index': old_vnic_index, 'tunnel_index': old_tunnel_index, 'edge_id': edge_id}) self._mark_router_bindings_status_error( context, edge_id, error_reason="remove network from dhcp edge failure") except Exception: LOG.exception('Failed to delete vnic %(vnic_index)d ' 'tunnel %(tunnel_index)d on edge %(edge_id)s', {'vnic_index': old_vnic_index, 'tunnel_index': old_tunnel_index, 'edge_id': edge_id}) self._delete_dhcp_internal_interface(context, edge_id, old_vnic_index, old_tunnel_index, network_id) def reuse_existing_dhcp_edge(self, context, edge_id, resource_id, network_id, availability_zone): app_size = vcns_const.SERVICE_SIZE_MAPPING['dhcp'] # There may be edge cases when we are waiting for edges to deploy # and the underlying db session may hit a timeout. 
So this creates # a new session context = q_context.get_admin_context() nsxv_db.add_nsxv_router_binding( context.session, resource_id, edge_id, None, constants.ACTIVE, appliance_size=app_size, availability_zone=availability_zone.name) nsxv_db.allocate_edge_vnic_with_tunnel_index( context.session, edge_id, network_id, availability_zone.name) def reconfigure_shared_edge_metadata_port(self, context, org_router_id): if not self.plugin.metadata_proxy_handler: return org_binding = nsxv_db.get_nsxv_router_binding(context.session, org_router_id) if not org_binding: return az_name = org_binding['availability_zone'] int_net = nsxv_db.get_nsxv_internal_network( context.session, vcns_const.InternalEdgePurposes.INTER_EDGE_PURPOSE, az_name) if not int_net: return # Query the ports of this internal network internal_nets = [int_net['network_id']] ports = self.nsxv_plugin.get_ports( context, filters={'device_id': [org_router_id], 'network_id': internal_nets}) if not ports: LOG.debug('No metadata ports found for %s', org_router_id) return elif len(ports) > 1: LOG.debug('Expecting one metadata port for %s. 
Found %d ports', org_router_id, len(ports)) edge_id = org_binding['edge_id'] bindings = nsxv_db.get_nsxv_router_bindings( context.session, filters={'edge_id': [edge_id]}) for binding in bindings: if binding['router_id'] != org_router_id: for port in ports: self.plugin.update_port( context, port['id'], {'port': {'device_id': binding['router_id']}}) return def allocate_new_dhcp_edge(self, context, network_id, resource_id, availability_zone): self._allocate_dhcp_edge_appliance(context, resource_id, availability_zone) new_edge = nsxv_db.get_nsxv_router_binding(context.session, resource_id) nsxv_db.allocate_edge_vnic_with_tunnel_index( context.session, new_edge['edge_id'], network_id, availability_zone.name) return new_edge['edge_id'] def create_dhcp_edge_service(self, context, network_id, subnet): """ Create an edge if there is no available edge for dhcp service, Update an edge if there is available edge for dhcp service If new edge was allocated, return resource_id, else return None """ availability_zone = self.plugin.get_network_az_by_net_id( context, network_id) # Check if the network has one related dhcp edge resource_id = (vcns_const.DHCP_EDGE_PREFIX + network_id)[:36] dhcp_edge_binding = nsxv_db.get_nsxv_router_binding(context.session, resource_id) allocate_new_edge = False with locking.LockManager.get_lock('nsx-dhcp-edge-pool'): (conflict_edge_ids, available_edge_ids) = self._get_used_edges(context, subnet, availability_zone) LOG.debug("The available edges %s, the conflict edges %s ", available_edge_ids, conflict_edge_ids) edge_id = None # Check if the network can stay on the existing DHCP edge if dhcp_edge_binding: edge_id = dhcp_edge_binding['edge_id'] LOG.debug("At present network %s is using edge %s", network_id, edge_id) with locking.LockManager.get_lock(str(edge_id)): # Delete the existing vnic interface if there is # an overlapping subnet or the binding is in ERROR status if (edge_id in conflict_edge_ids or dhcp_edge_binding['status'] == constants.ERROR): 
LOG.debug("Removing network %s from dhcp edge %s", network_id, edge_id) self.remove_network_from_dhcp_edge(context, network_id, edge_id) edge_id = None if not edge_id: #Attach the network to a new Edge and update vnic: #1. Find an available existing edge or create a new one #2. For the existing one, cut off the old port group # connection #3. Create the new port group connection to an existing one #4. Update the address groups to the vnic if available_edge_ids: new_id = self._get_random_available_edge( available_edge_ids) if new_id: LOG.debug("Select edge %s to support dhcp for " "network %s", new_id, network_id) self.reuse_existing_dhcp_edge( context, new_id, resource_id, network_id, availability_zone) else: allocate_new_edge = True else: allocate_new_edge = True if allocate_new_edge: self.allocate_new_dhcp_edge(context, network_id, resource_id, availability_zone) # If a new Edge was allocated, return resource_id return resource_id def update_dhcp_edge_service(self, context, network_id, address_groups=None): """Update the subnet to the dhcp edge vnic.""" if address_groups is None: address_groups = [] resource_id = (vcns_const.DHCP_EDGE_PREFIX + network_id)[:36] edge_binding = nsxv_db.get_nsxv_router_binding(context.session, resource_id) if not edge_binding: LOG.warning('Edge binding does not exist for network %s', network_id) return dhcp_binding = nsxv_db.get_edge_vnic_binding(context.session, edge_binding['edge_id'], network_id) if dhcp_binding: edge_id = dhcp_binding['edge_id'] with locking.LockManager.get_lock(str(edge_id)): vnic_index = dhcp_binding['vnic_index'] tunnel_index = dhcp_binding['tunnel_index'] LOG.debug('Update the dhcp service for %s on vnic %d tunnel ' '%d', edge_id, vnic_index, tunnel_index) try: self._update_dhcp_internal_interface( context, edge_id, vnic_index, tunnel_index, network_id, address_groups) ports = self.nsxv_plugin.get_ports( context, filters={'network_id': [network_id]}) inst_ports = [port for port in ports if 
port['device_owner'].startswith( "compute")] if inst_ports: # update dhcp service config for the new added network self.update_dhcp_service_config(context, edge_id) except nsxapi_exc.VcnsApiException: with excutils.save_and_reraise_exception(): LOG.exception( 'Failed to update the dhcp service for ' '%(edge_id)s on vnic %(vnic_index)d ' 'tunnel %(tunnel_index)d', {'edge_id': edge_id, 'vnic_index': vnic_index, 'tunnel_index': tunnel_index}) self._mark_router_bindings_status_error( context, edge_id, error_reason="update dhcp edge service") except Exception: with excutils.save_and_reraise_exception(): LOG.exception( 'Failed to update the dhcp service for ' '%(edge_id)s on vnic %(vnic_index)d ' 'tunnel %(tunnel_index)d', {'edge_id': edge_id, 'vnic_index': vnic_index, 'tunnel_index': tunnel_index}) def delete_dhcp_edge_service(self, context, network_id): """Delete an edge for dhcp service.""" resource_id = (vcns_const.DHCP_EDGE_PREFIX + network_id)[:36] edge_binding = nsxv_db.get_nsxv_router_binding(context.session, resource_id) if edge_binding: dhcp_binding = nsxv_db.get_edge_vnic_binding( context.session, edge_binding['edge_id'], network_id) if dhcp_binding: edge_id = dhcp_binding['edge_id'] with locking.LockManager.get_lock(str(edge_id)): vnic_index = dhcp_binding['vnic_index'] tunnel_index = dhcp_binding['tunnel_index'] LOG.debug("Delete the tunnel %d on vnic %d from DHCP Edge " "%s", tunnel_index, vnic_index, edge_id) nsxv_db.free_edge_vnic_by_network(context.session, edge_id, network_id) try: self._delete_dhcp_internal_interface(context, edge_id, vnic_index, tunnel_index, network_id) except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Failed to delete the tunnel ' '%(tunnel_index)d on vnic ' '%(vnic_index)d' 'from DHCP Edge %(edge_id)s', {'tunnel_index': tunnel_index, 'vnic_index': vnic_index, 'edge_id': edge_id}) def _update_address_in_dict(self, address_groups, old_ip, new_ip, subnet_mask): """Update the address_groups data structure to 
replace the old ip with a new one. If the old ip is None - if the ip matches an existing subnet: add it as a secondary ip. else - add a new address group for the new ip If the new ip is none - delete the primary/secondary entry with the old ip. If the old ip was not found - return False Otherwise - return True """ if old_ip is None: # Adding a new IP # look for an address group with a primary ip in the same subnet # as the new ip for address_group in address_groups['addressGroups']: if (netaddr.IPAddress(new_ip) in netaddr.IPNetwork(address_group['primaryAddress'] + '/' + address_group['subnetPrefixLength'])): # we should add the new ip as a secondary address in this # address group if (address_group.get('secondaryAddresses') is not None): secondary = address_group['secondaryAddresses'] secondary['ipAddress'].append(new_ip) else: address_group['secondaryAddresses'] = { 'type': 'secondary_addresses', 'ipAddress': [new_ip]} return True # Could not find the same subnet - add a new address group address_group = { 'primaryAddress': new_ip, 'subnetMask': subnet_mask } address_groups['addressGroups'].append(address_group) return True else: for ind, address_group in enumerate( address_groups['addressGroups']): if address_group['primaryAddress'] == old_ip: # this is the one we should update if new_ip: address_group['primaryAddress'] = new_ip else: # delete this entry address_groups['addressGroups'].pop(ind) return True # try to find a match in the secondary ips if (address_group.get('secondaryAddresses') is not None): secondary = address_group['secondaryAddresses'] secondary_ips = secondary['ipAddress'] if old_ip in secondary_ips: # We should update the secondary addresses if new_ip: # replace the old with the new secondary_ips.remove(old_ip) secondary_ips.append(new_ip) else: # delete this entry if len(secondary_ips) == 1: # delete the whole structure del address_group['secondaryAddresses'] else: secondary_ips.remove(old_ip) return True # The old ip was not found return 
False def update_interface_addr(self, context, edge_id, old_ip, new_ip, subnet_mask, is_uplink=False): with locking.LockManager.get_lock(edge_id): # get the current interfaces configuration r = self.nsxv_manager.vcns.get_interfaces(edge_id)[1] vnics = r.get('vnics', []) # Go over the vnics to find the one we should update for vnic in vnics: if ((is_uplink and vnic['type'] == 'uplink') or not is_uplink and vnic['type'] != 'uplink'): if self._update_address_in_dict( vnic['addressGroups'], old_ip, new_ip, subnet_mask): self.nsxv_manager.vcns.update_interface(edge_id, vnic) return # If we got here - we didn't find the old ip: error = (_("Failed to update interface ip " "on edge %(eid)s: Cannot find the previous ip %(ip)s") % {'eid': edge_id, 'ip': old_ip}) raise nsx_exc.NsxPluginException(err_msg=error) def update_vdr_interface_addr(self, context, edge_id, vnic_index, old_ip, new_ip, subnet_mask): with locking.LockManager.get_lock(edge_id): # get the current interfaces configuration vnic = self.nsxv_manager.vcns.get_vdr_internal_interface( edge_id, vnic_index)[1] if self._update_address_in_dict( vnic['addressGroups'], old_ip, new_ip, subnet_mask): interface_req = {'interface': vnic} self.nsxv_manager.vcns.update_vdr_internal_interface( edge_id, vnic_index, interface_req) return # If we got here - we didn't find the old ip: error = (_("Failed to update VDR interface ip " "on edge %(eid)s: Cannot find the previous ip %(ip)s") % {'eid': edge_id, 'ip': old_ip}) raise nsx_exc.NsxPluginException(err_msg=error) def get_plr_by_tlr_id(self, context, router_id): lswitch_id = None binding = nsxv_db.get_nsxv_router_binding( context.session, router_id) if binding: lswitch_id = binding.lswitch_id if lswitch_id: edge_vnic_bindings = nsxv_db.get_edge_vnic_bindings_by_int_lswitch( context.session, lswitch_id) if edge_vnic_bindings: for edge_vnic_binding in edge_vnic_bindings: plr_router_id = nsxv_db.get_nsxv_router_bindings_by_edge( context.session, 
edge_vnic_binding.edge_id)[0].router_id if plr_router_id != router_id: return plr_router_id def create_plr_with_tlr_id(self, context, router_id, router_name, availability_zone): # Add an internal network preparing for connecting the VDR # to a PLR tlr_edge_id = nsxv_db.get_nsxv_router_binding( context.session, router_id).edge_id # First create an internal lswitch lswitch_name = ('int-' + router_name + router_id)[:36] virtual_wire = {"name": lswitch_name, "tenantId": "virtual wire tenant"} config_spec = {"virtualWireCreateSpec": virtual_wire} vdn_scope_id = availability_zone.vdn_scope_id h, lswitch_id = self.nsxv_manager.vcns.create_virtual_wire( vdn_scope_id, config_spec) # add vdr's external interface to the lswitch tlr_vnic_index = self.nsxv_manager.add_vdr_internal_interface( tlr_edge_id, lswitch_id, address=get_vdr_transit_network_tlr_address(), netmask=get_vdr_transit_network_netmask(), type="uplink") nsxv_db.create_edge_vnic_binding( context.session, tlr_edge_id, tlr_vnic_index, lswitch_id) # store the lswitch_id into nsxv_router_binding nsxv_db.update_nsxv_router_binding( context.session, router_id, lswitch_id=lswitch_id) # Handle plr relative op plr_router = {'name': router_name, 'id': (vcns_const.PLR_EDGE_PREFIX + _uuid())[:36]} self.create_lrouter( context, plr_router, availability_zone=availability_zone, appliance_size=cfg.CONF.nsxv.exclusive_router_appliance_size) binding = nsxv_db.get_nsxv_router_binding( context.session, plr_router['id']) plr_edge_id = binding['edge_id'] plr_vnic_index = nsxv_db.allocate_edge_vnic( context.session, plr_edge_id, lswitch_id).vnic_index #TODO(berlin): the internal ip should change based on vnic_index self.nsxv_manager.update_interface( plr_router['id'], plr_edge_id, plr_vnic_index, lswitch_id, address=get_vdr_transit_network_plr_address(), netmask=get_vdr_transit_network_netmask()) return plr_router['id'] def delete_plr_by_tlr_id(self, context, plr_id, router_id): # Delete plr's internal interface which connects to 
internal switch tlr_binding = nsxv_db.get_nsxv_router_binding( context.session, router_id) lswitch_id = tlr_binding.lswitch_id tlr_edge_id = tlr_binding.edge_id router_binding = nsxv_db.get_nsxv_router_binding( context.session, plr_id) if router_binding is None: LOG.error("Router binding not found for router: %s", router_id) else: plr_edge_id = router_binding.edge_id vnic_binding = nsxv_db.get_edge_vnic_binding( context.session, plr_edge_id, lswitch_id) if vnic_binding is None: LOG.error("Vnic binding not found for router: %s", router_id) else: # Clear static routes before delete internal vnic self.nsxv_manager.update_routes(plr_edge_id, None, []) # Delete internal vnic self.nsxv_manager.delete_interface(plr_id, plr_edge_id, vnic_binding.vnic_index) nsxv_db.free_edge_vnic_by_network( context.session, plr_edge_id, lswitch_id) # Delete the PLR self.delete_lrouter(context, plr_id) # Clear static routes of vdr self.nsxv_manager.update_routes(tlr_edge_id, None, []) #First delete the vdr's external interface tlr_vnic_binding = nsxv_db.get_edge_vnic_binding( context.session, tlr_edge_id, lswitch_id) if tlr_vnic_binding is None: LOG.error("Vnic binding not found for router: %s", router_id) else: self.nsxv_manager.delete_vdr_internal_interface( tlr_edge_id, tlr_vnic_binding.vnic_index) nsxv_db.delete_edge_vnic_binding_by_network( context.session, tlr_edge_id, lswitch_id) try: # Then delete the internal lswitch self.nsxv_manager.delete_virtual_wire(lswitch_id) except Exception: LOG.warning("Failed to delete virtual wire: %s", lswitch_id) def get_routers_on_edge(self, context, edge_id): router_ids = [] valid_router_ids = [] if edge_id: router_ids = [ binding['router_id'] for binding in nsxv_db.get_nsxv_router_bindings_by_edge( context.session, edge_id)] if router_ids: valid_router_ids = self.plugin.get_routers( context.elevated(), filters={'id': router_ids}, fields=['id']) valid_router_ids = [ele['id'] for ele in valid_router_ids] if set(valid_router_ids) != set(router_ids): 
LOG.error("Get invalid router bindings with " "router ids: %s", str(set(router_ids) - set(valid_router_ids))) return valid_router_ids def get_routers_on_same_edge(self, context, router_id): edge_binding = nsxv_db.get_nsxv_router_binding( context.session, router_id) if edge_binding: return self.get_routers_on_edge(context, edge_binding['edge_id']) return [] def bind_router_on_available_edge( self, context, target_router_id, optional_router_ids, conflict_router_ids, conflict_network_ids, network_number, availability_zone): """Bind logical shared router on an available edge. Return True if the logical router is bound to a new edge. """ with locking.LockManager.get_lock('nsx-edge-router'): optional_edge_ids = [] conflict_edge_ids = [] for router_id in optional_router_ids: binding = nsxv_db.get_nsxv_router_binding( context.session, router_id) if (binding and binding.status == constants.ACTIVE and binding.availability_zone == availability_zone.name and binding.edge_id not in optional_edge_ids): optional_edge_ids.append(binding.edge_id) for router_id in conflict_router_ids: binding = nsxv_db.get_nsxv_router_binding( context.session, router_id) if binding and binding.edge_id not in conflict_edge_ids: conflict_edge_ids.append(binding.edge_id) optional_edge_ids = list( set(optional_edge_ids) - set(conflict_edge_ids)) max_net_number = 0 available_edge_id = None for edge_id in optional_edge_ids: edge_vnic_bindings = nsxv_db.get_edge_vnic_bindings_by_edge( context.session, edge_id) # one vnic is used to provide external access. 
net_number = ( vcns_const.MAX_VNIC_NUM - len(edge_vnic_bindings) - 1) if (net_number > max_net_number and net_number >= network_number): net_ids = [vnic_binding.network_id for vnic_binding in edge_vnic_bindings] if not (set(conflict_network_ids) & set(net_ids)): max_net_number = net_number available_edge_id = edge_id else: # TODO(yangyu): Remove conflict_network_ids LOG.warning( "Failed to query conflict_router_ids") if available_edge_id: edge_binding = nsxv_db.get_nsxv_router_bindings_by_edge( context.session, available_edge_id)[0] nsxv_db.add_nsxv_router_binding( context.session, target_router_id, edge_binding.edge_id, None, edge_binding.status, edge_binding.appliance_size, edge_binding.edge_type, availability_zone=availability_zone.name) else: router_name = ('shared' + '-' + _uuid())[ :vcns_const.EDGE_NAME_LEN] self._allocate_edge_appliance( context, target_router_id, router_name, appliance_size=cfg.CONF.nsxv.shared_router_appliance_size, availability_zone=availability_zone) return True def unbind_router_on_edge(self, context, router_id): """Unbind a logical router from edge. Return True if no logical router bound to the edge. 
""" with locking.LockManager.get_lock('nsx-edge-router'): # free edge if no other routers bound to the edge router_ids = self.get_routers_on_same_edge(context, router_id) if router_ids == [router_id]: self._free_edge_appliance(context, router_id) return True else: nsxv_db.delete_nsxv_router_binding(context.session, router_id) def is_router_conflict_on_edge(self, context, router_id, conflict_router_ids, conflict_network_ids, intf_num=0): with locking.LockManager.get_lock('nsx-edge-router'): router_ids = self.get_routers_on_same_edge(context, router_id) if set(router_ids) & set(conflict_router_ids): return True router_binding = nsxv_db.get_nsxv_router_binding(context.session, router_id) edge_vnic_bindings = nsxv_db.get_edge_vnic_bindings_by_edge( context.session, router_binding.edge_id) if (vcns_const.MAX_VNIC_NUM - len(edge_vnic_bindings ) - 1 < intf_num): LOG.debug("There isn't available edge vnic for the router: %s", router_id) return True for binding in edge_vnic_bindings: if binding.network_id in conflict_network_ids: return True return False def delete_dhcp_binding(self, context, port_id, network_id, mac_address): edge_id = get_dhcp_edge_id(context, network_id) if edge_id: dhcp_binding = nsxv_db.get_edge_dhcp_static_binding( context.session, edge_id, mac_address) if dhcp_binding: with locking.LockManager.get_lock(str(edge_id)): # We need to read the binding from the NSX to check that # we are not deleting a updated entry. 
This may be the # result of a async nova create and nova delete and the # same port IP is selected binding = get_dhcp_binding_for_binding_id( self.nsxv_manager, edge_id, dhcp_binding.binding_id) # The hostname is the port_id so we have a unique # identifier if binding and binding['hostname'] == port_id: self.nsxv_manager.vcns.delete_dhcp_binding( edge_id, dhcp_binding.binding_id) else: LOG.warning("Failed to find binding on edge " "%(edge_id)s for port " "%(port_id)s with %(binding_id)s", {'edge_id': edge_id, 'port_id': port_id, 'binding_id': dhcp_binding.binding_id}) nsxv_db.delete_edge_dhcp_static_binding( context.session, edge_id, mac_address) else: LOG.warning("Failed to find dhcp binding on edge " "%(edge_id)s to DELETE for port " "%(port_id)s", {'edge_id': edge_id, 'port_id': port_id}) else: # This happens during network/subnet deletion LOG.info("Didn't delete dhcp binding for port %(port_id)s: " "No edge id", {'port_id': port_id}) @vcns.retry_upon_exception(nsxapi_exc.VcnsApiException, max_delay=10) def _create_dhcp_binding(self, context, edge_id, binding): try: h, c = self.nsxv_manager.vcns.create_dhcp_binding( edge_id, binding) binding_id = h['location'].split('/')[-1] nsxv_db.create_edge_dhcp_static_binding( context.session, edge_id, binding['macAddress'], binding_id) except nsxapi_exc.VcnsApiException as e: with excutils.save_and_reraise_exception(): binding_id = None if e.response: desc = jsonutils.loads(e.response) if desc.get('errorCode') == ( vcns_const.NSX_ERROR_DHCP_DUPLICATE_MAC): bindings = get_dhcp_binding_mappings(self.nsxv_manager, edge_id) binding_id = bindings.get( binding['macAddress'].lower()) LOG.debug("Duplicate MAC for %s with binding %s", binding['macAddress'], binding_id) elif desc.get('errorCode') == ( vcns_const.NSX_ERROR_DHCP_OVERLAPPING_IP): bindings = get_dhcp_binding_mappings_for_ips( self.nsxv_manager, edge_id) binding_id = bindings.get(binding['ipAddress']) LOG.debug("Overlapping IP %s with binding %s", binding['ipAddress'], 
binding_id) elif desc.get('errorCode') == ( vcns_const.NSX_ERROR_DHCP_DUPLICATE_HOSTNAME): bindings = get_dhcp_binding_mappings_for_hostname( self.nsxv_manager, edge_id) binding_id = bindings.get(binding['hostname']) LOG.debug("Overlapping hostname %s with binding %s", binding['hostname'], binding_id) if binding_id: self.nsxv_manager.vcns.delete_dhcp_binding( edge_id, binding_id) nsxv_db.delete_edge_dhcp_static_binding_id( context.session, edge_id, binding_id) return binding_id def create_dhcp_bindings(self, context, port_id, network_id, bindings): edge_id = get_dhcp_edge_id(context, network_id) if edge_id: # Check port is still there try: # Reload port db info context.session.expire_all() self.plugin.get_port(context, port_id) except n_exc.PortNotFound: LOG.warning( "port %(port_id)s is deleted, so we would pass " "creating dhcp binding on edge %(edge_id)s", {'port_id': port_id, 'edge_id': edge_id}) return configured_bindings = [] try: for binding in bindings: with locking.LockManager.get_lock(str(edge_id)): binding_id = self._create_dhcp_binding( context, edge_id, binding) configured_bindings.append((binding_id, binding['macAddress'])) except nsxapi_exc.VcnsApiException: with excutils.save_and_reraise_exception(): for binding_id, mac_address in configured_bindings: with locking.LockManager.get_lock(str(edge_id)): self.nsxv_manager.vcns.delete_dhcp_binding( edge_id, binding_id) nsxv_db.delete_edge_dhcp_static_binding( context.session, edge_id, mac_address) else: LOG.warning("Failed to create dhcp bindings since dhcp edge " "for net %s not found at the backend", network_id) def _get_syslog_config_from_flavor(self, context, router_id, flavor_id): if not validators.is_attr_set(flavor_id): return metainfo = self.plugin.get_flavor_metainfo(context, flavor_id) return metainfo.get('syslog') def update_external_interface( self, nsxv_manager, context, router_id, ext_net_id, ipaddr, netmask, secondary=None): secondary = secondary or [] binding = 
nsxv_db.get_nsxv_router_binding(context.session, router_id) # If no binding was found, no interface to update - exit if not binding: LOG.error('Edge binding not found for router %s', router_id) return net_bindings = nsxv_db.get_network_bindings( context.session, ext_net_id) if not net_bindings: az_name = binding.availability_zone az = self._availability_zones.get_availability_zone(az_name) vcns_network_id = az.external_network else: vcns_network_id = net_bindings[0].phy_uuid # reorganize external vnic's address groups if netmask: address_groups = [] addr_list = [] for str_cidr in netmask: ip_net = netaddr.IPNetwork(str_cidr) address_group = {'primaryAddress': None, 'subnetPrefixLength': str(ip_net.prefixlen)} if (ipaddr not in addr_list and _check_ipnet_ip(ip_net, ipaddr)): address_group['primaryAddress'] = ipaddr addr_list.append(ipaddr) for sec_ip in secondary: if (sec_ip not in addr_list and _check_ipnet_ip(ip_net, sec_ip)): if not address_group['primaryAddress']: address_group['primaryAddress'] = sec_ip else: if not address_group.get('secondaryAddresses'): address_group['secondaryAddresses'] = { 'ipAddress': [sec_ip], 'type': 'secondary_addresses'} else: address_group['secondaryAddresses'][ 'ipAddress'].append(sec_ip) addr_list.append(sec_ip) if address_group['primaryAddress']: address_groups.append(address_group) if ipaddr not in addr_list: LOG.error("primary address %s of ext vnic is not " "configured", ipaddr) if secondary: missed_ip_sec = set(secondary) - set(addr_list) if missed_ip_sec: LOG.error("secondary address %s of ext vnic are not " "configured", str(missed_ip_sec)) nsxv_manager.update_interface(router_id, binding['edge_id'], vcns_const.EXTERNAL_VNIC_INDEX, vcns_network_id, address_groups=address_groups) else: nsxv_manager.update_interface(router_id, binding['edge_id'], vcns_const.EXTERNAL_VNIC_INDEX, vcns_network_id, address=ipaddr, netmask=netmask, secondary=secondary) def create_lrouter(nsxv_manager, context, lrouter, lswitch=None, dist=False, 
availability_zone=None): """Create an edge for logical router support.""" router_id = lrouter['id'] router_name = lrouter['name'] + '-' + router_id appliance_size = vcns_const.SERVICE_SIZE_MAPPING['router'] # store router-edge mapping binding nsxv_db.add_nsxv_router_binding( context.session, router_id, None, None, constants.PENDING_CREATE, appliance_size=appliance_size, availability_zone=availability_zone.name) # deploy edge nsxv_manager.deploy_edge( context, router_id, router_name, internal_network=None, dist=dist, appliance_size=appliance_size, availability_zone=availability_zone) def delete_lrouter(nsxv_manager, context, router_id, dist=False): binding = nsxv_db.get_nsxv_router_binding(context.session, router_id) if binding: nsxv_db.update_nsxv_router_binding( context.session, router_id, status=constants.PENDING_DELETE) edge_id = binding['edge_id'] # delete edge nsxv_manager.delete_edge(context, router_id, edge_id, dist=dist) else: LOG.warning("router binding for router: %s not found", router_id) def remove_irrelevant_keys_from_edge_request(edge_request): """Remove some unnecessary keys from the edge request. Having these keys fail the update edge NSX transaction """ for key in ['status', 'datacenterMoid', 'fqdn', 'version', 'tenant', 'datacenterName', 'hypervisorAssist', 'universal', 'enableFips']: edge_request.pop(key, None) def _retrieve_nsx_switch_id(context, network_id, az_name): """Helper method to retrieve backend switch ID.""" bindings = nsxv_db.get_network_bindings(context.session, network_id) if bindings: binding = bindings[0] network_type = binding['binding_type'] if (network_type == c_utils.NsxVNetworkTypes.VLAN and binding['phy_uuid'] != ''): if ',' not in binding['phy_uuid']: dvs_id = binding['phy_uuid'] else: # If network is of type VLAN and multiple dvs associated with # one neutron network, retrieve the logical network id for the # edge/mgmt cluster's DVS from the networks availability zone. 
azs = nsx_az.NsxVAvailabilityZones() az = azs.get_availability_zone(az_name) dvs_id = az.dvs_id return nsx_db.get_nsx_switch_id_for_dvs( context.session, network_id, dvs_id) # Get the physical port group /wire id of the network id mappings = nsx_db.get_nsx_switch_ids(context.session, network_id) if mappings: return mappings[0] raise nsx_exc.NsxPluginException( err_msg=_("Network %s not found at the backend") % network_id) def get_dhcp_edge_id(context, network_id): # Query edge id resource_id = (vcns_const.DHCP_EDGE_PREFIX + network_id)[:36] binding = nsxv_db.get_nsxv_router_binding(context.session, resource_id) if binding: edge_id = binding['edge_id'] return edge_id def get_dhcp_binding_mappings(nsxv_manager, edge_id): dhcp_config = query_dhcp_service_config(nsxv_manager, edge_id) bindings_get = {} if dhcp_config: for binding in dhcp_config['staticBindings']['staticBindings']: bindings_get[binding['macAddress'].lower()] = binding['bindingId'] return bindings_get def get_dhcp_binding_mappings_for_ips(nsxv_manager, edge_id): dhcp_config = query_dhcp_service_config(nsxv_manager, edge_id) bindings_get = {} if dhcp_config: for binding in dhcp_config['staticBindings']['staticBindings']: bindings_get[binding['ipAddress']] = binding['bindingId'] return bindings_get def get_dhcp_binding_mappings_for_hostname(nsxv_manager, edge_id): dhcp_config = query_dhcp_service_config(nsxv_manager, edge_id) bindings_get = {} if dhcp_config: for binding in dhcp_config['staticBindings']['staticBindings']: bindings_get[binding['hostname']] = binding['bindingId'] return bindings_get def _get_dhcp_binding_for_binding_id(nsxv_manager, edge_id, binding_id): dhcp_config = query_dhcp_service_config(nsxv_manager, edge_id) if dhcp_config: for binding in dhcp_config['staticBindings']['staticBindings']: if binding['bindingId'] == binding_id: return binding def _get_dhcp_binding(nsxv_manager, edge_id, binding_id): try: h, dhcp_binding = nsxv_manager.vcns.get_dhcp_binding(edge_id, binding_id) return 
dhcp_binding except Exception: return def get_dhcp_binding_for_binding_id(nsxv_manager, edge_id, binding_id): # API for specific binding is supported in NSX 6.2.8 and 6.3.3 onwards ver = nsxv_manager.vcns.get_version() if c_utils.is_nsxv_dhcp_binding_supported(ver): return _get_dhcp_binding(nsxv_manager, edge_id, binding_id) else: return _get_dhcp_binding_for_binding_id(nsxv_manager, edge_id, binding_id) def query_dhcp_service_config(nsxv_manager, edge_id): """Retrieve the current DHCP configuration from the edge.""" _, dhcp_config = nsxv_manager.vcns.query_dhcp_configuration(edge_id) return dhcp_config def get_router_edge_id(context, router_id): binding = nsxv_db.get_nsxv_router_binding(context.session, router_id) if binding: return binding['edge_id'] def update_gateway(nsxv_manager, context, router_id, nexthop, routes=None): binding = nsxv_db.get_nsxv_router_binding(context.session, router_id) edge_id = binding['edge_id'] if routes is None: routes = [] nsxv_manager.update_routes(edge_id, nexthop, routes) def get_routes(edge_manager, context, router_id): binding = nsxv_db.get_nsxv_router_binding(context.session, router_id) if not binding: LOG.error('Router binding not found for router %s', router_id) return [] edge_id = binding['edge_id'] vnic_bindings = nsxv_db.get_edge_vnic_bindings_by_edge(context.session, edge_id) if not vnic_bindings: LOG.error('vNic binding not found for edge %s', edge_id) return [] h, routes = edge_manager.vcns.get_routes(edge_id) edge_routes = routes.get('staticRoutes') routes = [] for edge_route in edge_routes.get('staticRoutes'): for vnic_binding in vnic_bindings: if vnic_binding['vnic_index'] == int(edge_route['vnic']): route = {'network_id': vnic_binding['network_id'], 'nexthop': edge_route['nextHop'], 'destination': edge_route['network']} routes.append(route) break return routes def update_routes(edge_manager, context, router_id, routes, nexthop=None): binding = nsxv_db.get_nsxv_router_binding(context.session, router_id) if not 
binding: LOG.error('Router binding not found for router %s', router_id) return edge_id = binding['edge_id'] edge_routes = [] for route in routes: if not route.get('network_id'): LOG.warning("There is no network info for the route %s, so " "the route entry would not be executed!", route) continue if route.get('external'): edge_routes.append({ 'vnic_index': vcns_const.EXTERNAL_VNIC_INDEX, 'cidr': route['destination'], 'nexthop': route['nexthop']}) else: vnic_binding = nsxv_db.get_edge_vnic_binding( context.session, edge_id, route['network_id']) if (netaddr.IPAddress(route['nexthop']) in netaddr.IPNetwork(route['destination'])): # check that the nexthop is not in the destination LOG.error("Cannot add route with nexthop %(nexthop)s " "contained in the destination: %(dest)s.", {'dest': route['destination'], 'nexthop': route['nexthop']}) continue if vnic_binding and vnic_binding.get('vnic_index'): edge_routes.append({ 'vnic_index': vnic_binding['vnic_index'], 'cidr': route['destination'], 'nexthop': route['nexthop']}) else: LOG.error("vnic binding on edge %(edge_id)s for network " "%(net_id)s not found, so route: destination: " "%(dest)s, nexthop: %(nexthop)s can't be " "applied!", {'edge_id': edge_id, 'net_id': route['network_id'], 'dest': route['destination'], 'nexthop': route['nexthop']}) edge_manager.update_routes(edge_id, nexthop, edge_routes) def get_internal_lswitch_id_of_plr_tlr(context, router_id): return nsxv_db.get_nsxv_router_binding( context.session, router_id).lswitch_id def get_internal_vnic_index_of_plr_tlr(context, router_id): router_binding = nsxv_db.get_nsxv_router_binding( context.session, router_id) edge_vnic_binding = nsxv_db.get_edge_vnic_binding( context.session, router_binding.edge_id, router_binding.lswitch_id) return edge_vnic_binding.vnic_index def clear_gateway(nsxv_manager, context, router_id): return update_gateway(nsxv_manager, context, router_id, None) def _check_ipnet_ip(ipnet, ip_address): """Check one ip is valid ip from ipnet.""" ip = 
netaddr.IPAddress(ip_address) if (ip != ipnet.netmask and ip != ipnet[-1] and ipnet.netmask & ip == ipnet.network): return True return False def update_internal_interface(nsxv_manager, context, router_id, int_net_id, address_groups, is_connected=True): # Get edge id binding = nsxv_db.get_nsxv_router_binding(context.session, router_id) edge_id = binding['edge_id'] # Get the pg/wire id of the network id az_name = binding['availability_zone'] vcns_network_id = _retrieve_nsx_switch_id(context, int_net_id, az_name) LOG.debug("Network id %(network_id)s corresponding ref is : " "%(net_moref)s", {'network_id': int_net_id, 'net_moref': vcns_network_id}) edge_vnic_binding = nsxv_db.get_edge_vnic_binding( context.session, edge_id, int_net_id) # if edge_vnic_binding is None, then first select one available # internal vnic for connection. if not edge_vnic_binding: edge_vnic_binding = nsxv_db.allocate_edge_vnic( context.session, edge_id, int_net_id) nsxv_manager.update_interface(router_id, edge_id, edge_vnic_binding.vnic_index, vcns_network_id, is_connected=is_connected, address_groups=address_groups) def add_vdr_internal_interface(nsxv_manager, context, router_id, int_net_id, address_groups, is_connected=True): # Get edge id binding = nsxv_db.get_nsxv_router_binding(context.session, router_id) edge_id = binding['edge_id'] # Get the pg/wire id of the network id az_name = binding['availability_zone'] vcns_network_id = _retrieve_nsx_switch_id(context, int_net_id, az_name) LOG.debug("Network id %(network_id)s corresponding ref is : " "%(net_moref)s", {'network_id': int_net_id, 'net_moref': vcns_network_id}) edge_vnic_binding = nsxv_db.get_edge_vnic_binding( context.session, edge_id, int_net_id) if not edge_vnic_binding: vnic_index = nsxv_manager.add_vdr_internal_interface( edge_id, vcns_network_id, address_groups=address_groups, is_connected=is_connected) nsxv_db.create_edge_vnic_binding( context.session, edge_id, vnic_index, int_net_id) else: msg = (_("Distributed Router doesn't 
support multiple subnets " "with same network attached to it.")) raise n_exc.BadRequest(resource='vdr', msg=msg) def update_vdr_internal_interface(nsxv_manager, context, router_id, int_net_id, address_groups, is_connected=True): # Get edge id binding = nsxv_db.get_nsxv_router_binding(context.session, router_id) edge_id = binding['edge_id'] # Get the pg/wire id of the network id az_name = binding['availability_zone'] vcns_network_id = _retrieve_nsx_switch_id(context, int_net_id, az_name) LOG.debug("Network id %(network_id)s corresponding ref is : " "%(net_moref)s", {'network_id': int_net_id, 'net_moref': vcns_network_id}) edge_vnic_binding = nsxv_db.get_edge_vnic_binding( context.session, edge_id, int_net_id) nsxv_manager.update_vdr_internal_interface( edge_id, edge_vnic_binding.vnic_index, vcns_network_id, address_groups=address_groups, is_connected=is_connected) def delete_interface(nsxv_manager, context, router_id, network_id, dist=False): # Get edge id binding = nsxv_db.get_nsxv_router_binding(context.session, router_id) if not binding: LOG.warning("Failed to find the router binding for router %s", router_id) return edge_id = binding['edge_id'] # Get the pg/wire id of the network id az_name = binding['availability_zone'] vcns_network_id = _retrieve_nsx_switch_id(context, network_id, az_name) LOG.debug("Network id %(network_id)s corresponding ref is : " "%(net_moref)s", {'network_id': network_id, 'net_moref': vcns_network_id}) edge_vnic_binding = nsxv_db.get_edge_vnic_binding( context.session, edge_id, network_id) if not edge_vnic_binding: LOG.warning("Failed to find the network %(net_id)s " "corresponding vnic index on edge %(edge_id)s", {'net_id': network_id, 'edge_id': edge_id}) return if not dist: nsxv_manager.delete_interface( router_id, edge_id, edge_vnic_binding.vnic_index) nsxv_db.free_edge_vnic_by_network( context.session, edge_id, network_id) else: nsxv_manager.delete_vdr_internal_interface( edge_id, edge_vnic_binding.vnic_index) 
nsxv_db.delete_edge_vnic_binding_by_network( context.session, edge_id, network_id) def update_nat_rules(nsxv_manager, context, router_id, snat, dnat, az=None): binding = nsxv_db.get_nsxv_router_binding(context.session, router_id) if binding: if not az: azs = nsx_az.NsxVAvailabilityZones() az = azs.get_availability_zone(binding['availability_zone']) bind_to_all = az.bind_floatingip_to_all_interfaces indices = None if bind_to_all: # from 6.2.4 onwards, unspecified vnic will result # in binding the rule to all interfaces ver = nsxv_manager.vcns.get_version() if version.LooseVersion(ver) < version.LooseVersion('6.2.4'): LOG.debug("NSX version %s requires explicit nat rule " "for each interface", ver) edge_id = binding['edge_id'] vnic_bindings = nsxv_db.get_edge_vnic_bindings_by_edge( context.session, edge_id) indices = [vnic_binding.vnic_index for vnic_binding in vnic_bindings] indices.append(vcns_const.EXTERNAL_VNIC_INDEX) else: LOG.debug("Configuring nat rules on external " "interface only for %s", router_id) indices = [vcns_const.EXTERNAL_VNIC_INDEX] nsxv_manager.update_nat_rules(binding['edge_id'], snat, dnat, indices) else: LOG.warning("Bindings do not exists for %s", router_id) def clear_nat_rules(nsxv_manager, context, router_id): update_nat_rules(nsxv_manager, context, router_id, [], []) def update_firewall(nsxv_manager, context, router_id, firewall, allow_external=True): binding = nsxv_db.get_nsxv_router_binding( context.session, router_id) if binding: edge_id = binding['edge_id'] nsxv_manager.update_firewall(edge_id, firewall, context, allow_external=allow_external) else: LOG.warning("Bindings do not exists for %s", router_id) def check_network_in_use_at_backend(context, network_id): retries = max(cfg.CONF.nsxv.retries, 1) delay = 0.5 for attempt in range(1, retries + 1): if attempt != 1: time.sleep(delay) delay = min(2 * delay, 60) edge_vnic_bindings = nsxv_db.get_edge_vnic_bindings_by_int_lswitch( context.session, network_id) if not edge_vnic_bindings: 
return LOG.warning('NSXv: network is still in use at the backend') LOG.error('NSXv: network is still in use at the backend') def default_loglevel_modifier(config, level): """Modify log level settings in edge config bulk (standard syntax)""" if 'logging' not in config: LOG.error("Logging section missing in configuration") return False enable = True if level == 'none': enable = False level = 'info' # default config['logging']['enable'] = enable config['logging']['logLevel'] = level return True def routing_loglevel_modifier(config, level): """Modify log level in routing global settings""" if 'routingGlobalConfig' not in config: LOG.error("routingGlobalConfig section missing in config") return False return default_loglevel_modifier(config['routingGlobalConfig'], level) def get_loglevel_modifier(module, level): """Pick modifier according to module and set log level""" special_modifiers = {'routing': routing_loglevel_modifier} modifier = default_loglevel_modifier if module in special_modifiers.keys(): modifier = special_modifiers[module] def wrapper(config): return modifier(config, level) return wrapper def update_edge_loglevel(vcns, edge_id, module, level): """Update loglevel on edge for specified module""" if module not in SUPPORTED_EDGE_LOG_MODULES: LOG.error("Unrecognized logging module %s - ignored", module) return if level not in SUPPORTED_EDGE_LOG_LEVELS: LOG.error("Unrecognized log level %s - ignored", level) return vcns.update_edge_config_with_modifier(edge_id, module, get_loglevel_modifier(module, level)) def update_edge_host_groups(vcns, edge_id, dvs, availability_zone, validate=False): # Update edge DRS host groups h, appliances = vcns.get_edge_appliances(edge_id) vms = [appliance['vmId'] for appliance in appliances['appliances']] if validate: configured_vms = dvs.get_configured_vms( availability_zone.resource_pool, availability_zone.edge_host_groups) for vm in vms: if vm in configured_vms: LOG.info('Edge %s already configured', edge_id) return 
LOG.info('Create DRS groups for %(vms)s on edge %(edge_id)s', {'vms': vms, 'edge_id': edge_id}) # Ensure random distribution of the VMs if availability_zone.ha_placement_random: if len(vms) < len(availability_zone.edge_host_groups): # add some empty vms to the list, so it will randomize between # all host groups vms.extend([None] * (len(availability_zone.edge_host_groups) - len(vms))) random.shuffle(vms) try: dvs.update_cluster_edge_failover( availability_zone.resource_pool, vms, availability_zone.edge_host_groups) except Exception as e: LOG.error('Unable to create DRS groups for ' '%(vms)s on edge %(edge_id)s. Error: %(e)s', {'vms': vms, 'edge_id': edge_id, 'e': e}) def clean_host_groups(dvs, availability_zone): try: LOG.info('Cleaning up host groups for AZ %s', availability_zone.name) dvs.cluster_host_group_cleanup( availability_zone.resource_pool, availability_zone.edge_host_groups) except Exception as e: LOG.error('Unable to cleanup. Error: %s', e) class NsxVCallbacks(object): """Edge callback implementation Callback functions for asynchronous tasks. 
""" def __init__(self, plugin): self.plugin = plugin if cfg.CONF.nsxv.use_dvs_features: self._vcm = dvs.VCManager() else: self._vcm = None @log_helpers.log_method_call def complete_edge_creation(self, context, edge_id, name, router_id, dist, deploy_successful, availability_zone=None, deploy_metadata=False): router_db = None if uuidutils.is_uuid_like(router_id): try: router_db = self.plugin._get_router(context, router_id) except l3_exc.RouterNotFound: # Router might have been deleted before deploy finished LOG.warning("Router %s not found", name) if deploy_successful: metadata_proxy_handler = self.plugin.get_metadata_proxy_handler( availability_zone.name) if deploy_metadata and metadata_proxy_handler: LOG.debug('Update metadata for resource %s', router_id) metadata_proxy_handler.configure_router_edge( context, router_id) self.plugin.setup_dhcp_edge_fw_rules(context, self.plugin, router_id) LOG.debug("Successfully deployed %(edge_id)s for router %(name)s", {'edge_id': edge_id, 'name': name}) if (router_db and router_db['status'] == constants.PENDING_CREATE): router_db['status'] = constants.ACTIVE nsxv_db.update_nsxv_router_binding( context.session, router_id, status=constants.ACTIVE) if (not dist and self._vcm and availability_zone and availability_zone.edge_ha and availability_zone.edge_host_groups): with locking.LockManager.get_lock('nsx-vc-drs-update'): update_edge_host_groups(self.plugin.nsx_v.vcns, edge_id, self._vcm, availability_zone, validate=True) else: LOG.error("Failed to deploy Edge for router %s", name) if router_db: router_db['status'] = constants.ERROR nsxv_db.update_nsxv_router_binding( context.session, router_id, status=constants.ERROR) if not dist and edge_id: nsxv_db.clean_edge_vnic_binding( context.session, edge_id) def complete_edge_update( self, context, edge_id, router_id, successful, set_errors): if successful: LOG.debug("Successfully updated %(edge_id)s for router " "%(router_id)s", {'edge_id': edge_id, 'router_id': router_id}) else: 
LOG.error("Failed to update %(edge_id)s for router " "%(router_id)s", {'edge_id': edge_id, 'router_id': router_id}) admin_ctx = q_context.get_admin_context() if nsxv_db.get_nsxv_router_binding(admin_ctx.session, router_id): nsxv_db.update_nsxv_router_binding( admin_ctx.session, router_id, status=constants.ERROR) if set_errors and context: # Set the router status to ERROR try: router_db = self.plugin._get_router(context, router_id) router_db['status'] = constants.ERROR except l3_exc.RouterNotFound: # Router might have been deleted before deploy finished LOG.warning("Router %s not found", router_id) def interface_update_result(self, task): LOG.debug("interface_update_result %d", task.status) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v/vshield/nsxv_edge_cfg_obj.py0000644000175000017500000000340700000000000027655 0ustar00coreycorey00000000000000# Copyright 2014 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc from oslo_serialization import jsonutils import six from vmware_nsx.plugins.nsx_v.vshield import vcns @six.add_metaclass(abc.ABCMeta) class NsxvEdgeCfgObj(object): def __init__(self): return @abc.abstractmethod def get_service_name(self): return @abc.abstractmethod def serializable_payload(self): return @staticmethod def get_object(vcns_obj, edge_id, service_name): uri = "%s/%s/%s" % (vcns.URI_PREFIX, edge_id, service_name) h, v = vcns_obj.do_request( vcns.HTTP_GET, uri, decode=True) return v def submit_to_backend(self, vcns_obj, edge_id): uri = "%s/%s/%s/config" % (vcns.URI_PREFIX, edge_id, self.get_service_name()) payload = jsonutils.dumps(self.serializable_payload(), sort_keys=True) if payload: return vcns_obj.do_request( vcns.HTTP_PUT, uri, payload, format='json', encode=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v/vshield/nsxv_loadbalancer.py0000644000175000017500000003141700000000000027711 0ustar00coreycorey00000000000000# Copyright 2014 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# from oslo_log import log as logging from vmware_nsx.plugins.nsx_v.vshield import nsxv_edge_cfg_obj LOG = logging.getLogger(__name__) class NsxvLoadbalancer(nsxv_edge_cfg_obj.NsxvEdgeCfgObj): SERVICE_NAME = 'loadbalancer' def __init__( self, enabled=True, enable_service_insertion=False, acceleration_enabled=False): super(NsxvLoadbalancer, self).__init__() self.payload = { 'enabled': enabled, 'enableServiceInsertion': enable_service_insertion, 'accelerationEnabled': acceleration_enabled} self.virtual_servers = {} def get_service_name(self): return self.SERVICE_NAME def add_virtual_server(self, virtual_server): self.virtual_servers[virtual_server.payload['name']] = virtual_server def del_virtual_server(self, name): self.virtual_servers.pop(name, None) def serializable_payload(self): virt_servers = [] app_profiles = [] app_rules = [] pools = [] monitors = [] virt_id = 1 app_prof_id = 1 app_rule_id = 1 pool_id = 1 monitor_id = 1 member_id = 1 for virtual_server in self.virtual_servers.values(): s_virt = virtual_server.payload.copy() s_virt['virtualServerId'] = 'virtualServer-%d' % virt_id virt_id += 1 # Setup app profile s_app_prof = virtual_server.app_profile.payload.copy() s_app_prof['applicationProfileId'] = ('applicationProfile-%d' % app_prof_id) app_profiles.append(s_app_prof) app_prof_id += 1 # Bind virtual server to app profile s_virt['applicationProfileId'] = s_app_prof['applicationProfileId'] # Setup app rules if virtual_server.app_rules.values(): s_virt['applicationRuleId'] = [] for app_rule in virtual_server.app_rules.values(): s_app_rule = app_rule.payload.copy() s_app_rule['applicationRuleId'] = ('applicationRule-%d' % app_rule_id) app_rule_id += 1 # Add to LB object, bind to virtual server app_rules.append(s_app_rule) s_virt['applicationRuleId'].append( s_app_rule['applicationRuleId']) # Setup pools s_pool = virtual_server.default_pool.payload.copy() s_pool['poolId'] = 'pool-%d' % pool_id pool_id += 1 pools.append(s_pool) # Add pool members 
s_pool['member'] = [] for member in virtual_server.default_pool.members.values(): s_m = member.payload.copy() s_m['memberId'] = 'member-%d' % member_id member_id += 1 s_pool['member'].append(s_m) # Bind pool to virtual server s_virt['defaultPoolId'] = s_pool['poolId'] s_pool['monitorId'] = [] # Add monitors for monitor in virtual_server.default_pool.monitors.values(): s_mon = monitor.payload.copy() s_mon['monitorId'] = 'monitor-%d' % monitor_id monitor_id += 1 s_pool['monitorId'].append(s_mon['monitorId']) monitors.append(s_mon) virt_servers.append(s_virt) payload = self.payload.copy() payload['applicationProfile'] = app_profiles if app_rules: payload['applicationRule'] = app_rules payload['monitor'] = monitors payload['pool'] = pools payload['virtualServer'] = virt_servers payload['featureType'] = 'loadbalancer_4.0' return payload @staticmethod def get_loadbalancer(vcns_obj, edge_id): edge_lb = nsxv_edge_cfg_obj.NsxvEdgeCfgObj.get_object( vcns_obj, edge_id, '%s/config' % NsxvLoadbalancer.SERVICE_NAME) lb_obj = NsxvLoadbalancer( edge_lb['enabled'], edge_lb['enableServiceInsertion'], edge_lb['accelerationEnabled']) # Construct loadbalancer objects for virt_srvr in edge_lb['virtualServer']: v_s = NsxvLBVirtualServer( virt_srvr['name'], virt_srvr['ipAddress'], virt_srvr['port'], virt_srvr['protocol'], virt_srvr['enabled'], virt_srvr['accelerationEnabled'], virt_srvr['connectionLimit']) # Find application profile objects, attach to virtual server for app_prof in edge_lb['applicationProfile']: if (virt_srvr['applicationProfileId'] == app_prof['applicationProfileId']): a_p = NsxvLBAppProfile( app_prof['name'], app_prof['serverSslEnabled'], app_prof['sslPassthrough'], app_prof['template'], app_prof['insertXForwardedFor']) if app_prof.get('persistence'): a_p.set_persistence( True, app_prof['persistence']['method'], app_prof['persistence'].get('cookieName'), app_prof['persistence'].get('cookieMode'), app_prof['persistence'].get('expire')) v_s.set_app_profile(a_p) # Find 
default pool, attach to virtual server for pool in edge_lb['pool']: if virt_srvr['defaultPoolId'] == pool['poolId']: p = NsxvLBPool( pool['name'], pool['algorithm'], pool['transparent']) # Add pool members to pool for member in pool['member']: m = NsxvLBPoolMember( member['name'], member['ipAddress'], member['port'], member['monitorPort'], member['condition'], member['weight'], member['minConn'], member['maxConn']) p.add_member(m) # Add monitors to pool for mon in edge_lb['monitor']: if mon['monitorId'] in pool['monitorId']: m = NsxvLBMonitor( mon['name'], mon['interval'], mon['maxRetries'], mon['method'], mon['timeout'], mon['type'], mon['url']) p.add_monitor(m) v_s.set_default_pool(p) # Add application rules to virtual server for rule in edge_lb['applicationRule']: if rule['applicationRuleId'] in virt_srvr['applicationRuleId']: r = NsxvLBAppRule( rule['name'], rule['script']) v_s.add_app_rule(r) lb_obj.add_virtual_server(v_s) return lb_obj class NsxvLBAppProfile(object): def __init__( self, name, server_ssl_enabled=False, ssl_pass_through=False, template='TCP', insert_xff=False, client_ssl_cert=None, persist=False, persist_method='cookie', persist_cookie_name='JSESSIONID', persist_cookie_mode='insert', persist_expire=30): self.payload = { 'name': name, 'serverSslEnabled': server_ssl_enabled, 'sslPassthrough': ssl_pass_through, 'template': template, 'insertXForwardedFor': insert_xff} if persist: self.payload['persistence'] = { 'method': persist_method, 'expire': persist_expire } if persist_cookie_mode == 'cookie': self.payload['persistence']['cookieMode'] = persist_cookie_mode self.payload['persistence']['cookieName'] = persist_cookie_name if client_ssl_cert: self.payload['clientSsl'] = { 'clientAuth': 'ignore', 'serviceCertificate': [client_ssl_cert] } def set_persistence( self, persist=False, persist_method='cookie', persist_cookie_name='JSESSIONID', persist_cookie_mode='insert', persist_expire=30): if persist: self.payload['persistence'] = { 'method': 
persist_method, 'expire': persist_expire } if persist_cookie_mode == 'cookie': self.payload['persistence']['cookieMode'] = persist_cookie_mode self.payload['persistence']['cookieName'] = persist_cookie_name else: self.payload.pop('persistence', None) class NsxvLBAppRule(object): def __init__(self, name, script): self.payload = { 'name': name, 'script': script} class NsxvLBVirtualServer(object): def __init__( self, name, ip_address, port=80, protocol='HTTP', enabled=True, acceleration_enabled=False, connection_limit=0, enable_service_insertion=False): self.payload = { 'name': name, 'ipAddress': ip_address, 'port': port, 'protocol': protocol, 'enabled': enabled, 'accelerationEnabled': acceleration_enabled, 'connectionLimit': connection_limit, 'enableServiceInsertion': enable_service_insertion} self.app_rules = {} self.app_profile = None self.default_pool = None def add_app_rule(self, app_rule): self.app_rules[app_rule.payload['name']] = app_rule def del_app_rule(self, name): self.app_rules.pop(name, None) def set_default_pool(self, pool): self.default_pool = pool def set_app_profile(self, app_profile): self.app_profile = app_profile class NsxvLBMonitor(object): def __init__( self, name, interval=10, max_retries=3, method='GET', timeout=15, mon_type='http', url='/'): self.payload = { 'name': name, 'interval': interval, 'maxRetries': max_retries, 'method': method, 'timeout': timeout, 'type': mon_type, 'url': url} class NsxvLBPoolMember(object): def __init__( self, name, ip_address, port, monitor_port=None, condition='enabled', weight=1, min_conn=0, max_conn=0): self.payload = { 'name': name, 'ipAddress': ip_address, 'port': port, 'monitorPort': monitor_port, 'condition': condition, 'weight': weight, 'minConn': min_conn, 'maxConn': max_conn} class NsxvLBPool(object): def __init__( self, name, algorithm='round-robin', transparent=False): self.payload = { 'name': name, 'algorithm': algorithm, 'transparent': transparent} self.members = {} self.monitors = {} def 
add_member(self, member): self.members[member.payload['name']] = member def del_member(self, name): self.members.pop(name, None) def add_monitor(self, monitor): self.monitors[monitor.payload['name']] = monitor def del_monitor(self, name): self.monitors.pop(name, None) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v/vshield/securitygroup_utils.py0000644000175000017500000002314300000000000030365 0ustar00coreycorey00000000000000# Copyright 2014 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import xml.etree.ElementTree as et from oslo_config import cfg from oslo_log import log as logging from vmware_nsx.common import utils WAIT_INTERVAL = 2000 MAX_ATTEMPTS = 5 LOG = logging.getLogger(__name__) class NsxSecurityGroupUtils(object): def __init__(self, nsxv_manager): LOG.debug("Start Security Group Utils initialization") self.nsxv_manager = nsxv_manager def to_xml_string(self, element): return et.tostring(element) def get_section_with_rules(self, name, rules, section_id=None): """Helper method to create section dict with rules.""" section = et.Element('section') section.attrib['name'] = name if section_id: section.attrib['id'] = section_id for rule in rules: section.append(rule) return section def get_container(self, nsx_sg_id): container = {'type': 'SecurityGroup', 'value': nsx_sg_id} return container def get_remote_container(self, remote_group_id, remote_ip_mac): container = None if remote_group_id is not None: return self.get_container(remote_group_id) if remote_ip_mac is not None: container = {'type': 'Ipv4Address', 'value': remote_ip_mac} return container def get_rule_config(self, applied_to_ids, name, action='allow', applied_to='SecurityGroup', source=None, destination=None, services=None, flags=None, logged=False, tag=None, application_services=None, notes=None): """Helper method to create a nsx rule dict.""" ruleTag = et.Element('rule') ruleTag.attrib['logged'] = 'true' if logged else 'false' nameTag = et.SubElement(ruleTag, 'name') nameTag.text = name actionTag = et.SubElement(ruleTag, 'action') actionTag.text = action notesTag = et.SubElement(ruleTag, 'notes') notesTag.text = notes apList = et.SubElement(ruleTag, 'appliedToList') for applied_to_id in applied_to_ids: apTag = et.SubElement(apList, 'appliedTo') apTypeTag = et.SubElement(apTag, 'type') apTypeTag.text = applied_to apValueTag = et.SubElement(apTag, 'value') apValueTag.text = applied_to_id if source is not None: sources = et.SubElement(ruleTag, 'sources') sources.attrib['excluded'] = 
'false' srcTag = et.SubElement(sources, 'source') srcTypeTag = et.SubElement(srcTag, 'type') srcTypeTag.text = source['type'] srcValueTag = et.SubElement(srcTag, 'value') srcValueTag.text = source['value'] if destination is not None: dests = et.SubElement(ruleTag, 'destinations') dests.attrib['excluded'] = 'false' destTag = et.SubElement(dests, 'destination') destTypeTag = et.SubElement(destTag, 'type') destTypeTag.text = destination['type'] destValueTag = et.SubElement(destTag, 'value') destValueTag.text = destination['value'] if services: s = et.SubElement(ruleTag, 'services') for protocol, port, icmptype, icmpcode in services: svcTag = et.SubElement(s, 'service') try: int(protocol) svcProtocolTag = et.SubElement(svcTag, 'protocol') svcProtocolTag.text = str(protocol) except ValueError: svcProtocolTag = et.SubElement(svcTag, 'protocolName') svcProtocolTag.text = protocol if port is not None: svcPortTag = et.SubElement(svcTag, 'destinationPort') svcPortTag.text = str(port) if icmptype is not None: svcPortTag = et.SubElement(svcTag, 'subProtocol') svcPortTag.text = str(icmptype) if icmpcode is not None: if icmptype in ('0', '8') and icmpcode == '0': # icmpcode 0 should not be sent # TODO(asarfaty): Validate if this is needed for all # NSX versions and all icmp types pass else: svcPortTag = et.SubElement(svcTag, 'icmpCode') svcPortTag.text = str(icmpcode) if application_services: s = et.SubElement(ruleTag, 'services') for application_service in application_services: svcTag = et.SubElement(s, 'service') svcProtocolTag = et.SubElement(svcTag, 'value') svcProtocolTag.text = str(application_service) if flags: if flags.get('ethertype') is not None: pktTag = et.SubElement(ruleTag, 'packetType') pktTag.text = flags.get('ethertype') if flags.get('direction') is not None: dirTag = et.SubElement(ruleTag, 'direction') dirTag.text = flags.get('direction') if tag: tagTag = et.SubElement(ruleTag, 'tag') tagTag.text = tag return ruleTag def get_rule_id_pair_from_section(self, 
resp): root = et.fromstring(resp) pairs = [] for rule in root.findall('rule'): pair = {'nsx_id': rule.attrib.get('id'), 'neutron_id': rule.find('name').text} pairs.append(pair) return pairs def fix_existing_section_rules(self, section): # fix section existing rules before extending it with new rules # TODO(asarfaty): Validate if this is needed for all NSX versions for rule in section.iter('rule'): services = rule.find('services') if services: for service in services: subProt = service.find('subProtocolName') icmpCode = service.find('icmpCode') if (icmpCode is not None and icmpCode.text == '0' and subProt is not None and subProt.text in ('echo-request', 'echo-reply')): # ICMP code should not exist in the payload service.remove(icmpCode) def extend_section_with_rules(self, section, nsx_rules): section.extend(nsx_rules) def parse_section(self, xml_string): return et.fromstring(xml_string) def get_nsx_sg_name(self, sg_data): try: return cfg.CONF.nsxv.nsx_sg_name_format % sg_data except Exception as e: # Illegal format: LOG.error("get_nsx_sg_name failed due to invalid format %s: %s", cfg.CONF.nsxv.nsx_sg_name_format, e) return '%(name)s (%(id)s)' % sg_data def get_nsx_section_name(self, sg_data): return 'SG Section: %s' % self.get_nsx_sg_name(sg_data) def parse_and_get_section_id(self, section_xml): section = et.fromstring(section_xml) return section.attrib['id'] def is_section_logged(self, section): # Determine if this section rules are being logged by the first rule # 'logged' value. 
rule = section.find('rule') if rule is not None: return rule.attrib.get('logged') == 'true' return False def set_rules_logged_option(self, section, logged): value = 'true' if logged else 'false' rules = section.findall('rule') updated = False for rule in rules: if rule.attrib['logged'] != value: rule.attrib['logged'] = value updated = True return updated def del_nsx_security_group_from_policy(self, policy_id, sg_id): if not policy_id: return policy = self.nsxv_manager.vcns.get_security_policy(policy_id) policy = utils.normalize_xml(policy) # check if the security group is already bounded to the policy for binding in policy.iter('securityGroupBinding'): if binding.find('objectId').text == sg_id: # delete this entry policy.remove(binding) return self.nsxv_manager.vcns.update_security_policy( policy_id, et.tostring(policy)) def add_nsx_security_group_to_policy(self, policy_id, sg_id): if not policy_id: return # Get the policy configuration policy = self.nsxv_manager.vcns.get_security_policy(policy_id) policy = utils.normalize_xml(policy) # check if the security group is already bounded to the policy for binding in policy.iter('securityGroupBinding'): if binding.find('objectId').text == sg_id: # Already there return # Add a new binding entry new_binding = et.SubElement(policy, 'securityGroupBinding') et.SubElement(new_binding, 'objectId').text = sg_id return self.nsxv_manager.vcns.update_security_policy( policy_id, et.tostring(policy)) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.206254 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v/vshield/tasks/0000755000175000017500000000000000000000000024771 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v/vshield/tasks/__init__.py0000644000175000017500000000000000000000000027070 
0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v/vshield/tasks/constants.py0000644000175000017500000000260600000000000027363 0ustar00coreycorey00000000000000# Copyright 2013 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class TaskStatus(object): """Task running status. This is used by execution/status callback function to notify the task manager what's the status of current task, and also used for indication the final task execution result. """ PENDING = 1 COMPLETED = 2 ERROR = 3 ABORT = 4 class TaskState(object): """Current state of a task. This is to keep track of the current state of a task. NONE: the task is still in the queue START: the task is pull out from the queue and is about to be executed EXECUTED: the task has been executed STATUS: we're running periodic status check for this task RESULT: the task has finished and result is ready """ NONE = -1 START = 0 EXECUTED = 1 STATUS = 2 RESULT = 3 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v/vshield/tasks/tasks.py0000644000175000017500000003104600000000000026474 0ustar00coreycorey00000000000000# Copyright 2013 VMware, Inc. 
# All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import copy import uuid from eventlet import event from eventlet import greenthread from neutron_lib import exceptions from oslo_log import log as logging from oslo_service import loopingcall import six from vmware_nsx._i18n import _ from vmware_nsx.plugins.nsx_v.vshield.tasks import constants DEFAULT_INTERVAL = 1000 LOG = logging.getLogger(__name__) def nop(task): return constants.TaskStatus.COMPLETED class TaskException(exceptions.NeutronException): def __init__(self, message=None, **kwargs): if message is not None: self.message = message super(TaskException, self).__init__(**kwargs) class InvalidState(TaskException): message = _("Invalid state %(state)d") class TaskStateSkipped(TaskException): message = _("State %(state)d skipped. 
Current state %(current)d") class Task(object): def __init__(self, name, resource_id, execute_callback, status_callback=nop, result_callback=nop, userdata=None): self.name = name self.resource_id = resource_id self._execute_callback = execute_callback self._status_callback = status_callback self._result_callback = result_callback self.userdata = userdata self.id = None self.status = None self._monitors = { constants.TaskState.START: [], constants.TaskState.EXECUTED: [], constants.TaskState.RESULT: [] } self._states = [None, None, None, None] self._state = constants.TaskState.NONE def _add_monitor(self, action, func): self._monitors[action].append(func) return self def _move_state(self, state): self._state = state if self._states[state] is not None: e = self._states[state] self._states[state] = None e.send() for s in range(state): if self._states[s] is not None: e = self._states[s] self._states[s] = None e.send_exception( TaskStateSkipped(state=s, current=self._state)) def _invoke_monitor(self, state): for func in self._monitors[state]: try: func(self) except Exception: LOG.exception("Task %(task)s encountered exception in " "%(func)s at state %(state)s", {'task': str(self), 'func': str(func), 'state': state}) self._move_state(state) return self def _start(self): return self._invoke_monitor(constants.TaskState.START) def _executed(self): return self._invoke_monitor(constants.TaskState.EXECUTED) def _update_status(self, status): if self.status == status: return self self.status = status def _finished(self): return self._invoke_monitor(constants.TaskState.RESULT) def add_start_monitor(self, func): return self._add_monitor(constants.TaskState.START, func) def add_executed_monitor(self, func): return self._add_monitor(constants.TaskState.EXECUTED, func) def add_result_monitor(self, func): return self._add_monitor(constants.TaskState.RESULT, func) def wait(self, state): if (state < constants.TaskState.START or state > constants.TaskState.RESULT or state == 
constants.TaskState.STATUS): raise InvalidState(state=state) if state <= self._state: # we already passed this current state, so no wait return e = event.Event() self._states[state] = e e.wait() def __repr__(self): return "Task-%s-%s-%s" % ( self.name.encode('ascii', 'ignore'), self.resource_id.encode('ascii', 'ignore'), self.id) class TaskManager(object): _instance = None _default_interval = DEFAULT_INTERVAL def __init__(self, interval=None): self._interval = interval or TaskManager._default_interval # A queue to pass tasks from other threads self._tasks_queue = collections.deque() # A dict to store resource -> resource's tasks self._tasks = {} # Current task being executed in main thread self._main_thread_exec_task = None # New request event self._req = event.Event() # TaskHandler stopped event self._stopped = False # Periodic function trigger self._monitor = None self._monitor_busy = False # Thread handling the task request self._thread = None def _execute(self, task): """Execute task.""" LOG.debug("Start task %s", str(task)) task._start() try: status = task._execute_callback(task) except Exception: LOG.exception("Task %(task)s encountered exception in " "%(cb)s", {'task': str(task), 'cb': str(task._execute_callback)}) status = constants.TaskStatus.ERROR LOG.debug("Task %(task)s return %(status)s", {'task': str(task), 'status': status}) task._update_status(status) task._executed() return status def _result(self, task): """Notify task execution result.""" try: task._result_callback(task) except Exception: LOG.exception("Task %(task)s encountered exception in " "%(cb)s", {'task': str(task), 'cb': str(task._result_callback)}) LOG.debug("Task %(task)s return %(status)s", {'task': str(task), 'status': task.status}) task._finished() def _check_pending_tasks(self): """Check all pending tasks status.""" for resource_id in self._tasks.keys(): if self._stopped: # Task manager is stopped, return now return tasks = self._tasks[resource_id] # only the first task is executed 
and pending task = tasks[0] try: status = task._status_callback(task) except Exception: LOG.exception("Task %(task)s encountered exception in " "%(cb)s", {'task': str(task), 'cb': str(task._status_callback)}) status = constants.TaskStatus.ERROR task._update_status(status) if status != constants.TaskStatus.PENDING: self._dequeue(task, True) def _enqueue(self, task): if task.resource_id in self._tasks: # append to existing resource queue for ordered processing self._tasks[task.resource_id].append(task) else: # put the task to a new resource queue tasks = collections.deque() tasks.append(task) self._tasks[task.resource_id] = tasks def _dequeue(self, task, run_next): self._result(task) tasks = self._tasks[task.resource_id] tasks.remove(task) if not tasks: # no more tasks for this resource del self._tasks[task.resource_id] return if run_next: # process next task for this resource while tasks: task = tasks[0] status = self._execute(task) if status == constants.TaskStatus.PENDING: break self._dequeue(task, False) def _abort(self): """Abort all tasks.""" # put all tasks haven't been received by main thread to queue # so the following abort handling can cover them for t in self._tasks_queue: self._enqueue(t) self._tasks_queue.clear() resources = copy.deepcopy(self._tasks) for resource_id in resources.keys(): tasks = list(self._tasks[resource_id]) for task in tasks: task._update_status(constants.TaskStatus.ABORT) self._dequeue(task, False) def _get_task(self): """Get task request.""" while True: for t in self._tasks_queue: return self._tasks_queue.popleft() self._req.wait() self._req.reset() def run(self): while True: try: if self._stopped: # Gracefully terminate this thread if the _stopped # attribute was set to true LOG.info("Stopping TaskManager") break # get a task from queue, or timeout for periodic status check task = self._get_task() if task.resource_id in self._tasks: # this resource already has some tasks under processing, # append the task to same queue for ordered 
processing self._enqueue(task) continue try: self._main_thread_exec_task = task self._execute(task) finally: self._main_thread_exec_task = None if task.status is None: # The thread is killed during _execute(). To guarantee # the task been aborted correctly, put it to the queue. self._enqueue(task) elif task.status != constants.TaskStatus.PENDING: self._result(task) else: self._enqueue(task) except Exception: LOG.exception("TaskManager terminating because " "of an exception") break def add(self, task): task.id = uuid.uuid1() self._tasks_queue.append(task) if not self._req.ready(): self._req.send() return task.id def stop(self): if self._thread is None: return self._stopped = True self._thread.kill() self._thread = None # Stop looping call and abort running tasks self._monitor.stop() if self._monitor_busy: self._monitor.wait() self._abort() LOG.info("TaskManager terminated") def has_pending_task(self): if self._tasks_queue or self._tasks or self._main_thread_exec_task: return True else: return False def show_pending_tasks(self): for task in self._tasks_queue: LOG.info(str(task)) for resource, tasks in six.iteritems(self._tasks): for task in tasks: LOG.info(str(task)) if self._main_thread_exec_task: LOG.info(str(self._main_thread_exec_task)) def count(self): count = 0 for resource_id, tasks in six.iteritems(self._tasks): count += len(tasks) return count def start(self, interval=None): def _inner(): self.run() def _loopingcall_callback(): self._monitor_busy = True try: self._check_pending_tasks() except Exception: LOG.exception("Exception in _check_pending_tasks") self._monitor_busy = False if self._thread is not None: return self if interval is None or interval == 0: interval = self._interval self._stopped = False self._thread = greenthread.spawn(_inner) self._monitor = loopingcall.FixedIntervalLoopingCall( _loopingcall_callback) self._monitor.start(interval / 1000.0, interval / 1000.0) # To allow the created thread start running greenthread.sleep(0) return self 
@classmethod def set_default_interval(cls, interval): cls._default_interval = interval ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v/vshield/vcns.py0000644000175000017500000014042600000000000025176 0ustar00coreycorey00000000000000# Copyright 2013 VMware, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time import xml.etree.ElementTree as et from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import strutils import six from vmware_nsx.common import nsxv_constants from vmware_nsx.common import utils from vmware_nsx.plugins.nsx_v.vshield.common import constants from vmware_nsx.plugins.nsx_v.vshield.common import exceptions from vmware_nsx.plugins.nsx_v.vshield.common import VcnsApiClient LOG = logging.getLogger(__name__) HTTP_GET = "GET" HTTP_POST = "POST" HTTP_DELETE = "DELETE" HTTP_PUT = "PUT" URI_PREFIX = "/api/4.0/edges" #FwaaS constants FIREWALL_SERVICE = "firewall/config" FIREWALL_RULE_RESOURCE = "rules" #NSXv Constants FIREWALL_PREFIX = '/api/4.0/firewall/globalroot-0/config' FIREWALL_REDIRECT_SEC_TYPE = 'layer3redirectsections' SECURITYGROUP_PREFIX = '/api/2.0/services/securitygroup' VDN_PREFIX = '/api/2.0/vdn' SERVICES_PREFIX = '/api/2.0/services' SPOOFGUARD_PREFIX = '/api/4.0/services/spoofguard' TRUSTSTORE_PREFIX = '%s/%s' % (SERVICES_PREFIX, 'truststore') EXCLUDELIST_PREFIX = 
'/api/2.1/app/excludelist' SERVICE_INSERTION_PROFILE_PREFIX = '/api/2.0/si/serviceprofile' SECURITY_POLICY_PREFIX = '/api/2.0/services/policy/securitypolicy' APPLICATION_PREFIX = '%s/%s' % (SERVICES_PREFIX, 'application') TZ_CONNECTIVITY_PREFIX = '/api/4.0/edges/transportzonenetworks' #LbaaS Constants LOADBALANCER_SERVICE = "loadbalancer/config" LOADBALANCER_STATS = "loadbalancer/statistics" VIP_RESOURCE = "virtualservers" POOL_RESOURCE = "pools" MONITOR_RESOURCE = "monitors" APP_PROFILE_RESOURCE = "applicationprofiles" APP_RULE_RESOURCE = "applicationrules" # IPsec VPNaaS Constants IPSEC_VPN_SERVICE = 'ipsec/config' # Dhcp constants DHCP_SERVICE = "dhcp/config" DHCP_BINDING_RESOURCE = "bindings" # Syetem control constants SYSCTL_SERVICE = 'systemcontrol/config' # L2 gateway constants BRIDGE = "bridging/config" # IPAM constants IPAM_POOL_SCOPE = "scope/globalroot-0" IPAM_POOL_SERVICE = "ipam/pools" # Self Signed Certificate constants CSR = "csr" CERTIFICATE = "certificate" NETWORK_TYPES = ['Network', 'VirtualWire', 'DistributedVirtualPortgroup'] # Dynamic routing constants ROUTING_CONFIG = "routing/config" BGP_ROUTING_CONFIG = "routing/config/bgp" ELAPSED_TIME_THRESHOLD = 30 MAX_EDGE_DEPLOY_TIMEOUT = 1200 def retry_upon_exception_exclude_error_codes( exc, excluded_errors, delay=0.5, max_delay=4, max_attempts=0): if not max_attempts: max_attempts = cfg.CONF.nsxv.retries return utils.retry_upon_exception_exclude_error_codes( exc, excluded_errors, delay, max_delay, max_attempts) def retry_upon_exception(exc, delay=0.5, max_delay=4, max_attempts=0): if not max_attempts: max_attempts = cfg.CONF.nsxv.retries return utils.retry_upon_exception(exc, delay, max_delay, max_attempts) class Vcns(object): def __init__(self, address, user, password, ca_file, insecure): self.address = address self.user = user self.password = password self.ca_file = ca_file self.insecure = insecure self.jsonapi_client = VcnsApiClient.VcnsApiHelper( address, user, password, format='json', 
ca_file=ca_file, insecure=insecure, timeout=cfg.CONF.nsxv.nsx_transaction_timeout) self.xmlapi_client = VcnsApiClient.VcnsApiHelper( address, user, password, format='xml', ca_file=ca_file, insecure=insecure, timeout=cfg.CONF.nsxv.nsx_transaction_timeout) self._nsx_version = None self._normalized_scoping_objects = None self._normalized_global_objects = None @retry_upon_exception(exceptions.ServiceConflict) def _client_request(self, client, method, uri, params, headers, encodeParams, timeout=None): return client(method, uri, params, headers, encodeParams, timeout=timeout) def do_request(self, method, uri, params=None, format='json', **kwargs): msg = ("VcnsApiHelper('%(method)s', '%(uri)s', '%(body)s')" % {'method': method, 'uri': uri, 'body': jsonutils.dumps(params)}) LOG.debug(strutils.mask_password(msg)) headers = kwargs.get('headers') encodeParams = kwargs.get('encode', True) if format == 'json': _client = self.jsonapi_client.request else: _client = self.xmlapi_client.request timeout = kwargs.get('timeout') ts = time.time() header, content = self._client_request(_client, method, uri, params, headers, encodeParams, timeout=timeout) te = time.time() elapsed_time = te - ts LOG.debug('VcnsApiHelper for %(method)s %(uri)s took %(seconds)2.4f. 
' 'reply: header=%(header)s content=%(content)s', {'method': method, 'uri': uri, 'header': header, 'content': content, 'seconds': elapsed_time}) if elapsed_time > ELAPSED_TIME_THRESHOLD: LOG.warning('Vcns call for %(method)s %(uri)s took %(seconds)2.4f', {'method': method, 'uri': uri, 'seconds': elapsed_time}) if content == '': return header, {} if kwargs.get('decode', True): content = jsonutils.loads(content) return header, content def edges_lock_operation(self): uri = URI_PREFIX + "?lockUpdatesOnEdge=true" return self.do_request(HTTP_POST, uri, decode=False) @retry_upon_exception(exceptions.ResourceNotFound) @retry_upon_exception(exceptions.RequestBad) def deploy_edge(self, request): uri = URI_PREFIX return self.do_request(HTTP_POST, uri, request, decode=False, timeout=MAX_EDGE_DEPLOY_TIMEOUT) def update_edge(self, edge_id, request): uri = "%s/%s" % (URI_PREFIX, edge_id) return self.do_request(HTTP_PUT, uri, request, decode=False) def get_edge_id(self, job_id): uri = URI_PREFIX + "/jobs/%s" % job_id return self.do_request(HTTP_GET, uri, decode=True) def delete_edge(self, edge_id): uri = "%s/%s" % (URI_PREFIX, edge_id) return self.do_request(HTTP_DELETE, uri) def add_vdr_internal_interface(self, edge_id, interface): uri = "%s/%s/interfaces?action=patch" % (URI_PREFIX, edge_id) return self.do_request(HTTP_POST, uri, interface, decode=True) def get_vdr_internal_interface(self, edge_id, interface_index): uri = "%s/%s/interfaces/%s" % (URI_PREFIX, edge_id, interface_index) return self.do_request(HTTP_GET, uri, decode=True) def update_vdr_internal_interface(self, edge_id, interface_index, interface): uri = "%s/%s/interfaces/%s" % (URI_PREFIX, edge_id, interface_index) return self.do_request(HTTP_PUT, uri, interface, format='xml', decode=True) @retry_upon_exception(exceptions.RequestBad) def delete_vdr_internal_interface(self, edge_id, interface_index): uri = "%s/%s/interfaces/%d" % (URI_PREFIX, edge_id, interface_index) return self.do_request(HTTP_DELETE, uri, 
decode=True) def get_interfaces(self, edge_id): uri = "%s/%s/vnics" % (URI_PREFIX, edge_id) return self.do_request(HTTP_GET, uri, decode=True) @retry_upon_exception(exceptions.ResourceTimedOut) @retry_upon_exception(exceptions.RequestBad) def update_interface(self, edge_id, vnic): uri = "%s/%s/vnics/%d" % (URI_PREFIX, edge_id, vnic['index']) return self.do_request(HTTP_PUT, uri, vnic, decode=True) def delete_interface(self, edge_id, vnic_index): uri = "%s/%s/vnics/%d" % (URI_PREFIX, edge_id, vnic_index) return self.do_request(HTTP_DELETE, uri, decode=True) def get_nat_config(self, edge_id): uri = "%s/%s/nat/config" % (URI_PREFIX, edge_id) return self.do_request(HTTP_GET, uri, decode=True) def update_nat_config(self, edge_id, nat): uri = "%s/%s/nat/config" % (URI_PREFIX, edge_id) return self.do_request(HTTP_PUT, uri, nat, decode=True) def delete_nat_rule(self, edge_id, rule_id): uri = "%s/%s/nat/config/rules/%s" % (URI_PREFIX, edge_id, rule_id) return self.do_request(HTTP_DELETE, uri, decode=True) def get_edge_status(self, edge_id): uri = "%s/%s/status?getlatest=false" % (URI_PREFIX, edge_id) return self.do_request(HTTP_GET, uri, decode=True) def get_edge(self, edge_id): uri = "%s/%s" % (URI_PREFIX, edge_id) return self.do_request(HTTP_GET, uri, decode=True) def _get_edges(self, startindex=0): uri = '%s?startIndex=%d' % (URI_PREFIX, startindex) return self.do_request(HTTP_GET, uri, decode=True) def get_edges(self): edges = [] h, d = self._get_edges() edges.extend(d['edgePage']['data']) paging_info = d['edgePage']['pagingInfo'] page_size = int(paging_info['pageSize']) count = int(paging_info['totalCount']) LOG.debug("There are total %s edges and page size is %s", count, page_size) pages = int(count / page_size + 1) for i in range(1, pages): start_index = page_size * i h, d = self._get_edges(start_index) edges.extend(d['edgePage']['data']) return edges def get_edge_syslog(self, edge_id): uri = "%s/%s/syslog/config" % (URI_PREFIX, edge_id) return 
self.do_request(HTTP_GET, uri, decode=True) def update_edge_syslog(self, edge_id, config): uri = "%s/%s/syslog/config" % (URI_PREFIX, edge_id) return self.do_request(HTTP_PUT, uri, config) def delete_edge_syslog(self, edge_id): uri = "%s/%s/syslog/config" % (URI_PREFIX, edge_id) return self.do_request(HTTP_DELETE, uri) def update_edge_config_with_modifier(self, edge_id, module, modifier): uri = "%s/%s/%s/config" % (URI_PREFIX, edge_id, module) config = self.do_request(HTTP_GET, uri)[1] if modifier(config): return self.do_request(HTTP_PUT, uri, config) def get_edge_interfaces(self, edge_id): uri = "%s/%s/interfaces" % (URI_PREFIX, edge_id) return self.do_request(HTTP_GET, uri, decode=True) def get_routes(self, edge_id): uri = "%s/%s/routing/config/static" % (URI_PREFIX, edge_id) return self.do_request(HTTP_GET, uri) def update_routes(self, edge_id, routes): uri = "%s/%s/routing/config/static" % (URI_PREFIX, edge_id) return self.do_request(HTTP_PUT, uri, routes) def create_lswitch(self, lsconfig): uri = "/api/ws.v1/lswitch" return self.do_request(HTTP_POST, uri, lsconfig, decode=True) def delete_lswitch(self, lswitch_id): uri = "/api/ws.v1/lswitch/%s" % lswitch_id return self.do_request(HTTP_DELETE, uri) def get_loadbalancer_config(self, edge_id): uri = self._build_uri_path(edge_id, LOADBALANCER_SERVICE) return self.do_request(HTTP_GET, uri, decode=True) def get_loadbalancer_statistics(self, edge_id): uri = self._build_uri_path(edge_id, LOADBALANCER_STATS) return self.do_request(HTTP_GET, uri, decode=True) def enable_service_loadbalancer(self, edge_id, config): uri = self._build_uri_path(edge_id, LOADBALANCER_SERVICE) return self.do_request(HTTP_PUT, uri, config) def sync_firewall(self): for cluster_id in cfg.CONF.nsxv.cluster_moid: uri = '/api/4.0/firewall/forceSync/%s' % cluster_id self.do_request(HTTP_POST, uri) def update_firewall(self, edge_id, fw_req): uri = self._build_uri_path( edge_id, FIREWALL_SERVICE) return self.do_request(HTTP_PUT, uri, fw_req) def 
delete_firewall(self, edge_id): uri = self._build_uri_path( edge_id, FIREWALL_SERVICE, None) return self.do_request(HTTP_DELETE, uri) def update_firewall_rule(self, edge_id, vcns_rule_id, fwr_req): uri = self._build_uri_path( edge_id, FIREWALL_SERVICE, FIREWALL_RULE_RESOURCE, vcns_rule_id) return self.do_request(HTTP_PUT, uri, fwr_req) def delete_firewall_rule(self, edge_id, vcns_rule_id): uri = self._build_uri_path( edge_id, FIREWALL_SERVICE, FIREWALL_RULE_RESOURCE, vcns_rule_id) return self.do_request(HTTP_DELETE, uri) def add_firewall_rule_above(self, edge_id, ref_vcns_rule_id, fwr_req): uri = self._build_uri_path( edge_id, FIREWALL_SERVICE, FIREWALL_RULE_RESOURCE) uri += "?aboveRuleId=" + ref_vcns_rule_id return self.do_request(HTTP_POST, uri, fwr_req) def add_firewall_rule(self, edge_id, fwr_req): uri = self._build_uri_path( edge_id, FIREWALL_SERVICE, FIREWALL_RULE_RESOURCE) return self.do_request(HTTP_POST, uri, fwr_req) def update_firewall_default_policy(self, edge_id, fw_req): uri = self._build_uri_path( edge_id, FIREWALL_SERVICE, 'defaultpolicy') return self.do_request(HTTP_PUT, uri, fw_req) def get_firewall(self, edge_id): uri = self._build_uri_path(edge_id, FIREWALL_SERVICE) return self.do_request(HTTP_GET, uri, decode=True) def get_firewall_rule(self, edge_id, vcns_rule_id): uri = self._build_uri_path( edge_id, FIREWALL_SERVICE, FIREWALL_RULE_RESOURCE, vcns_rule_id) return self.do_request(HTTP_GET, uri, decode=True) # #Edge LBAAS call helper # def create_vip(self, edge_id, vip_new): uri = self._build_uri_path( edge_id, LOADBALANCER_SERVICE, VIP_RESOURCE) return self.do_request(HTTP_POST, uri, vip_new) def get_vip(self, edge_id, vip_vseid): uri = self._build_uri_path( edge_id, LOADBALANCER_SERVICE, VIP_RESOURCE, vip_vseid) return self.do_request(HTTP_GET, uri, decode=True) def update_vip(self, edge_id, vip_vseid, vip_new): uri = self._build_uri_path( edge_id, LOADBALANCER_SERVICE, VIP_RESOURCE, vip_vseid) return self.do_request(HTTP_PUT, uri, vip_new) 
def delete_vip(self, edge_id, vip_vseid): uri = self._build_uri_path( edge_id, LOADBALANCER_SERVICE, VIP_RESOURCE, vip_vseid) return self.do_request(HTTP_DELETE, uri) def create_pool(self, edge_id, pool_new): uri = self._build_uri_path( edge_id, LOADBALANCER_SERVICE, POOL_RESOURCE) return self.do_request(HTTP_POST, uri, pool_new) def get_pool(self, edge_id, pool_vseid): uri = self._build_uri_path( edge_id, LOADBALANCER_SERVICE, POOL_RESOURCE, pool_vseid) return self.do_request(HTTP_GET, uri, decode=True) def update_pool(self, edge_id, pool_vseid, pool_new): uri = self._build_uri_path( edge_id, LOADBALANCER_SERVICE, POOL_RESOURCE, pool_vseid) return self.do_request(HTTP_PUT, uri, pool_new) def delete_pool(self, edge_id, pool_vseid): uri = self._build_uri_path( edge_id, LOADBALANCER_SERVICE, POOL_RESOURCE, pool_vseid) return self.do_request(HTTP_DELETE, uri) def create_health_monitor(self, edge_id, monitor_new): uri = self._build_uri_path( edge_id, LOADBALANCER_SERVICE, MONITOR_RESOURCE) return self.do_request(HTTP_POST, uri, monitor_new) def get_health_monitor(self, edge_id, monitor_vseid): uri = self._build_uri_path( edge_id, LOADBALANCER_SERVICE, MONITOR_RESOURCE, monitor_vseid) return self.do_request(HTTP_GET, uri, decode=True) def update_health_monitor(self, edge_id, monitor_vseid, monitor_new): uri = self._build_uri_path( edge_id, LOADBALANCER_SERVICE, MONITOR_RESOURCE, monitor_vseid) return self.do_request(HTTP_PUT, uri, monitor_new) def delete_health_monitor(self, edge_id, monitor_vseid): uri = self._build_uri_path( edge_id, LOADBALANCER_SERVICE, MONITOR_RESOURCE, monitor_vseid) return self.do_request(HTTP_DELETE, uri) def create_app_profile(self, edge_id, app_profile): uri = self._build_uri_path( edge_id, LOADBALANCER_SERVICE, APP_PROFILE_RESOURCE) return self.do_request(HTTP_POST, uri, app_profile) def update_app_profile(self, edge_id, app_profileid, app_profile): uri = self._build_uri_path( edge_id, LOADBALANCER_SERVICE, APP_PROFILE_RESOURCE, 
app_profileid) return self.do_request(HTTP_PUT, uri, app_profile) def delete_app_profile(self, edge_id, app_profileid): uri = self._build_uri_path( edge_id, LOADBALANCER_SERVICE, APP_PROFILE_RESOURCE, app_profileid) return self.do_request(HTTP_DELETE, uri) def create_app_rule(self, edge_id, app_rule): uri = self._build_uri_path( edge_id, LOADBALANCER_SERVICE, APP_RULE_RESOURCE) return self.do_request(HTTP_POST, uri, app_rule) def update_app_rule(self, edge_id, app_ruleid, app_rule): uri = self._build_uri_path( edge_id, LOADBALANCER_SERVICE, APP_RULE_RESOURCE, app_ruleid) return self.do_request(HTTP_PUT, uri, app_rule) def delete_app_rule(self, edge_id, app_ruleid): uri = self._build_uri_path( edge_id, LOADBALANCER_SERVICE, APP_RULE_RESOURCE, app_ruleid) return self.do_request(HTTP_DELETE, uri) def update_ipsec_config(self, edge_id, ipsec_config): uri = self._build_uri_path(edge_id, IPSEC_VPN_SERVICE) return self.do_request(HTTP_PUT, uri, ipsec_config) def delete_ipsec_config(self, edge_id): uri = self._build_uri_path(edge_id, IPSEC_VPN_SERVICE) return self.do_request(HTTP_DELETE, uri) def get_ipsec_config(self, edge_id): uri = self._build_uri_path(edge_id, IPSEC_VPN_SERVICE) return self.do_request(HTTP_GET, uri) @retry_upon_exception(exceptions.RequestBad) def create_virtual_wire(self, vdn_scope_id, request): """Creates a VXLAN virtual wire The method will return the virtual wire ID. """ uri = '/api/2.0/vdn/scopes/%s/virtualwires' % vdn_scope_id return self.do_request(HTTP_POST, uri, request, format='xml', decode=False) def delete_virtual_wire(self, virtualwire_id): """Deletes a virtual wire.""" uri = '/api/2.0/vdn/virtualwires/%s' % virtualwire_id return self.do_request(HTTP_DELETE, uri, format='xml') def create_port_group(self, dvs_id, request): """Creates a port group on a DVS The method will return the port group ID. 
""" uri = '/api/2.0/xvs/switches/%s/networks' % dvs_id return self.do_request(HTTP_POST, uri, request, format='xml', decode=False) def delete_port_group(self, dvs_id, portgroup_id): """Deletes a portgroup.""" uri = '/api/2.0/xvs/switches/%s/networks/%s' % (dvs_id, portgroup_id) return self.do_request(HTTP_DELETE, uri, format='xml', decode=False) def get_vdn_switch(self, dvs_id): uri = '/api/2.0/vdn/switches/%s' % dvs_id return self.do_request(HTTP_GET, uri, decode=True) def update_vdn_switch(self, switch): uri = '/api/2.0/vdn/switches' return self.do_request(HTTP_PUT, uri, switch) def query_interface(self, edge_id, vnic_index): uri = "%s/%s/vnics/%d" % (URI_PREFIX, edge_id, vnic_index) return self.do_request(HTTP_GET, uri, decode=True) def reconfigure_dhcp_service(self, edge_id, request_config): """Reconfigure dhcp static bindings in the created Edge.""" uri = "/api/4.0/edges/%s/dhcp/config" % edge_id return self.do_request(HTTP_PUT, uri, request_config) def query_dhcp_configuration(self, edge_id): """Query DHCP configuration from the specific edge.""" uri = "/api/4.0/edges/%s/dhcp/config" % edge_id return self.do_request(HTTP_GET, uri) def create_dhcp_binding(self, edge_id, request_config): """Append one dhcp static binding on the edge.""" uri = self._build_uri_path(edge_id, DHCP_SERVICE, DHCP_BINDING_RESOURCE) return self.do_request(HTTP_POST, uri, request_config, decode=False) def delete_dhcp_binding(self, edge_id, binding_id): """Delete one dhcp static binding on the edge.""" uri = self._build_uri_path(edge_id, DHCP_SERVICE, DHCP_BINDING_RESOURCE, binding_id) return self.do_request(HTTP_DELETE, uri, decode=False) def get_dhcp_binding(self, edge_id, binding_id): """Get a dhcp static binding from the edge.""" uri = self._build_uri_path(edge_id, DHCP_SERVICE, DHCP_BINDING_RESOURCE, binding_id) return self.do_request(HTTP_GET, uri, decode=True) def create_security_group(self, request): """Creates a security group container in nsx. 
The method will return the security group ID. """ uri = '%s/globalroot-0' % (SECURITYGROUP_PREFIX) return self.do_request(HTTP_POST, uri, request, format='xml', decode=False) def delete_security_group(self, securitygroup_id): """Deletes a security group container.""" uri = '%s/%s?force=true' % (SECURITYGROUP_PREFIX, securitygroup_id) return self.do_request(HTTP_DELETE, uri, format='xml', decode=False) def update_security_group(self, sg_id, sg_name, description): """Updates the NSXv security group name.""" uri = '%s/%s' % (SECURITYGROUP_PREFIX, sg_id) h, c = self.do_request(HTTP_GET, uri, format='xml', decode=False) sg = et.fromstring(c) sg.find('name').text = sg_name sg.find('description').text = description return self.do_request(HTTP_PUT, uri, et.tostring(sg), format='xml', decode=False, encode=False) def list_security_groups(self): uri = '%s/scope/globalroot-0' % SECURITYGROUP_PREFIX return self.do_request(HTTP_GET, uri, format='xml', decode=False) def get_security_group_id(self, sg_name): """Returns NSXv security group id which match the given name.""" h, secgroups = self.list_security_groups() root = utils.normalize_xml(secgroups) for sg in root.iter('securitygroup'): if sg.find('name').text == sg_name: return sg.find('objectId').text @retry_upon_exception(exceptions.VcnsApiException) def create_bridge(self, edge_id, request): """Create a bridge.""" uri = self._build_uri_path(edge_id, BRIDGE) return self.do_request(HTTP_PUT, uri, request, format='xml', decode=False) @retry_upon_exception(exceptions.VcnsApiException) def delete_bridge(self, edge_id): """Delete a bridge.""" uri = self._build_uri_path(edge_id, BRIDGE) return self.do_request(HTTP_DELETE, uri, format='xml', decode=False) def create_redirect_section(self, request): """Creates a layer 3 redirect section in nsx rule table. The method will return the uri to newly created section. 
""" sec_type = FIREWALL_REDIRECT_SEC_TYPE uri = '%s/%s?autoSaveDraft=false' % (FIREWALL_PREFIX, sec_type) uri += '&operation=insert_before&anchorId=1002' return self.do_request(HTTP_POST, uri, request, format='xml', decode=False, encode=False) def create_section(self, type, request, insert_top=False, insert_before=None): """Creates a layer 3 or layer 2 section in nsx rule table. The method will return the uri to newly created section. """ if type == 'ip': sec_type = 'layer3sections' else: sec_type = 'layer2sections' uri = '%s/%s?autoSaveDraft=false' % (FIREWALL_PREFIX, sec_type) if insert_top: uri += '&operation=insert_top' # We want to place security-group sections before the default cluster # section, and we want to place the default cluster section before the # global default section. elif insert_before: uri += '&operation=insert_before&anchorId=%s' % insert_before else: uri += '&operation=insert_before&anchorId=1003' return self.do_request(HTTP_POST, uri, request, format='xml', decode=False, encode=False) def update_section(self, section_uri, request, h): """Replaces a section in nsx rule table.""" uri = '%s?autoSaveDraft=false' % section_uri headers = self._get_section_header(section_uri, h) return self.do_request(HTTP_PUT, uri, request, format='xml', decode=False, encode=False, headers=headers) def delete_section(self, section_uri): """Deletes a section in nsx rule table.""" uri = '%s?autoSaveDraft=false' % section_uri return self.do_request(HTTP_DELETE, uri, format='xml', decode=False) def get_section(self, section_uri): return self.do_request(HTTP_GET, section_uri, format='xml', decode=False) def get_default_l3_id(self): """Retrieve the id of the default l3 section.""" h, firewall_config = self.get_dfw_config() root = utils.normalize_xml(firewall_config) for child in root: if str(child.tag) == 'layer3Sections': sections = list(child.iter('section')) default = sections[-1] return default.attrib['id'] def get_dfw_config(self): uri = FIREWALL_PREFIX return 
self.do_request(HTTP_GET, uri, decode=False, format='xml') def update_dfw_config(self, request, h): uri = FIREWALL_PREFIX headers = self._get_section_header(None, h) return self.do_request(HTTP_PUT, uri, request, format='xml', decode=False, encode=False, headers=headers) def get_section_id(self, section_name): """Retrieve the id of a section from nsx.""" h, firewall_config = self.get_dfw_config() root = utils.normalize_xml(firewall_config) for sec in root.iter('section'): if sec.attrib['name'] == section_name: return sec.attrib['id'] def update_section_by_id(self, id, type, request): """Update a section while building its uri from the id.""" if type == 'ip': sec_type = 'layer3sections' else: sec_type = 'layer2sections' section_uri = '%s/%s/%s' % (FIREWALL_PREFIX, sec_type, id) self.update_section(section_uri, request, h=None) def _get_section_header(self, section_uri, h=None): if h is None: h, c = self.do_request(HTTP_GET, section_uri, format='xml', decode=False) etag = h['etag'] # remove extra "" from the etag etag = etag.replace('"', '') headers = {'If-Match': etag} return headers def remove_rule_from_section(self, section_uri, rule_id): """Deletes a rule from nsx section table.""" uri = '%s/rules/%s?autoSaveDraft=false' % (section_uri, rule_id) headers = self._get_section_header(section_uri) return self.do_request(HTTP_DELETE, uri, format='xml', headers=headers) def get_section_rules(self, section_uri): headers = self._get_section_header(section_uri) h, c = self.do_request(HTTP_GET, section_uri, headers=headers, decode=True) return c['rules'] @retry_upon_exception(exceptions.RequestBad) def add_member_to_security_group(self, security_group_id, member_id): """Adds a vnic member to nsx security group.""" uri = '%s/%s/members/%s?failIfExists=false' % ( SECURITYGROUP_PREFIX, security_group_id, member_id) return self.do_request(HTTP_PUT, uri, format='xml', decode=False) def remove_member_from_security_group(self, security_group_id, member_id): """Removes a vnic 
member from nsx security group.""" uri = '%s/%s/members/%s?failIfAbsent=false' % ( SECURITYGROUP_PREFIX, security_group_id, member_id) return self.do_request(HTTP_DELETE, uri, format='xml', decode=False) def set_system_control(self, edge_id, prop): uri = self._build_uri_path(edge_id, SYSCTL_SERVICE) payload = { 'featureType': 'systemcontrol', 'property': prop } return self.do_request(HTTP_PUT, uri, payload, decode=True) def _get_enforcement_point_body(self, enforcement_points): e_point_list = [] for enforcement_point in enforcement_points: e_point_list.append({ 'enforcementPoint': { 'id': enforcement_point, 'type': enforcement_point.split('-')[0] } }) return {'__enforcementPoints': e_point_list} @retry_upon_exception_exclude_error_codes( exceptions.RequestBad, [constants.NSX_ERROR_ALREADY_HAS_SG_POLICY]) def create_spoofguard_policy(self, enforcement_points, name, enable): uri = '%s/policies/' % SPOOFGUARD_PREFIX body = {'spoofguardPolicy': {'name': name, 'operationMode': 'MANUAL' if enable else 'DISABLE', 'allowLocalIPs': 'true'}} body['spoofguardPolicy'].update( self._get_enforcement_point_body(enforcement_points)) return self.do_request(HTTP_POST, uri, body, format='xml', encode=True, decode=False) @retry_upon_exception(exceptions.RequestBad) def update_spoofguard_policy(self, policy_id, enforcement_points, name, enable): update_uri = '%s/policies/%s' % (SPOOFGUARD_PREFIX, policy_id) publish_uri = '%s/%s?action=publish' % (SPOOFGUARD_PREFIX, policy_id) body = {'spoofguardPolicy': {'policyId': policy_id, 'name': name, 'operationMode': 'MANUAL' if enable else 'DISABLE', 'allowLocalIPs': 'true'}} body['spoofguardPolicy'].update( self._get_enforcement_point_body(enforcement_points)) self.do_request(HTTP_PUT, update_uri, body, format='xml', encode=True, decode=False) return self.do_request(HTTP_POST, publish_uri, decode=False) @retry_upon_exception(exceptions.RequestBad) def delete_spoofguard_policy(self, policy_id): uri = '%s/policies/%s' % (SPOOFGUARD_PREFIX, 
policy_id) return self.do_request(HTTP_DELETE, uri, decode=False) def get_spoofguard_policy(self, policy_id): uri = '%s/policies/%s' % (SPOOFGUARD_PREFIX, policy_id) return self.do_request(HTTP_GET, uri, decode=True) def get_spoofguard_policy_data(self, policy_id, list_type='ALL'): uri = '%s/%s?list=%s' % (SPOOFGUARD_PREFIX, policy_id, list_type) return self.do_request(HTTP_GET, uri, decode=True) def get_spoofguard_policies(self): uri = '%s/policies/' % SPOOFGUARD_PREFIX return self.do_request(HTTP_GET, uri, decode=True) def _approve_assigned_addresses(self, policy_id, vnic_id, mac_addr, addresses): uri = '%s/%s' % (SPOOFGUARD_PREFIX, policy_id) addresses = [{'ipAddress': ip_addr} for ip_addr in addresses] body = {'spoofguardList': {'spoofguard': {'id': vnic_id, 'vnicUuid': vnic_id, 'approvedIpAddress': addresses, 'approvedMacAddress': mac_addr, 'publishedIpAddress': addresses, 'publishedMacAddress': mac_addr}}} try: return self.do_request(HTTP_POST, '%s?action=approve' % uri, body, format='xml', decode=False) except exceptions.VcnsApiException as e: nsx_errcode = self.xmlapi_client._get_nsx_errorcode(e.response) if nsx_errcode == constants.NSX_ERROR_ALREADY_EXISTS: LOG.warning("Spoofguard entry for %s already exists", vnic_id) raise exceptions.AlreadyExists(resource=vnic_id) # raise original exception for retries raise @retry_upon_exception(exceptions.RequestBad) def approve_assigned_addresses(self, policy_id, vnic_id, mac_addr, addresses): return self._approve_assigned_addresses( policy_id, vnic_id, mac_addr, addresses) @retry_upon_exception(exceptions.VcnsApiException) def publish_assigned_addresses(self, policy_id, vnic_id): uri = '%s/%s' % (SPOOFGUARD_PREFIX, policy_id) publish_vnic_uri = '%s?action=publish&vnicId=%s' % (uri, vnic_id) return self.do_request(HTTP_POST, publish_vnic_uri, decode=False) def inactivate_vnic_assigned_addresses(self, policy_id, vnic_id): try: self._approve_assigned_addresses(policy_id, vnic_id, '', []) except exceptions.RequestBad: 
LOG.debug("Request failed: inactivate vnic %s assigned addresses", vnic_id) else: return self.publish_assigned_addresses(policy_id, vnic_id) def _build_uri_path(self, edge_id, service, resource=None, resource_id=None, parent_resource_id=None, fields=None, relations=None, filters=None, types=None, is_attachment=False, is_async=False): uri_prefix = "%s/%s/%s" % (URI_PREFIX, edge_id, service) if resource: res_path = resource + (resource_id and "/%s" % resource_id or '') uri_path = "%s/%s" % (uri_prefix, res_path) else: uri_path = uri_prefix if is_async: return (uri_path + "?async=true") else: return uri_path def add_vm_to_exclude_list(self, vm_id): uri = '%s/%s' % (EXCLUDELIST_PREFIX, vm_id) return self.do_request(HTTP_PUT, uri) def delete_vm_from_exclude_list(self, vm_id): uri = '%s/%s' % (EXCLUDELIST_PREFIX, vm_id) return self.do_request(HTTP_DELETE, uri) def get_scoping_objects(self): uri = '%s/usermgmt/scopingobjects' % SERVICES_PREFIX h, scoping_objects = self.do_request(HTTP_GET, uri, decode=False, format='xml') return scoping_objects def _scopingobjects_lookup(self, type_names, object_id, name=None, use_cache=False): """Look for a specific object in the NSX scoping objects.""" # used cached scoping objects during plugin init since it is # a big structure to retrieve and parse each time. 
if use_cache and self._normalized_scoping_objects is not None: # Use the cached data root = self._normalized_scoping_objects else: # Not using cache, or we do want to use it, # but it was not saved yet: # So get the data from the NSX and parse it so_list = self.get_scoping_objects() root = utils.normalize_xml(so_list) # Save it for possible usage next time (even if not using cache) self._normalized_scoping_objects = root for obj in root.iter('object'): if (obj.find('objectTypeName').text in type_names and obj.find('objectId').text == object_id and (name is None or obj.find('name').text == name)): return True return False def validate_datacenter_moid(self, object_id, during_init=False): return self._scopingobjects_lookup(['Datacenter'], object_id, use_cache=during_init) def validate_network(self, object_id, during_init=False): return self._scopingobjects_lookup(NETWORK_TYPES, object_id, use_cache=during_init) def validate_network_name(self, object_id, name, during_init=False): return self._scopingobjects_lookup(NETWORK_TYPES, object_id, name=name, use_cache=during_init) def validate_vdn_scope(self, object_id): uri = '%s/scopes' % VDN_PREFIX h, scope_list = self.do_request(HTTP_GET, uri, decode=False, format='xml') root = utils.normalize_xml(scope_list) for obj_id in root.iter('objectId'): if obj_id.text == object_id: return True return False def get_dvs_list(self): uri = '%s/switches' % VDN_PREFIX h, dvs_list = self.do_request(HTTP_GET, uri, decode=False, format='xml') root = utils.normalize_xml(dvs_list) dvs_list = [] for obj_id in root.iter('objectId'): if obj_id.text: dvs_list.append(obj_id.text) return dvs_list def validate_dvs(self, object_id, dvs_list=None): if not dvs_list: dvs_list = self.get_dvs_list() for dvs in dvs_list: if dvs == object_id: return True return False def validate_inventory(self, object_id): uri = '%s/inventory/%s/basicinfo' % (SERVICES_PREFIX, object_id) try: h, c = self.do_request(HTTP_GET, uri, decode=False) except 
exceptions.ResourceNotFound: return False return True def get_inventory_name(self, object_id): uri = '%s/inventory/%s/basicinfo' % (SERVICES_PREFIX, object_id) h, c = self.do_request(HTTP_GET, uri, decode=True) return c['name'] def _get_version(self): uri = '/api/2.0/services/vsmconfig' h, c = self.do_request(HTTP_GET, uri, decode=True) version = c['version'] LOG.debug("NSX Version: %s", version) return version def get_version(self): if self._nsx_version is None: try: self._nsx_version = self._get_version() except Exception as e: # Versions prior to 6.2.0 do not support the above API LOG.error("Unable to get NSX version. Exception: %s", e) # Minimum supported version is 6.1 self._nsx_version = '6.1' return self._nsx_version def get_tuning_configuration(self): uri = '/api/4.0/edgePublish/tuningConfiguration' h, c = self.do_request(HTTP_GET, uri, decode=True) return c def configure_aggregate_publishing(self): uri = "/api/4.0/edgePublish/tuningConfiguration" # Ensure that configured values are not changed config = self.get_tuning_configuration() LOG.debug("Tuning configuration: %s", config) tuning = et.Element('tuningConfiguration') for opt, val in six.iteritems(config): child = et.Element(opt) if opt == 'aggregatePublishing': child.text = 'true' else: child.text = str(val) tuning.append(child) return self.do_request(HTTP_PUT, uri, et.tostring(tuning), format='xml', decode=True) def configure_reservations(self): uri = "/api/4.0/edgePublish/tuningConfiguration" config = self.get_tuning_configuration() tuning = et.Element('tuningConfiguration') for opt, val in six.iteritems(config): child = et.Element(opt) if (opt == 'edgeVCpuReservationPercentage' or opt == 'edgeMemoryReservationPercentage'): child.text = '0' elif opt == 'megaHertzPerVCpu': child.text = '1500' else: child.text = str(val) tuning.append(child) return self.do_request(HTTP_PUT, uri, et.tostring(tuning), format='xml', decode=True) def enable_ha(self, edge_id, request_config): """Enable HA in the given 
edge.""" uri = "/api/4.0/edges/%s/highavailability/config" % edge_id return self.do_request(HTTP_PUT, uri, request_config) def change_edge_appliance_size(self, edge_id, size): """Change the size of edge appliances.""" uri = ("/api/4.0/edges/%s/appliances/?size=%s" % (edge_id, size)) return self.do_request(HTTP_POST, uri) def change_edge_appliance(self, edge_id, request): uri = "/api/4.0/edges/%s/appliances" % edge_id return self.do_request(HTTP_PUT, uri, request) def get_edge_appliances(self, edge_id): uri = "/api/4.0/edges/%s/appliances" % edge_id return self.do_request(HTTP_GET, uri) def upload_edge_certificate(self, edge_id, request): """Creates a certificate on the specified Edge appliance.""" uri = '%s/%s/%s' % (TRUSTSTORE_PREFIX, CERTIFICATE, edge_id) return self.do_request(HTTP_POST, uri, request, decode=True) def create_csr(self, edge_id, request=nsxv_constants.CSR_REQUEST): """Create a CSR on the specified Edge appliance.""" uri = '%s/%s/%s' % (TRUSTSTORE_PREFIX, CSR, edge_id) return self.do_request(HTTP_POST, uri, request, format='xml', decode=False) def create_csr_cert(self, csr_id): """Create a CSR self signed cert on the specified Edge appliance.""" uri = '%s/%s/%s?noOfDays=%s' % (TRUSTSTORE_PREFIX, CSR, csr_id, nsxv_constants.CERT_NUMBER_OF_DAYS) return self.do_request(HTTP_PUT, uri) def get_service_insertion_profile(self, profile_id): profiles_uri = '%s/%s' % (SERVICE_INSERTION_PROFILE_PREFIX, profile_id) return self.do_request(HTTP_GET, profiles_uri, format='xml', decode=False) def update_service_insertion_profile_binding(self, profile_id, request): profiles_uri = '%s/%s/%s' % (SERVICE_INSERTION_PROFILE_PREFIX, profile_id, 'binding') return self.do_request(HTTP_POST, profiles_uri, request, format='xml', decode=False) def create_ipam_ip_pool(self, request): uri = '%s/%s/%s' % (SERVICES_PREFIX, IPAM_POOL_SERVICE, IPAM_POOL_SCOPE) return self.do_request(HTTP_POST, uri, request, format='xml', decode=False) def delete_ipam_ip_pool(self, pool_id): uri = 
'%s/%s/%s' % (SERVICES_PREFIX, IPAM_POOL_SERVICE, pool_id) return self.do_request(HTTP_DELETE, uri) def get_ipam_ip_pool(self, pool_id): uri = '%s/%s/%s' % (SERVICES_PREFIX, IPAM_POOL_SERVICE, pool_id) return self.do_request(HTTP_GET, uri, decode=True) def allocate_ipam_ip_from_pool(self, pool_id, ip_addr=None): uri = '%s/%s/%s/%s' % (SERVICES_PREFIX, IPAM_POOL_SERVICE, pool_id, 'ipaddresses') if ip_addr: request = {'ipAddressRequest': {'allocationMode': 'RESERVE', 'ipAddress': ip_addr}} else: request = {'ipAddressRequest': {'allocationMode': 'ALLOCATE'}} return self.do_request(HTTP_POST, uri, request, format='xml', decode=False) def release_ipam_ip_to_pool(self, pool_id, ip_addr): uri = '%s/%s/%s/%s/%s' % (SERVICES_PREFIX, IPAM_POOL_SERVICE, pool_id, 'ipaddresses', ip_addr) return self.do_request(HTTP_DELETE, uri) def get_security_policy(self, policy_id, return_xml=True): # get the policy configuration as an xml string / dictionary uri = '%s/%s' % (SECURITY_POLICY_PREFIX, policy_id) if return_xml: format = 'xml' decode = False else: format = 'json' decode = True h, policy = self.do_request(HTTP_GET, uri, format=format, decode=decode) return policy def update_security_policy(self, policy_id, request): # update the policy configuration. 
request should be an xml string uri = '%s/%s' % (SECURITY_POLICY_PREFIX, policy_id) return self.do_request(HTTP_PUT, uri, request, format='xml', decode=False, encode=True) def get_security_policies(self): # get the policies configuration dictionary uri = '%s/all' % (SECURITY_POLICY_PREFIX) h, policies = self.do_request(HTTP_GET, uri, decode=True) return policies def list_applications(self): uri = '%s/scope/globalroot-0' % APPLICATION_PREFIX h, apps = self.do_request(HTTP_GET, uri, decode=True) return apps def update_edge_routing_config(self, edge_id, request_config): uri = self._build_uri_path(edge_id, ROUTING_CONFIG) return self.do_request(HTTP_PUT, uri, VcnsApiClient.xmldumps(request_config), format='xml') def get_edge_routing_config(self, edge_id): uri = self._build_uri_path(edge_id, ROUTING_CONFIG) return self.do_request(HTTP_GET, uri) def update_bgp_dynamic_routing(self, edge_id, bgp_request): uri = self._build_uri_path(edge_id, BGP_ROUTING_CONFIG) return self.do_request(HTTP_PUT, uri, VcnsApiClient.xmldumps(bgp_request), format='xml') def get_bgp_routing_config(self, edge_id): uri = self._build_uri_path(edge_id, BGP_ROUTING_CONFIG) return self.do_request(HTTP_GET, uri) def delete_bgp_routing_config(self, edge_id): uri = self._build_uri_path(edge_id, BGP_ROUTING_CONFIG) return self.do_request(HTTP_DELETE, uri) def get_global_objects(self): uri = '%s/application/scope/globalroot-0' % SERVICES_PREFIX h, scoping_objects = self.do_request(HTTP_GET, uri, decode=False, format='xml') return scoping_objects def _globalobjects_lookup(self, name, use_cache=False): """Return objectId a specific name in the NSX global objects.""" # used cached scoping objects during plugin init since it is # a big structure to retrieve and parse each time. 
if use_cache and self._normalized_global_objects is not None: # Use the cached data root = self._normalized_global_objects else: # Not using cache, or we do want to use it, # but it was not saved yet: # So get the data from the NSX and parse it so_list = self.get_global_objects() root = utils.normalize_xml(so_list) # Save it for possible usage next time (even if not using cache) self._normalized_global_objects = root for obj in root.iter('application'): if obj.find('name').text == name: return obj.find('objectId').text def get_application_id(self, name): return self._globalobjects_lookup(name, use_cache=True) def get_tz_connectivity_info(self, vdn_scope_id): uri = '%s/%s' % (TZ_CONNECTIVITY_PREFIX, vdn_scope_id) h, info = self.do_request(HTTP_GET, uri, decode=True) return info ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v/vshield/vcns_driver.py0000644000175000017500000000437500000000000026553 0ustar00coreycorey00000000000000# Copyright 2013 VMware, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os from oslo_config import cfg from oslo_log import log as logging from vmware_nsx.plugins.nsx_v.vshield import edge_appliance_driver from vmware_nsx.plugins.nsx_v.vshield import edge_dynamic_routing_driver from vmware_nsx.plugins.nsx_v.vshield import edge_firewall_driver from vmware_nsx.plugins.nsx_v.vshield.tasks import tasks from vmware_nsx.plugins.nsx_v.vshield import vcns LOG = logging.getLogger(__name__) class VcnsDriver(edge_appliance_driver.EdgeApplianceDriver, edge_firewall_driver.EdgeFirewallDriver, edge_dynamic_routing_driver.EdgeDynamicRoutingDriver): def __init__(self, callbacks): super(VcnsDriver, self).__init__() self.callbacks = callbacks self.vcns_uri = cfg.CONF.nsxv.manager_uri self.vcns_user = cfg.CONF.nsxv.user self.vcns_passwd = cfg.CONF.nsxv.password self.ca_file = cfg.CONF.nsxv.ca_file self.insecure = cfg.CONF.nsxv.insecure self.deployment_container_id = cfg.CONF.nsxv.deployment_container_id self._pid = None self._task_manager = None self.vcns = vcns.Vcns(self.vcns_uri, self.vcns_user, self.vcns_passwd, self.ca_file, self.insecure) @property def task_manager(self): if (self._task_manager is None or self._pid != os.getpid()): LOG.debug("Creating task manager") self._pid = os.getpid() interval = cfg.CONF.nsxv.task_status_check_interval self._task_manager = tasks.TaskManager(interval) LOG.debug("Starting task manager") self._task_manager.start() return self._task_manager ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.206254 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v3/0000755000175000017500000000000000000000000022271 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v3/__init__.py0000644000175000017500000000000000000000000024370 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 
xustar000000000000000027 mtime=1586542531.206254 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v3/api_replay/0000755000175000017500000000000000000000000024416 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v3/api_replay/__init__.py0000644000175000017500000000000000000000000026515 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v3/availability_zones.py0000644000175000017500000001777400000000000026553 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from vmware_nsx.common import availability_zones as common_az from vmware_nsx.common import config from vmware_nsx.plugins.common_v3 import availability_zones as v3_az from vmware_nsxlib.v3 import core_resources DEFAULT_NAME = common_az.DEFAULT_NAME + 'v3' class NsxV3AvailabilityZone(v3_az.NsxV3AvailabilityZone): def get_az_opts(self): return config.get_nsxv3_az_opts(self.name) def _has_native_dhcp_metadata(self): return cfg.CONF.nsx_v3.native_dhcp_metadata def init_from_config_section(self, az_name): super(NsxV3AvailabilityZone, self).init_from_config_section(az_name) az_info = self.get_az_opts() switching_profiles = az_info.get('switching_profiles') if switching_profiles: self.switching_profiles = switching_profiles dhcp_relay_service = az_info.get('dhcp_relay_service') if dhcp_relay_service: self.dhcp_relay_service = dhcp_relay_service def init_defaults(self): # use the default configuration self.metadata_proxy = cfg.CONF.nsx_v3.metadata_proxy self.dhcp_profile = cfg.CONF.nsx_v3.dhcp_profile self.native_metadata_route = cfg.CONF.nsx_v3.native_metadata_route self.dns_domain = cfg.CONF.nsx_v3.dns_domain self.nameservers = cfg.CONF.nsx_v3.nameservers self.default_overlay_tz = cfg.CONF.nsx_v3.default_overlay_tz self.default_vlan_tz = cfg.CONF.nsx_v3.default_vlan_tz self.switching_profiles = cfg.CONF.nsx_v3.switching_profiles self.dhcp_relay_service = cfg.CONF.nsx_v3.dhcp_relay_service self.default_tier0_router = cfg.CONF.nsx_v3.default_tier0_router self.edge_cluster = cfg.CONF.nsx_v3.edge_cluster def translate_configured_names_to_uuids(self, nsxlib, search_scope=None): # Mandatory configurations (in AZ or inherited from global values) # Unless this is the default AZ, and metadata is disabled. 
if self.edge_cluster: edge_cluster_uuid = None if cfg.CONF.nsx_v3.init_objects_by_tags: # Find the edge cluster by its tag edge_cluster_uuid = nsxlib.get_id_by_resource_and_tag( nsxlib.edge_cluster.resource_type, cfg.CONF.nsx_v3.search_objects_scope, self.edge_cluster) if not edge_cluster_uuid: edge_cluster_uuid = (nsxlib.edge_cluster .get_id_by_name_or_id(self.edge_cluster)) self._edge_cluster_uuid = edge_cluster_uuid else: self._edge_cluster_uuid = None if self.default_overlay_tz: tz_id = None if search_scope: # Find the TZ by its tag resource_type = (nsxlib.transport_zone.resource_type + ' AND transport_type:OVERLAY') tz_id = nsxlib.get_id_by_resource_and_tag( resource_type, search_scope, self.default_overlay_tz) if not tz_id: # Find the TZ by its name or id tz_id = nsxlib.transport_zone.get_id_by_name_or_id( self.default_overlay_tz) self._default_overlay_tz_uuid = tz_id else: self._default_overlay_tz_uuid = None self._translate_dhcp_profile(nsxlib, search_scope=search_scope) self._translate_metadata_proxy(nsxlib, search_scope=search_scope) # Optional configurations (may be None) if self.default_vlan_tz: tz_id = None if search_scope: # Find the TZ by its tag resource_type = (nsxlib.transport_zone.resource_type + ' AND transport_type:VLAN') tz_id = nsxlib.get_id_by_resource_and_tag( resource_type, search_scope, self.default_vlan_tz) if not tz_id: # Find the TZ by its name or id tz_id = nsxlib.transport_zone.get_id_by_name_or_id( self.default_vlan_tz) self._default_vlan_tz_uuid = tz_id else: self._default_vlan_tz_uuid = None # switching profiles are already uuids, but we need to translate # those to objects profiles = [] if self.switching_profiles: for profile in self.switching_profiles: nsx_profile = nsxlib.switching_profile.get(profile) # TODO(asarfaty): skip or alert on unsupported types profiles.append(core_resources.SwitchingProfileTypeId( nsx_profile.get('resource_type'), nsx_profile.get('id'))) self.switching_profiles_objs = profiles if 
self.dhcp_relay_service: relay_id = None if search_scope: # Find the relay service by its tag relay_id = nsxlib.get_id_by_resource_and_tag( nsxlib.relay_service.resource_type, search_scope, self.dhcp_relay_service) if not relay_id: # Find the service by its name or id relay_id = nsxlib.relay_service.get_id_by_name_or_id( self.dhcp_relay_service) self.dhcp_relay_service = relay_id # if there is a relay service - also find the server ips if self.dhcp_relay_service: self.dhcp_relay_servers = nsxlib.relay_service.get_server_ips( self.dhcp_relay_service) else: self.dhcp_relay_service = None self.dhcp_relay_servers = None if self.default_tier0_router: rtr_id = None if search_scope: # Find the router by its tag resource_type = (nsxlib.logical_router.resource_type + ' AND router_type:TIER0') rtr_id = nsxlib.get_id_by_resource_and_tag( resource_type, search_scope, self.default_tier0_router) if not rtr_id: # find the router by name or id rtr_id = nsxlib.logical_router.get_id_by_name_or_id( self.default_tier0_router) self._default_tier0_router = rtr_id else: self._default_tier0_router = None class NsxV3AvailabilityZones(common_az.ConfiguredAvailabilityZones): default_name = DEFAULT_NAME def __init__(self, use_tvd_config=False): if use_tvd_config: default_azs = cfg.CONF.nsx_tvd.nsx_v3_default_availability_zones else: default_azs = cfg.CONF.default_availability_zones super(NsxV3AvailabilityZones, self).__init__( cfg.CONF.nsx_v3.availability_zones, NsxV3AvailabilityZone, default_availability_zones=default_azs) self.non_default_dns_domain = self.dns_domain_configured_non_default() def dhcp_relay_configured(self): for az in self.availability_zones.values(): if az.dhcp_relay_service: return True return False def dns_domain_configured_non_default(self): for az in self.availability_zones.values(): if az.dns_domain and az.dns_domain != cfg.CONF.nsx_v3.dns_domain: return True return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v3/cert_utils.py0000644000175000017500000000716000000000000025024 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import base64 import hashlib from cryptography import fernet from oslo_config import cfg from oslo_log import log as logging from vmware_nsx.db import db as nsx_db LOG = logging.getLogger(__name__) NSX_OPENSTACK_IDENTITY = "com.vmware.nsx.openstack" # 32-byte base64-encoded secret for symmetric password encryption # generated on init based on password provided in configuration _SECRET = None def reset_secret(): global _SECRET _SECRET = None def generate_secret_from_password(password): m = hashlib.md5() m.update(password.encode('ascii')) return base64.b64encode(m.hexdigest().encode('ascii')) def symmetric_encrypt(secret, plaintext): if not isinstance(plaintext, bytes): plaintext = plaintext.encode('ascii') return fernet.Fernet(secret).encrypt(plaintext).decode('ascii') def symmetric_decrypt(secret, ciphertext): if not isinstance(ciphertext, bytes): ciphertext = ciphertext.encode('ascii') return fernet.Fernet(secret).decrypt(ciphertext).decode('ascii') class DbCertificateStorageDriver(object): """Storage for certificate and private key in neutron DB""" def __init__(self, context): global _SECRET self._context = context if cfg.CONF.nsx_v3.nsx_client_cert_pk_password and not _SECRET: _SECRET = generate_secret_from_password( 
cfg.CONF.nsx_v3.nsx_client_cert_pk_password) def store_cert(self, purpose, certificate, private_key): # encrypt private key if _SECRET: private_key = symmetric_encrypt(_SECRET, private_key) nsx_db.save_certificate(self._context.session, purpose, certificate, private_key) def get_cert(self, purpose): cert, private_key = nsx_db.get_certificate(self._context.session, purpose) if _SECRET and private_key: try: # Encrypted PK is stored in DB as string, while fernet expects # bytearray. private_key = symmetric_decrypt(_SECRET, private_key) except fernet.InvalidToken: # unable to decrypt - probably due to change of password # cert and PK are useless, need to delete them LOG.error("Unable to decrypt private key, possibly due " "to change of password. Certificate needs to be " "regenerated") self.delete_cert(purpose) return None, None return cert, private_key def delete_cert(self, purpose): return nsx_db.delete_certificate(self._context.session, purpose) class DummyCertificateStorageDriver(object): """Dummy driver API implementation Used for external certificate import scenario (nsx_client_cert_storage == None) """ def store_cert(self, purpose, certificate, private_key): pass def get_cert(self, purpose): return None, None def delete_cert(self, purpose): pass ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.206254 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v3/housekeeper/0000755000175000017500000000000000000000000024610 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v3/housekeeper/__init__.py0000644000175000017500000000000000000000000026707 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 
vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v3/housekeeper/mismatch_logical_port.py0000644000175000017500000000703500000000000031532 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from vmware_nsx.extensions import projectpluginmap from vmware_nsx.plugins.common.housekeeper import base_job from vmware_nsx.plugins.nsx_v3 import utils as v3_utils LOG = log.getLogger(__name__) class MismatchLogicalportJob(base_job.BaseJob): def __init__(self, global_readonly, readonly_jobs): super(MismatchLogicalportJob, self).__init__( global_readonly, readonly_jobs) def get_project_plugin(self, plugin): return plugin.get_plugin_by_type(projectpluginmap.NsxPlugins.NSX_T) def get_name(self): return 'mismatch_logical_port' def get_description(self): return 'Detect mismatched configuration on NSX logical ports' def run(self, context, readonly=False): super(MismatchLogicalportJob, self).run(context) # get all orphaned DHCP servers mismatch_ports = v3_utils.get_mismatch_logical_ports( context, self.plugin.nsxlib, self.plugin) info = "" if not mismatch_ports: msg = 'No mismatched logical ports detected.' 
info = base_job.housekeeper_info(info, msg) return {'error_count': 0, 'fixed_count': 0, 'error_info': info} msg = ("Found %(len)s mismatched logical port%(plural)s:" % {'len': len(mismatch_ports), 'plural': 's' if len(mismatch_ports) > 1 else ''}) info = base_job.housekeeper_warning(info, msg) fixed_count = 0 for port_problem in mismatch_ports: msg = ("Logical port %(nsx_id)s " "[neutron id: %(id)s] error: %(err)s" % {'nsx_id': port_problem['nsx_id'], 'id': port_problem['neutron_id'], 'err': port_problem['error']}) if not readonly: # currently we mitigate only address bindings mismatches err_type = port_problem['error_type'] if err_type == v3_utils.PORT_ERROR_TYPE_BINDINGS: # Create missing address bindings on backend port = port_problem['port'] try: address_bindings = self.plugin._build_address_bindings( port) self.plugin.nsxlib.logical_port.update( port_problem['nsx_id'], port_problem['neutron_id'], address_bindings=address_bindings) except Exception as e: msg = "%s failed to be fixed: %s" % (msg, e) else: fixed_count = fixed_count + 1 msg = "%s was fixed." % msg else: msg = "%s cannot be fixed automatically." % msg info = base_job.housekeeper_warning(info, msg) return {'error_count': len(mismatch_ports), 'error_info': info, 'fixed_count': fixed_count} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v3/housekeeper/orphaned_dhcp_server.py0000644000175000017500000000567100000000000031357 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from vmware_nsx.extensions import projectpluginmap from vmware_nsx.plugins.common.housekeeper import base_job from vmware_nsx.plugins.nsx_v3 import utils as v3_utils LOG = log.getLogger(__name__) class OrphanedDhcpServerJob(base_job.BaseJob): def __init__(self, global_readonly, readonly_jobs): super(OrphanedDhcpServerJob, self).__init__( global_readonly, readonly_jobs) def get_project_plugin(self, plugin): return plugin.get_plugin_by_type(projectpluginmap.NsxPlugins.NSX_T) def get_name(self): return 'orphaned_dhcp_server' def get_description(self): return 'Detect orphaned DHCP server' def run(self, context, readonly=False): super(OrphanedDhcpServerJob, self).run(context) # get all orphaned DHCP servers orphaned_servers = v3_utils.get_orphaned_dhcp_servers( context, self.plugin, self.plugin.nsxlib) info = "" if not orphaned_servers: msg = 'No orphaned DHCP servers detected.' 
info = base_job.housekeeper_info(info, msg) return {'error_count': 0, 'fixed_count': 0, 'error_info': msg} msg = ("Found %(len)s orphaned DHCP server%(plural)s:" % {'len': len(orphaned_servers), 'plural': 's' if len(orphaned_servers) > 1 else ''}) info = base_job.housekeeper_warning(info, msg) fixed_count = 0 for server in orphaned_servers: msg = ("DHCP server %(name)s [id: %(id)s] " "(neutron network: %(net)s)" % {'name': server['display_name'], 'id': server['id'], 'net': server['neutron_net_id'] if server.get('neutron_net_id') else 'Unknown'}) if not readonly: success, error = v3_utils.delete_orphaned_dhcp_server( context, self.plugin.nsxlib, server) if success: msg = "%s was removed." % msg fixed_count = fixed_count + 1 else: msg = "%s failed to be removed: %s." % (msg, error) info = base_job.housekeeper_warning(info, msg) return {'error_count': len(orphaned_servers), 'error_info': info, 'fixed_count': fixed_count} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v3/housekeeper/orphaned_firewall_section.py0000644000175000017500000000572200000000000032401 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log from vmware_nsx.extensions import projectpluginmap from vmware_nsx.plugins.common.housekeeper import base_job from vmware_nsx.plugins.nsx_v3 import utils as v3_utils LOG = log.getLogger(__name__) class OrphanedFirewallSectionJob(base_job.BaseJob): def __init__(self, global_readonly, readonly_jobs): super(OrphanedFirewallSectionJob, self).__init__( global_readonly, readonly_jobs) def get_project_plugin(self, plugin): return plugin.get_plugin_by_type(projectpluginmap.NsxPlugins.NSX_T) def get_name(self): return 'orphaned_firewall_section' def get_description(self): return 'Detect orphaned firewall sections' def run(self, context, readonly=False): super(OrphanedFirewallSectionJob, self).run(context) # get all orphaned firewall sections orphaned_sections = v3_utils.get_orphaned_firewall_sections( context, self.plugin.nsxlib) info = "" if not orphaned_sections: msg = 'No orphaned firewall sections detected.' info = base_job.housekeeper_info(info, msg) return {'error_count': 0, 'fixed_count': 0, 'error_info': info} msg = ("Found %(len)s orphaned firewall section%(plural)s:" % {'len': len(orphaned_sections), 'plural': 's' if len(orphaned_sections) > 1 else ''}) info = base_job.housekeeper_warning(info, msg) fixed_count = 0 for section in orphaned_sections: msg = ("Firewall section %(name)s [id: %(id)s] " "neutron security group: %(sg)s" % {'name': section['display_name'], 'id': section['id'], 'sg': section['neutron_sg_id'] if section['neutron_sg_id'] else 'Unknown'}) if not readonly: try: self.plugin.nsxlib.firewall_section.delete(section['id']) except Exception as e: msg = "%s failed to be removed: %s." % (msg, e) else: fixed_count = fixed_count + 1 msg = "%s was removed." 
% msg info = base_job.housekeeper_warning(info, msg) return {'error_count': len(orphaned_sections), 'error_info': info, 'fixed_count': fixed_count} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v3/housekeeper/orphaned_logical_router.py0000644000175000017500000000567200000000000032066 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from vmware_nsx.extensions import projectpluginmap from vmware_nsx.plugins.common.housekeeper import base_job from vmware_nsx.plugins.nsx_v3 import utils as v3_utils LOG = log.getLogger(__name__) class OrphanedLogicalRouterJob(base_job.BaseJob): def __init__(self, global_readonly, readonly_jobs): super(OrphanedLogicalRouterJob, self).__init__( global_readonly, readonly_jobs) def get_project_plugin(self, plugin): return plugin.get_plugin_by_type(projectpluginmap.NsxPlugins.NSX_T) def get_name(self): return 'orphaned_logical_router' def get_description(self): return 'Detect orphaned logical routers' def run(self, context, readonly=False): super(OrphanedLogicalRouterJob, self).run(context) # get all orphaned DHCP servers orphaned_routers = v3_utils.get_orphaned_routers( context, self.plugin.nsxlib) info = "" if not orphaned_routers: msg = 'No orphaned logical routers detected.' 
info = base_job.housekeeper_info(info, msg) return {'error_count': 0, 'fixed_count': 0, 'error_info': info} msg = ("Found %(len)s orphaned logical router%(plural)s:" % {'len': len(orphaned_routers), 'plural': 's' if len(orphaned_routers) > 1 else ''}) info = base_job.housekeeper_warning(info, msg) fixed_count = 0 for router in orphaned_routers: msg = ("Logical router %(name)s [id: %(id)s] " "(neutron router: %(rtr)s)" % {'name': router['display_name'], 'id': router['id'], 'rtr': router['neutron_router_id'] if router['neutron_router_id'] else 'Unknown'}) if not readonly: success, error = v3_utils.delete_orphaned_router( self.plugin.nsxlib, router['id']) if success: fixed_count = fixed_count + 1 msg = "%s was removed." % msg else: msg = "%s failed to be removed: %s." % (msg, error) info = base_job.housekeeper_warning(info, msg) return {'error_count': len(orphaned_routers), 'error_info': info, 'fixed_count': fixed_count} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v3/housekeeper/orphaned_logical_switch.py0000644000175000017500000000566000000000000032044 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log from vmware_nsx.extensions import projectpluginmap from vmware_nsx.plugins.common.housekeeper import base_job from vmware_nsx.plugins.nsx_v3 import utils as v3_utils LOG = log.getLogger(__name__) class OrphanedLogicalSwitchJob(base_job.BaseJob): def __init__(self, global_readonly, readonly_jobs): super(OrphanedLogicalSwitchJob, self).__init__( global_readonly, readonly_jobs) def get_project_plugin(self, plugin): return plugin.get_plugin_by_type(projectpluginmap.NsxPlugins.NSX_T) def get_name(self): return 'orphaned_logical_switch' def get_description(self): return 'Detect orphaned logical switches' def run(self, context, readonly=False): super(OrphanedLogicalSwitchJob, self).run(context) # get all orphaned DHCP servers orphaned_swithces = v3_utils.get_orphaned_networks( context, self.plugin.nsxlib) info = "" if not orphaned_swithces: msg = 'No orphaned logical switches detected.' info = base_job.housekeeper_info(info, msg) return {'error_count': 0, 'fixed_count': 0, 'error_info': info} msg = ("Found %(len)s orphaned logical switch%(plural)s:" % {'len': len(orphaned_swithces), 'plural': 'es' if len(orphaned_swithces) > 1 else ''}) info = base_job.housekeeper_warning(info, msg) fixed_count = 0 for switch in orphaned_swithces: msg = ("Logical switch %(name)s [id: %(id)s] " "(neutron network: %(net)s)" % {'name': switch['display_name'], 'id': switch['id'], 'net': switch['neutron_net_id'] if switch['neutron_net_id'] else 'Unknown'}) if not readonly: try: self.plugin.nsxlib.logical_switch.delete(switch['id']) except Exception as e: msg = "%s failed to be removed: %s." % (msg, e) else: fixed_count = fixed_count + 1 msg = "%s was removed." 
% (msg) info = base_job.housekeeper_warning(info, msg) return {'error_count': len(orphaned_swithces), 'error_info': info, 'fixed_count': fixed_count} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v3/plugin.py0000644000175000017500000046661700000000000024165 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import time import mock import netaddr from neutron_lib.api.definitions import address_scope from neutron_lib.api.definitions import agent as agent_apidef from neutron_lib.api.definitions import allowedaddresspairs as addr_apidef from neutron_lib.api.definitions import availability_zone from neutron_lib.api.definitions import dhcpagentscheduler from neutron_lib.api.definitions import external_net as extnet_apidef from neutron_lib.api.definitions import extra_dhcp_opt as ext_edo from neutron_lib.api.definitions import extraroute from neutron_lib.api.definitions import l3 as l3_apidef from neutron_lib.api.definitions import network_availability_zone from neutron_lib.api.definitions import port_security as psec from neutron_lib.api.definitions import portbindings as pbin_apidef from neutron_lib.api.definitions import provider_net from neutron_lib.api.definitions import router_availability_zone from neutron_lib.api.definitions import vlantransparent as vlan_apidef from 
neutron_lib.api import extensions from neutron_lib.api import validators from neutron_lib.callbacks import events from neutron_lib.callbacks import exceptions as callback_exc from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants as const from neutron_lib import context as q_context from neutron_lib.db import api as db_api from neutron_lib.db import resource_extend from neutron_lib.db import utils as db_utils from neutron_lib import exceptions as n_exc from neutron_lib.exceptions import l3 as l3_exc from neutron_lib.plugins import constants as plugin_const from neutron_lib.plugins import directory from neutron_lib.services.qos import constants as qos_consts from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api from neutron.api.rpc.handlers import dhcp_rpc from neutron.api.rpc.handlers import metadata_rpc from neutron.db import agents_db from neutron.db import l3_db from neutron.db.models import l3 as l3_db_models from neutron.db.models import securitygroup as securitygroup_model from neutron.db import models_v2 from neutron.extensions import securitygroup as ext_sg from neutron.quota import resource_registry from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log from oslo_utils import excutils from oslo_utils import importutils from oslo_utils import uuidutils from vmware_nsx._i18n import _ from vmware_nsx.common import config # noqa from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import l3_rpc_agent_api from vmware_nsx.common import locking from vmware_nsx.common import managers from vmware_nsx.common import nsx_constants from vmware_nsx.common import utils from vmware_nsx.db import db as nsx_db from vmware_nsx.dhcp_meta import rpc as nsx_rpc from vmware_nsx.extensions import api_replay from vmware_nsx.extensions import housekeeper as hk_ext from vmware_nsx.extensions import maclearning as mac_ext from vmware_nsx.extensions 
import projectpluginmap from vmware_nsx.extensions import providersecuritygroup as provider_sg from vmware_nsx.extensions import secgroup_rule_local_ip_prefix from vmware_nsx.extensions import securitygrouplogging as sg_logging from vmware_nsx.plugins.common.housekeeper import housekeeper from vmware_nsx.plugins.common_v3 import plugin as nsx_plugin_common from vmware_nsx.plugins.nsx import utils as tvd_utils from vmware_nsx.plugins.nsx_v3 import availability_zones as nsx_az from vmware_nsx.plugins.nsx_v3 import utils as v3_utils from vmware_nsx.services.fwaas.common import utils as fwaas_utils from vmware_nsx.services.fwaas.nsx_v3 import fwaas_callbacks_v2 from vmware_nsx.services.lbaas import lb_const from vmware_nsx.services.lbaas.nsx_v3.implementation import healthmonitor_mgr from vmware_nsx.services.lbaas.nsx_v3.implementation import l7policy_mgr from vmware_nsx.services.lbaas.nsx_v3.implementation import l7rule_mgr from vmware_nsx.services.lbaas.nsx_v3.implementation import listener_mgr from vmware_nsx.services.lbaas.nsx_v3.implementation import loadbalancer_mgr from vmware_nsx.services.lbaas.nsx_v3.implementation import member_mgr from vmware_nsx.services.lbaas.nsx_v3.implementation import pool_mgr from vmware_nsx.services.lbaas.octavia import constants as oct_const from vmware_nsx.services.lbaas.octavia import octavia_listener from vmware_nsx.services.qos.common import utils as qos_com_utils from vmware_nsx.services.qos.nsx_v3 import driver as qos_driver from vmware_nsx.services.qos.nsx_v3 import utils as qos_utils from vmware_nsx.services.trunk.nsx_v3 import driver as trunk_driver from vmware_nsxlib.v3 import core_resources as nsx_resources from vmware_nsxlib.v3 import exceptions as nsx_lib_exc from vmware_nsxlib.v3 import nsx_constants as nsxlib_consts from vmware_nsxlib.v3 import router as nsxlib_router from vmware_nsxlib.v3 import security from vmware_nsxlib.v3 import utils as nsxlib_utils LOG = log.getLogger(__name__) NSX_V3_NO_PSEC_PROFILE_NAME = 
'nsx-default-spoof-guard-vif-profile' NSX_V3_MAC_LEARNING_PROFILE_NAME = 'neutron_port_mac_learning_profile' NSX_V3_MAC_DISABLED_PROFILE_NAME = 'neutron_port_mac_learning_disabled_profile' NSX_V3_FW_DEFAULT_SECTION = 'OS Default Section for Neutron Security-Groups' NSX_V3_FW_DEFAULT_NS_GROUP = 'os_default_section_ns_group' NSX_V3_DEFAULT_SECTION = 'OS-Default-Section' NSX_V3_EXCLUDED_PORT_NSGROUP_NAME = 'neutron_excluded_port_nsgroup' NSX_V3_NON_VIF_PROFILE = 'nsx-default-switch-security-non-vif-profile' NSX_V3_NON_VIF_ENS_PROFILE = \ 'nsx-default-switch-security-non-vif-profile-for-ens' NSX_V3_SERVER_SSL_PROFILE = 'nsx-default-server-ssl-profile' NSX_V3_CLIENT_SSL_PROFILE = 'nsx-default-client-ssl-profile' @resource_extend.has_resource_extenders class NsxV3Plugin(nsx_plugin_common.NsxPluginV3Base, hk_ext.Housekeeper): __native_bulk_support = True __native_pagination_support = True __native_sorting_support = True supported_extension_aliases = [addr_apidef.ALIAS, address_scope.ALIAS, "quotas", pbin_apidef.ALIAS, ext_edo.ALIAS, agent_apidef.ALIAS, dhcpagentscheduler.ALIAS, "ext-gw-mode", "security-group", secgroup_rule_local_ip_prefix.ALIAS, psec.ALIAS, provider_net.ALIAS, extnet_apidef.ALIAS, extraroute.ALIAS, l3_apidef.ALIAS, availability_zone.ALIAS, network_availability_zone.ALIAS, router_availability_zone.ALIAS, "subnet_allocation", sg_logging.ALIAS, provider_sg.ALIAS, hk_ext.ALIAS, "port-security-groups-filtering", "advanced-service-providers"] @resource_registry.tracked_resources( network=models_v2.Network, port=models_v2.Port, subnet=models_v2.Subnet, subnetpool=models_v2.SubnetPool, security_group=securitygroup_model.SecurityGroup, security_group_rule=securitygroup_model.SecurityGroupRule, router=l3_db_models.Router, floatingip=l3_db_models.FloatingIP) def __init__(self): self.fwaas_callbacks = None self._is_sub_plugin = tvd_utils.is_tvd_core_plugin() self.init_is_complete = False self.octavia_listener = None self.octavia_stats_collector = None 
nsxlib_utils.set_is_attr_callback(validators.is_attr_set) self._extend_fault_map() if self._is_sub_plugin: extension_drivers = cfg.CONF.nsx_tvd.nsx_v3_extension_drivers self._update_project_mapping() else: extension_drivers = cfg.CONF.nsx_extension_drivers self._extension_manager = managers.ExtensionManager( extension_drivers=extension_drivers) self.cfg_group = 'nsx_v3' # group name for nsx_v3 section in nsx.ini super(NsxV3Plugin, self).__init__() # Bind the dummy L3 notifications self.l3_rpc_notifier = l3_rpc_agent_api.L3NotifyAPI() LOG.info("Starting NsxV3Plugin") self._extension_manager.initialize() self.supported_extension_aliases.extend( self._extension_manager.extension_aliases()) self.nsxlib = v3_utils.get_nsxlib_wrapper() nsxlib_utils.set_inject_headers_callback(v3_utils.inject_headers) registry.subscribe( self.on_subnetpool_address_scope_updated, resources.SUBNETPOOL_ADDRESS_SCOPE, events.AFTER_UPDATE) self._nsx_version = self.nsxlib.get_version() LOG.info("NSX Version: %s", self._nsx_version) self.tier0_groups_dict = {} # Initialize the network availability zones, which will be used only # when native_dhcp_metadata is True self.init_availability_zones() # Translate configured transport zones, routers, dhcp profile and # metadata proxy names to uuid. 
self._translate_configured_names_to_uuids() self._init_dhcp_metadata() self._prepare_default_rules() # init profiles on nsx backend self._init_nsx_profiles() # Include exclude NSGroup LOG.debug("Initializing NSX v3 Excluded Port NSGroup") self._excluded_port_nsgroup = None self._excluded_port_nsgroup = self._init_excluded_port_nsgroup() if not self._excluded_port_nsgroup: msg = _("Unable to initialize NSX v3 Excluded Port NSGroup %s" ) % NSX_V3_EXCLUDED_PORT_NSGROUP_NAME raise nsx_exc.NsxPluginException(err_msg=msg) qos_driver.register(qos_utils.QosNotificationsHandler()) self._unsubscribe_callback_events() if cfg.CONF.api_replay_mode: self.supported_extension_aliases.append(api_replay.ALIAS) # Support transparent VLANS from 2.2.0 onwards. The feature is only # supported if the global configuration flag vlan_transparent is # True if cfg.CONF.vlan_transparent: self.supported_extension_aliases.append(vlan_apidef.ALIAS) # Register NSXv3 trunk driver to support trunk extensions self.trunk_driver = trunk_driver.NsxV3TrunkDriver.create(self) registry.subscribe(self.spawn_complete, resources.PROCESS, events.AFTER_SPAWN) # subscribe the init complete method last, so it will be called only # if init was successful registry.subscribe(self.init_complete, resources.PROCESS, events.AFTER_INIT) def _update_project_mapping(self): ctx = q_context.get_admin_context() try: nsx_db.add_project_plugin_mapping( ctx.session, nsx_constants.INTERNAL_V3_TENANT_ID, projectpluginmap.NsxPlugins.NSX_T) except db_exc.DBDuplicateEntry: pass def _ensure_default_rules(self): # Include default section NSGroup LOG.debug("Initializing NSX v3 default section NSGroup") self._default_section_nsgroup = None self._default_section_nsgroup = self._init_default_section_nsgroup() if not self._default_section_nsgroup: msg = _("Unable to initialize NSX v3 default section NSGroup %s" ) % NSX_V3_FW_DEFAULT_NS_GROUP raise nsx_exc.NsxPluginException(err_msg=msg) self.default_section = 
self._init_default_section_rules() LOG.info("Initializing NSX v3 default section %(section)s " "and NSGroup %(nsgroup)s", {'section': self.default_section, 'nsgroup': self._default_section_nsgroup.get('id')}) def _ensure_global_sg_placeholder(self, context): found_sg = False try: super(NsxV3Plugin, self).get_security_group( context, v3_utils.NSX_V3_OS_DFW_UUID, fields=['id']) except ext_sg.SecurityGroupNotFound: LOG.warning('Creating a global security group') sec_group = {'security_group': {'id': v3_utils.NSX_V3_OS_DFW_UUID, 'tenant_id': nsx_constants.INTERNAL_V3_TENANT_ID, 'name': 'NSX Internal', 'description': ''}} try: # ensure that the global default is created, and only once # without retrying on DB errors with mock.patch("oslo_db.api.wrap_db_retry." "_is_exception_expected", return_value=False): super(NsxV3Plugin, self).create_security_group( context, sec_group, True) except Exception: # Treat a race of multiple processing creating the sec group LOG.warning('Unable to create global security group. Probably ' 'already created by another server') found_sg = True else: LOG.info('Found a global security group') found_sg = True if found_sg: # check if the section and nsgroup are already in the DB. If not # it means another server is creating them right now. nsgroup_id, section_id = nsx_db.get_sg_mappings( context.session, v3_utils.NSX_V3_OS_DFW_UUID) if nsgroup_id is None or section_id is None: LOG.info("Global security exists without NSX objects") # Wait a bit to let the other server finish # TODO(asarfaty): consider sleeping until it is in the DB time.sleep(3) def _cleanup_duplicates(self, ns_group_id, section_id): LOG.warning("Duplicate rules created! 
Deleting NS group %(nsgroup)s " "and section %(section)s", {'nsgroup': ns_group_id, 'section': section_id}) # Delete duplicates created if section_id: self.nsxlib.firewall_section.delete(section_id) if ns_group_id: self.nsxlib.ns_group.delete(ns_group_id) # Ensure global variables are updated self._ensure_default_rules() def _prepare_default_rules(self): ctx = q_context.get_admin_context() # Need a global placeholder as the DB below has a foreign key to # this security group self._ensure_global_sg_placeholder(ctx) self._ensure_default_rules() # Validate if there is a race between processes nsgroup_id, section_id = nsx_db.get_sg_mappings( ctx.session, v3_utils.NSX_V3_OS_DFW_UUID) LOG.debug("Default NSGroup - %s, Section %s", nsgroup_id, section_id) default_ns_group_id = self._default_section_nsgroup.get('id') duplicates = False if nsgroup_id is None or section_id is None: # This means that the DB was not updated with the NSX IDs try: LOG.debug("Updating NSGroup - %s, Section %s", default_ns_group_id, self.default_section) nsx_db.save_sg_mappings(ctx, v3_utils.NSX_V3_OS_DFW_UUID, default_ns_group_id, self.default_section) except Exception: # Another process must have update the DB at the same time # so delete the once that were just created LOG.debug("Concurrent update! Duplicates exist") duplicates = True else: if (section_id != self.default_section): LOG.debug("Section %(nsx)s doesn't match the one in the DB " "%(db)s. Duplicates exist", {'nsx': self.default_section, 'db': section_id}) duplicates = True if (nsgroup_id != default_ns_group_id): LOG.debug("NSGroup %(nsx)s doesn't match the one in the DB " "%(db)s. 
Duplicates exist", {'nsx': default_ns_group_id, 'db': nsgroup_id}) duplicates = True if duplicates: # deleting the NSX NS group & section found on the NSX backend # which are duplications, and use the ones from the DB self._cleanup_duplicates(default_ns_group_id, self.default_section) @staticmethod def plugin_type(): return projectpluginmap.NsxPlugins.NSX_T @staticmethod def is_tvd_plugin(): return False def spawn_complete(self, resource, event, trigger, payload=None): # Init the FWaaS support with RPC listeners for the original process self._init_fwaas(with_rpc=True) # The rest of this method should run only once, but after init_complete if not self.init_is_complete: self.init_complete(None, None, None) if not self._is_sub_plugin: self.octavia_stats_collector = ( octavia_listener.NSXOctaviaStatisticsCollector( self, self._get_octavia_stats_getter())) def init_complete(self, resource, event, trigger, payload=None): with locking.LockManager.get_lock('plugin-init-complete'): if self.init_is_complete: # Should be called only once per worker return # reinitialize the cluster upon fork for api workers to ensure # each process has its own keepalive loops + state self.nsxlib.reinitialize_cluster(resource, event, trigger, payload=payload) # Init the house keeper self.housekeeper = housekeeper.NsxHousekeeper( hk_ns='vmware_nsx.neutron.nsxv3.housekeeper.jobs', hk_jobs=cfg.CONF.nsx_v3.housekeeping_jobs, hk_readonly=cfg.CONF.nsx_v3.housekeeping_readonly, hk_readonly_jobs=cfg.CONF.nsx_v3.housekeeping_readonly_jobs) # Init octavia listener and endpoints self._init_octavia() # Init the FWaaS support without RPC listeners # for the spawn workers self._init_fwaas(with_rpc=False) self.init_is_complete = True def _init_octavia(self): if self._is_sub_plugin: # The TVD plugin will take care of this return octavia_objects = self._get_octavia_objects() self.octavia_listener = octavia_listener.NSXOctaviaListener( **octavia_objects) def _get_octavia_objects(self): return { 'loadbalancer': 
loadbalancer_mgr.EdgeLoadBalancerManagerFromDict(), 'listener': listener_mgr.EdgeListenerManagerFromDict(), 'pool': pool_mgr.EdgePoolManagerFromDict(), 'member': member_mgr.EdgeMemberManagerFromDict(), 'healthmonitor': healthmonitor_mgr.EdgeHealthMonitorManagerFromDict(), 'l7policy': l7policy_mgr.EdgeL7PolicyManagerFromDict(), 'l7rule': l7rule_mgr.EdgeL7RuleManagerFromDict()} def _get_octavia_stats_getter(self): return listener_mgr.stats_getter def _init_fwaas(self, with_rpc): if self.fwaas_callbacks: # already initialized return if fwaas_utils.is_fwaas_v2_plugin_enabled(): LOG.info("NSXv3 FWaaS v2 plugin enabled") self.fwaas_callbacks = fwaas_callbacks_v2.Nsxv3FwaasCallbacksV2( with_rpc) def init_availability_zones(self): self._availability_zones_data = nsx_az.NsxV3AvailabilityZones( use_tvd_config=self._is_sub_plugin) def _init_nsx_profiles(self): LOG.debug("Initializing NSX v3 port spoofguard switching profile") if not self._init_port_security_profile(): msg = _("Unable to initialize NSX v3 port spoofguard switching " "profile: %s") % v3_utils.NSX_V3_PSEC_PROFILE_NAME raise nsx_exc.NsxPluginException(err_msg=msg) profile_client = self.nsxlib.switching_profile no_psec_prof = profile_client.find_by_display_name( NSX_V3_NO_PSEC_PROFILE_NAME)[0] self._no_psec_profile_id = profile_client.build_switch_profile_ids( profile_client, no_psec_prof)[0] LOG.debug("Initializing NSX v3 DHCP switching profile") try: self._init_dhcp_switching_profile() except Exception as e: msg = (_("Unable to initialize NSX v3 DHCP switching profile: " "%(id)s. 
Reason: %(reason)s") % { 'id': v3_utils.NSX_V3_DHCP_PROFILE_NAME, 'reason': str(e)}) raise nsx_exc.NsxPluginException(err_msg=msg) self._mac_learning_profile = None self._mac_learning_disabled_profile = None # create MAC Learning profile try: self._init_mac_learning_profiles() # Only expose the extension if it is supported self.supported_extension_aliases.append(mac_ext.ALIAS) except Exception as e: LOG.warning("Unable to initialize NSX v3 MAC Learning " "profiles: %(name)s. Reason: %(reason)s", {'name': NSX_V3_MAC_LEARNING_PROFILE_NAME, 'reason': e}) no_switch_security_prof = profile_client.find_by_display_name( NSX_V3_NON_VIF_PROFILE)[0] self._no_switch_security = profile_client.build_switch_profile_ids( profile_client, no_switch_security_prof)[0] no_switch_security_prof = profile_client.find_by_display_name( NSX_V3_NON_VIF_ENS_PROFILE)[0] self._no_switch_security_ens = profile_client.build_switch_profile_ids( profile_client, no_switch_security_prof)[0] self.server_ssl_profile = None self.client_ssl_profile = None LOG.debug("Initializing NSX v3 Load Balancer default profiles") try: self._init_lb_profiles() except Exception as e: msg = (_("Unable to initialize NSX v3 lb profiles: " "Reason: %(reason)s") % {'reason': str(e)}) raise nsx_exc.NsxPluginException(err_msg=msg) def _translate_configured_names_to_uuids(self): # If using tags to find the objects, make sure tag scope is configured if (cfg.CONF.nsx_v3.init_objects_by_tags and not cfg.CONF.nsx_v3.search_objects_scope): raise cfg.RequiredOptError("search_objects_scope", group=cfg.OptGroup('nsx_v3')) # Validate and translate native dhcp profiles per az if cfg.CONF.nsx_v3.native_dhcp_metadata: if not cfg.CONF.nsx_v3.dhcp_profile: raise cfg.RequiredOptError("dhcp_profile", group=cfg.OptGroup('nsx_v3')) if not cfg.CONF.nsx_v3.metadata_proxy: raise cfg.RequiredOptError("metadata_proxy", group=cfg.OptGroup('nsx_v3')) # Translate all the uuids in each of the availability search_scope = 
(cfg.CONF.nsx_v3.search_objects_scope if cfg.CONF.nsx_v3.init_objects_by_tags else None) for az in self.get_azs_list(): az.translate_configured_names_to_uuids( self.nsxlib, search_scope=search_scope) @nsxlib_utils.retry_upon_exception( Exception, max_attempts=cfg.CONF.nsx_v3.retries) def _init_default_section_nsgroup(self): with locking.LockManager.get_lock('nsxv3_init_default_nsgroup'): nsgroup = self._get_default_section_nsgroup() if not nsgroup: # Create a new NSGroup for default section membership_criteria = ( self.nsxlib.ns_group.get_port_tag_expression( security.PORT_SG_SCOPE, NSX_V3_DEFAULT_SECTION)) nsgroup = self.nsxlib.ns_group.create( NSX_V3_FW_DEFAULT_NS_GROUP, 'OS Default Section Port NSGroup', tags=self.nsxlib.build_v3_api_version_tag(), membership_criteria=membership_criteria) return self._get_default_section_nsgroup() def _get_default_section_nsgroup(self): if self._default_section_nsgroup: return self._default_section_nsgroup nsgroups = self.nsxlib.ns_group.find_by_display_name( NSX_V3_FW_DEFAULT_NS_GROUP) return nsgroups[0] if nsgroups else None @nsxlib_utils.retry_upon_exception( Exception, max_attempts=cfg.CONF.nsx_v3.retries) def _init_excluded_port_nsgroup(self): with locking.LockManager.get_lock('nsxv3_excluded_port_nsgroup_init'): nsgroup = self._get_excluded_port_nsgroup() if not nsgroup: # Create a new NSGroup for excluded ports. membership_criteria = ( self.nsxlib.ns_group.get_port_tag_expression( security.PORT_SG_SCOPE, nsxlib_consts.EXCLUDE_PORT)) nsgroup = self.nsxlib.ns_group.create( NSX_V3_EXCLUDED_PORT_NSGROUP_NAME, 'Neutron Excluded Port NSGroup', tags=self.nsxlib.build_v3_api_version_tag(), membership_criteria=membership_criteria) # Add this NSGroup to NSX Exclusion List. 
self.nsxlib.firewall_section.add_member_to_fw_exclude_list( nsgroup['id'], nsxlib_consts.NSGROUP) return self._get_excluded_port_nsgroup() def _get_excluded_port_nsgroup(self): if self._excluded_port_nsgroup: return self._excluded_port_nsgroup nsgroups = self.nsxlib.ns_group.find_by_display_name( NSX_V3_EXCLUDED_PORT_NSGROUP_NAME) return nsgroups[0] if nsgroups else None def _unsubscribe_callback_events(self): # l3_db explicitly subscribes to the port delete callback. This # callback is unsubscribed here since l3 APIs are handled by # core_plugin instead of an advanced service, in case of NSXv3 plugin, # and the prevention logic is handled by NSXv3 plugin itself. registry.unsubscribe( l3_db.L3_NAT_dbonly_mixin._prevent_l3_port_delete_callback, resources.PORT, events.BEFORE_DELETE) @nsxlib_utils.retry_upon_exception( Exception, max_attempts=cfg.CONF.nsx_v3.retries) def _init_dhcp_switching_profile(self): with locking.LockManager.get_lock('nsxv3_dhcp_profile_init'): if not self._get_dhcp_security_profile(): self.nsxlib.switching_profile.create_dhcp_profile( v3_utils.NSX_V3_DHCP_PROFILE_NAME, 'Neutron DHCP Security Profile', tags=self.nsxlib.build_v3_api_version_tag()) return self._get_dhcp_security_profile() def _get_dhcp_security_profile(self): if hasattr(self, '_dhcp_profile') and self._dhcp_profile: return self._dhcp_profile profile = self.nsxlib.switching_profile.find_by_display_name( v3_utils.NSX_V3_DHCP_PROFILE_NAME) self._dhcp_profile = nsx_resources.SwitchingProfileTypeId( profile_type=(nsx_resources.SwitchingProfileTypes. 
SWITCH_SECURITY), profile_id=profile[0]['id']) if profile else None return self._dhcp_profile def _init_mac_learning_profiles(self): with locking.LockManager.get_lock('nsxv3_mac_learning_profile_init'): if not self._get_mac_learning_profile(): self.nsxlib.switching_profile.create_mac_learning_profile( NSX_V3_MAC_LEARNING_PROFILE_NAME, 'Neutron MAC Learning Profile', mac_learning_enabled=True, tags=self.nsxlib.build_v3_api_version_tag()) self._get_mac_learning_profile() if not self._get_mac_learning_disabled_profile(): self.nsxlib.switching_profile.create_mac_learning_profile( NSX_V3_MAC_DISABLED_PROFILE_NAME, 'Neutron MAC Learning Disabled Profile', mac_learning_enabled=False, tags=self.nsxlib.build_v3_api_version_tag()) self._get_mac_learning_disabled_profile() def _get_mac_learning_profile(self): if (hasattr(self, '_mac_learning_profile') and self._mac_learning_profile): return self._mac_learning_profile profile = self.nsxlib.switching_profile.find_by_display_name( NSX_V3_MAC_LEARNING_PROFILE_NAME) self._mac_learning_profile = nsx_resources.SwitchingProfileTypeId( profile_type=(nsx_resources.SwitchingProfileTypes. MAC_LEARNING), profile_id=profile[0]['id']) if profile else None return self._mac_learning_profile def _get_mac_learning_disabled_profile(self): if (hasattr(self, '_mac_learning_disabled_profile') and self._mac_learning_disabled_profile): return self._mac_learning_disabled_profile profile = self.nsxlib.switching_profile.find_by_display_name( NSX_V3_MAC_DISABLED_PROFILE_NAME) self._mac_learning_disabled_profile = ( nsx_resources.SwitchingProfileTypeId( profile_type=(nsx_resources.SwitchingProfileTypes. 
MAC_LEARNING), profile_id=profile[0]['id']) if profile else None) return self._mac_learning_disabled_profile def _init_lb_profiles(self): ssl_c_prof_client = self.nsxlib.load_balancer.client_ssl_profile ssl_s_prof_client = self.nsxlib.load_balancer.server_ssl_profile with locking.LockManager.get_lock('nsxv3_lb_profiles_init'): if not self.client_ssl_profile: profile = ssl_c_prof_client.find_by_display_name( NSX_V3_CLIENT_SSL_PROFILE) if not profile: profile = ssl_c_prof_client.create( NSX_V3_CLIENT_SSL_PROFILE, 'Neutron LB Client SSL Profile', tags=self.nsxlib.build_v3_api_version_tag()) self.client_ssl_profile = profile[0]['id'] if profile else None if not self.server_ssl_profile: profile = ssl_s_prof_client.find_by_display_name( NSX_V3_SERVER_SSL_PROFILE) if not profile: profile = self.nsxlib.load_balancer.server_ssl_profile.create( NSX_V3_SERVER_SSL_PROFILE, 'Neutron LB Server SSL Profile', tags=self.nsxlib.build_v3_api_version_tag()) self.server_ssl_profile = profile[0]['id'] if profile else None def _get_port_security_profile_id(self): return self.nsxlib.switching_profile.build_switch_profile_ids( self.nsxlib.switching_profile, self._psec_profile)[0] def _get_port_security_profile(self): if hasattr(self, '_psec_profile') and self._psec_profile: return self._psec_profile profile = self.nsxlib.switching_profile.find_by_display_name( v3_utils.NSX_V3_PSEC_PROFILE_NAME) self._psec_profile = profile[0] if profile else None return self._psec_profile @nsxlib_utils.retry_upon_exception( Exception, max_attempts=cfg.CONF.nsx_v3.retries) def _init_port_security_profile(self): profile = self._get_port_security_profile() if profile: return profile with locking.LockManager.get_lock('nsxv3_psec_profile_init'): # NOTE(boden): double-checked locking pattern profile = self._get_port_security_profile() if profile: return profile self.nsxlib.switching_profile.create_spoofguard_profile( v3_utils.NSX_V3_PSEC_PROFILE_NAME, 'Neutron Port Security Profile', whitelist_ports=True, 
whitelist_switches=False, tags=self.nsxlib.build_v3_api_version_tag()) return self._get_port_security_profile() def _init_default_section_rules(self): with locking.LockManager.get_lock('nsxv3_default_section'): section_description = ("This section is handled by OpenStack to " "contain default rules on security-groups.") section_id = self.nsxlib.firewall_section.init_default( NSX_V3_FW_DEFAULT_SECTION, section_description, [self._default_section_nsgroup.get('id')], cfg.CONF.nsx_v3.log_security_groups_blocked_traffic) return section_id def _init_dhcp_metadata(self): if cfg.CONF.nsx_v3.native_dhcp_metadata: if cfg.CONF.dhcp_agent_notification: msg = _("Need to disable dhcp_agent_notification when " "native_dhcp_metadata is enabled") raise nsx_exc.NsxPluginException(err_msg=msg) self._init_native_dhcp() self._init_native_metadata() else: self._setup_dhcp() self._start_rpc_notifiers() def _setup_rpc(self): self.endpoints = [dhcp_rpc.DhcpRpcCallback(), agents_db.AgentExtRpcCallback(), metadata_rpc.MetadataRpcCallback()] def _setup_dhcp(self): """Initialize components to support DHCP.""" self.network_scheduler = importutils.import_object( cfg.CONF.network_scheduler_driver ) self.add_periodic_dhcp_agent_status_check() def _start_rpc_notifiers(self): """Initialize RPC notifiers for agents.""" self.agent_notifiers[const.AGENT_TYPE_DHCP] = ( dhcp_rpc_agent_api.DhcpAgentNotifyAPI() ) def _get_edge_cluster(self, tier0_uuid, router): az = self._get_router_az_obj(router) if az and az._edge_cluster_uuid: return az._edge_cluster_uuid if (not self.tier0_groups_dict.get(tier0_uuid) or not self. 
tier0_groups_dict[tier0_uuid].get('edge_cluster_uuid')): self.nsxlib.router.validate_tier0(self.tier0_groups_dict, tier0_uuid) tier0_info = self.tier0_groups_dict[tier0_uuid] return tier0_info['edge_cluster_uuid'] def _allow_ens_networks(self): return cfg.CONF.nsx_v3.ens_support def _create_network_at_the_backend(self, context, net_data, az, transparent_vlan): provider_data = self._validate_provider_create( context, net_data, az, self.nsxlib.transport_zone, self.nsxlib.logical_switch, transparent_vlan=transparent_vlan) neutron_net_id = net_data.get('id') or uuidutils.generate_uuid() net_data['id'] = neutron_net_id if (provider_data['is_provider_net'] and provider_data['net_type'] == utils.NsxV3NetworkTypes.NSX_NETWORK): # Network already exists on the NSX backend nsx_id = provider_data['physical_net'] else: # Create network on the backend # update the network name to indicate the neutron id too. net_name = utils.get_name_and_uuid(net_data['name'] or 'network', neutron_net_id) tags = self.nsxlib.build_v3_tags_payload( net_data, resource_type='os-neutron-net-id', project_name=context.tenant_name) admin_state = net_data.get('admin_state_up', True) LOG.debug('create_network: %(net_name)s, %(physical_net)s, ' '%(tags)s, %(admin_state)s, %(vlan_id)s', {'net_name': net_name, 'physical_net': provider_data['physical_net'], 'tags': tags, 'admin_state': admin_state, 'vlan_id': provider_data['vlan_id']}) trunk_vlan_range = None if transparent_vlan: # all vlan tags are allowed for guest vlan trunk_vlan_range = [0, const.MAX_VLAN_TAG] nsx_result = self.nsxlib.logical_switch.create( net_name, provider_data['physical_net'], tags, admin_state=admin_state, vlan_id=provider_data['vlan_id'], description=net_data.get('description'), trunk_vlan_range=trunk_vlan_range) nsx_id = nsx_result['id'] return (provider_data['is_provider_net'], provider_data['net_type'], provider_data['physical_net'], provider_data['vlan_id'], nsx_id) def _is_overlay_network(self, context, network_id): """Return 
True if this is an overlay network 1. No binding ("normal" overlay networks will have no binding) 2. Geneve network 3. nsx network where the backend network is connected to an overlay TZ """ bindings = nsx_db.get_network_bindings(context.session, network_id) # With NSX plugin, "normal" overlay networks will have no binding if not bindings: # using the default/AZ overlay_tz return True binding = bindings[0] if binding.binding_type == utils.NsxV3NetworkTypes.GENEVE: return True if binding.binding_type == utils.NsxV3NetworkTypes.NSX_NETWORK: # check the backend network # TODO(asarfaty): Keep TZ type in DB to avoid going to the backend ls = self.nsxlib.logical_switch.get(binding.phy_uuid) tz = ls.get('transport_zone_id') if tz: # This call is cached on the nsxlib side backend_type = self.nsxlib.transport_zone.get_transport_type( tz) return (backend_type == self.nsxlib.transport_zone.TRANSPORT_TYPE_OVERLAY) return False def _tier0_validator(self, tier0_uuid): self.nsxlib.router.validate_tier0(self.tier0_groups_dict, tier0_uuid) def _get_nsx_net_tz_id(self, nsx_net): return nsx_net['transport_zone_id'] def create_network(self, context, network): net_data = network['network'] external = net_data.get(extnet_apidef.EXTERNAL) is_external_net = validators.is_attr_set(external) and external is_ddi_network = False tenant_id = net_data['tenant_id'] # validate the availability zone, and get the AZ object az = self._validate_obj_az_on_creation(context, net_data, 'network') self._ensure_default_security_group(context, tenant_id) # Update the transparent vlan if configured vlt = False if extensions.is_extension_supported(self, 'vlan-transparent'): vlt = vlan_apidef.get_vlan_transparent(net_data) self._validate_create_network(context, net_data) if is_external_net: is_provider_net, net_type, physical_net, vlan_id = ( self._validate_external_net_create( net_data, az._default_tier0_router, self._tier0_validator)) nsx_net_id = None is_backend_network = False else: is_provider_net, 
# NOTE(review): the lines below are the tail of create_network(); the
# method header and validation logic are above this chunk. Source
# whitespace was mangled in transit - formatting reconstructed, code
# tokens unchanged.
net_type, physical_net, vlan_id, nsx_net_id = (
    self._create_network_at_the_backend(context, net_data, az, vlt))
is_backend_network = True
try:
    rollback_network = False
    with db_api.CONTEXT_WRITER.using(context):
        # Create network in Neutron
        created_net = super(NsxV3Plugin, self).create_network(context,
                                                              network)
        self._extension_manager.process_create_network(
            context, net_data, created_net)
        if psec.PORTSECURITY not in net_data:
            # Port security defaults to enabled when not specified
            net_data[psec.PORTSECURITY] = True
        self._process_network_port_security_create(
            context, net_data, created_net)
        self._process_l3_create(context, created_net, net_data)
        self._add_az_to_net(context, created_net['id'], net_data)
        if is_provider_net:
            # Save provider network fields, needed by get_network()
            net_bindings = [nsx_db.add_network_binding(
                context.session, created_net['id'],
                net_type, physical_net, vlan_id)]
            self._extend_network_dict_provider(context, created_net,
                                               bindings=net_bindings)
        if is_backend_network:
            # Add neutron-id <-> nsx-id mapping to the DB
            # after the network creation is done
            neutron_net_id = created_net['id']
            nsx_db.add_neutron_nsx_network_mapping(
                context.session, neutron_net_id, nsx_net_id)
        if extensions.is_extension_supported(self, 'vlan-transparent'):
            super(NsxV3Plugin, self).update_network(
                context, created_net['id'],
                {'network': {'vlan_transparent': vlt}})
    # From this point on the DB transaction is committed, so a failure
    # must also undo the neutron network (see except clause below)
    rollback_network = True
    # this extra lookup is necessary to get the
    # latest db model for the extension functions
    net_model = self._get_network(context, created_net['id'])
    resource_extend.apply_funcs('networks', created_net, net_model)
    if is_backend_network:
        self._create_net_mp_mdproxy_port(
            context, created_net, az, nsx_net_id)
except Exception:
    with excutils.save_and_reraise_exception():
        # Undo creation on the backend
        LOG.exception('Failed to create network')
        if (nsx_net_id and
            net_type != utils.NsxV3NetworkTypes.NSX_NETWORK):
            # NSX_NETWORK type switches are pre-existing on the
            # backend and must not be deleted on rollback
            self.nsxlib.logical_switch.delete(nsx_net_id)
        if (cfg.CONF.nsx_v3.native_dhcp_metadata and
            is_backend_network and is_ddi_network):
            # Delete the mdproxy port manually
            self._delete_nsx_port_by_network(created_net['id'])
        if rollback_network:
            super(NsxV3Plugin, self).delete_network(
                context, created_net['id'])

# Update the QoS policy (will affect only future compute ports)
qos_com_utils.set_qos_policy_on_new_net(
    context, net_data, created_net)
if net_data.get(qos_consts.QOS_POLICY_ID):
    LOG.info("QoS Policy %(qos)s will be applied to future compute "
             "ports of network %(net)s",
             {'qos': net_data[qos_consts.QOS_POLICY_ID],
              'net': created_net['id']})
return created_net

def _ens_psec_supported(self):
    """Return True if the backend supports port security on ENS TZ."""
    return self.nsxlib.feature_supported(
        nsxlib_consts.FEATURE_ENS_WITH_SEC)

def _ens_qos_supported(self):
    """Return True if the backend supports QoS on ENS TZ."""
    return self.nsxlib.feature_supported(
        nsxlib_consts.FEATURE_ENS_WITH_QOS)

def _validate_ens_net_portsecurity(self, net_data):
    """Validate/Update the port security of the new network for ENS TZ"""
    if not self._ens_psec_supported():
        if cfg.CONF.nsx_v3.disable_port_security_for_ens:
            # Override the port-security to False
            if net_data[psec.PORTSECURITY]:
                LOG.warning("Disabling port security for new network")
                # Set the port security to False
                net_data[psec.PORTSECURITY] = False
        elif net_data.get(psec.PORTSECURITY):
            # Port security enabled is not allowed
            raise nsx_exc.NsxENSPortSecurity()
        else:
            # Update the default port security to False if not set
            net_data[psec.PORTSECURITY] = False

def delete_network(self, context, network_id):
    """Delete a network from the Neutron DB and the NSX backend.

    DHCP state is torn down first (when native DHCP is enabled), then
    the DB delete runs (it performs the active-port checks), and only
    then is the backend logical switch removed.
    """
    if cfg.CONF.nsx_v3.native_dhcp_metadata:
        self._delete_network_disable_dhcp(context, network_id)
    nsx_net_id = self._get_network_nsx_id(context, network_id)
    is_nsx_net = self._network_is_nsx_net(context, network_id)
    is_ddi_network = self._is_ddi_supported_on_network(context,
                                                      network_id)
    # First call DB operation for delete network as it will perform
    # checks on active ports
    self._retry_delete_network(context, network_id)
    if (not self._network_is_external(context, network_id) and
        not is_nsx_net):
        # TODO(salv-orlando): Handle backend failure, possibly without
        # requiring us to un-delete the DB object. For instance, ignore
        # failures occurring if logical switch is not found
        self.nsxlib.logical_switch.delete(nsx_net_id)
    else:
        # External/NSX-provided networks keep their backend switch
        if (cfg.CONF.nsx_v3.native_dhcp_metadata and is_nsx_net and
            is_ddi_network):
            # Delete the mdproxy port manually
            self._delete_nsx_port_by_network(network_id)
    # TODO(berlin): delete subnets public announce on the network

def _get_network_nsx_id(self, context, neutron_id):
    """Return the NSX logical-switch id mapped to a neutron network."""
    # get the nsx switch id from the DB mapping
    mappings = nsx_db.get_nsx_switch_ids(context.session, neutron_id)
    if not mappings or len(mappings) == 0:
        LOG.debug("Unable to find NSX mappings for neutron "
                  "network %s.", neutron_id)
        # fallback in case we didn't find the id in the db mapping
        # This should not happen, but added here in case the network was
        # created before this code was added.
        return neutron_id
    else:
        return mappings[0]

def update_network(self, context, id, network):
    """Update a network in Neutron and mirror the change to NSX.

    Provider attributes may not change. On backend failure the neutron
    update is rolled back to the original values.
    """
    original_net = super(NsxV3Plugin, self).get_network(context, id)
    net_data = network['network']
    # Neutron does not support changing provider network values
    utils.raise_if_updates_provider_attributes(net_data)
    extern_net = self._network_is_external(context, id)
    is_nsx_net = self._network_is_nsx_net(context, id)
    is_ens_net = self._is_ens_tz_net(context, id)
    # Validate the updated parameters
    self._validate_update_network(context, id, original_net, net_data)
    updated_net = super(NsxV3Plugin, self).update_network(context, id,
                                                          network)
    self._extension_manager.process_update_network(context, net_data,
                                                   updated_net)
    if psec.PORTSECURITY in net_data:
        # do not allow to enable port security on ENS networks
        if (net_data[psec.PORTSECURITY] and
            not original_net[psec.PORTSECURITY] and
            is_ens_net and not self._ens_psec_supported()):
            raise nsx_exc.NsxENSPortSecurity()
        self._process_network_port_security_update(
            context, net_data, updated_net)
    self._process_l3_update(context, updated_net, network['network'])
    self._extend_network_dict_provider(context, updated_net)
    if (not extern_net and not is_nsx_net and
        ('name' in net_data or 'admin_state_up' in net_data or
         'description' in net_data)):
        try:
            # get the nsx switch id from the DB mapping
            nsx_id = self._get_network_nsx_id(context, id)
            net_name = net_data.get(
                'name', original_net.get('name')) or 'network'
            self.nsxlib.logical_switch.update(
                nsx_id,
                name=utils.get_name_and_uuid(net_name, id),
                admin_state=net_data.get('admin_state_up'),
                description=net_data.get('description'))
            # Backend does not update the admin state of the ports on
            # the switch when the switch's admin state changes. Do not
            # update the admin state of the ports in neutron either.
        except nsx_lib_exc.ManagerError:
            LOG.exception("Unable to update NSX backend, rolling "
                          "back changes on neutron")
            with excutils.save_and_reraise_exception():
                # remove the AZ from the network before rollback because
                # it is read only, and breaks the rollback
                if 'availability_zone_hints' in original_net:
                    del original_net['availability_zone_hints']
                super(NsxV3Plugin, self).update_network(
                    context, id, {'network': original_net})
    if qos_consts.QOS_POLICY_ID in net_data:
        # attach the policy to the network in neutron DB
        # (will affect only future compute ports)
        qos_com_utils.update_network_policy_binding(
            context, id, net_data[qos_consts.QOS_POLICY_ID])
        if net_data[qos_consts.QOS_POLICY_ID]:
            LOG.info("QoS Policy %(qos)s will be applied to future "
                     "compute ports of network %(net)s",
                     {'qos': net_data[qos_consts.QOS_POLICY_ID],
                      'net': id})
    if not extern_net and not is_nsx_net:
        # update the network name & attributes in related NSX objects:
        if 'name' in net_data or 'dns_domain' in net_data:
            # update the dhcp server after finding it by tags
            self._update_dhcp_server_on_net_update(context, updated_net)
        if 'name' in net_data:
            # update the mdproxy port after finding it by tags
            self._update_mdproxy_port_on_net_update(context, updated_net)
            # update the DHCP port after finding it by tags
            self._update_dhcp_port_on_net_update(context, updated_net)
    return updated_net

def _update_dhcp_port_on_net_update(self, context, network):
    """Update the NSX DHCP port when the neutron network changes"""
    dhcp_service = nsx_db.get_nsx_service_binding(
        context.session, network['id'], nsxlib_consts.SERVICE_DHCP)
    if dhcp_service and dhcp_service['port_id']:
        # get the neutron port id and search by it
        port_tag = [{'scope': 'os-neutron-dport-id',
                     'tag': dhcp_service['port_id']}]
        dhcpports = self.nsxlib.search_by_tags(
            tags=port_tag,
            resource_type=self.nsxlib.logical_port.resource_type)
        if dhcpports['results']:
            # There should be only 1 dhcp port
            # update the port name by the new network name
            name = self._get_dhcp_port_name(network['name'],
                                            network['id'])
            try:
                self.nsxlib.logical_port.update(
                    dhcpports['results'][0]['id'],
                    False, name=name, attachment_type=False)
            except Exception as e:
                # best effort - log and continue
                LOG.warning("Failed to update network %(id)s DHCP port "
                            "on the NSX: %(e)s",
                            {'id': network['id'], 'e': e})

def _update_mdproxy_port_on_net_update(self, context, network):
    """Update the NSX MDPROXY port when the neutron network changes"""
    net_tag = [{'scope': 'os-neutron-net-id', 'tag': network['id']}]
    # find the logical port by the neutron network id & attachment
    mdproxy_list = self.nsxlib.search_by_tags(
        tags=net_tag,
        resource_type=self.nsxlib.logical_port.resource_type)
    if not mdproxy_list['results']:
        return
    for port in mdproxy_list['results']:
        if (port.get('attachment') and
            port['attachment'].get('attachment_type') ==
            'METADATA_PROXY'):
            # update the port name by the new network name
            name = self._get_mdproxy_port_name(network['name'],
                                               network['id'])
            try:
                self.nsxlib.logical_port.update(
                    port['id'], False, name=name,
                    attachment_type=False)
            except Exception as e:
                # best effort - log and continue
                LOG.warning("Failed to update network %(id)s mdproxy "
                            "port on the NSX: %(e)s",
                            {'id': network['id'], 'e': e})
            # There should be only 1 mdproxy port so it is safe to return
            return

def _update_dhcp_server_on_net_update(self, context, network):
    """Update the NSX DHCP server when the neutron network changes"""
    net_tag = [{'scope': 'os-neutron-net-id',
                'tag': network['id']}]
    # Find the DHCP server by the neutron network tag
    dhcp_srv_list = self.nsxlib.search_by_tags(
        tags=net_tag,
        resource_type=self.nsxlib.dhcp_server.resource_type)
    if dhcp_srv_list['results']:
        # Calculate the new name and domain by the network data
        dhcp_name = self.nsxlib.native_dhcp.build_server_name(
            network['name'], network['id'])
        az = self.get_network_az_by_net_id(context, network['id'])
        domain_name = self.nsxlib.native_dhcp.build_server_domain_name(
            network.get('dns_domain'), az.dns_domain)
        try:
            # There should be only 1 dhcp server
            # Update its name and domain
            self.nsxlib.dhcp_server.update(
                dhcp_srv_list['results'][0]['id'],
                name=dhcp_name,
                domain_name=domain_name)
        except Exception as e:
            # best effort - log and continue
            LOG.warning("Failed to update network %(id)s dhcp server on "
                        "the NSX: %(e)s",
                        {'id': network['id'], 'e': e})

@nsx_plugin_common.api_replay_mode_wrapper
def create_subnet(self, context, subnet):
    """Create a subnet, delegating to the common MP-DHCP flow."""
    return self._create_subnet_with_mp_dhcp(context, subnet)

def delete_subnet(self, context, subnet_id):
    # Call common V3 code to delete the subnet
    self.delete_subnet_with_mp_dhcp(context, subnet_id)

def update_subnet(self, context, subnet_id, subnet):
    """Update a subnet via the common MP-DHCP flow.

    When on-demand metadata is configured without native DHCP metadata,
    toggling enable_dhcp also updates the internal metadata network for
    any router interface on this subnet.
    """
    updated_subnet = self.update_subnet_with_mp_dhcp(
        context, subnet_id, subnet)
    if (cfg.CONF.nsx_v3.metadata_on_demand and
        not self._has_native_dhcp_metadata()):
        # If enable_dhcp is changed on a subnet attached to a router,
        # update internal metadata network accordingly.
        if 'enable_dhcp' in subnet['subnet']:
            port_filters = {'device_owner':
                            const.ROUTER_INTERFACE_OWNERS,
                            'fixed_ips': {'subnet_id': [subnet_id]}}
            ports = self.get_ports(context, filters=port_filters)
            for port in ports:
                nsx_rpc.handle_router_metadata_access(
                    self, context, port['device_id'],
                    interface=not updated_subnet['enable_dhcp'])
    return updated_subnet

def _build_address_bindings(self, port):
    """Build NSX address bindings from fixed IPs and address pairs."""
    address_bindings = []
    for fixed_ip in port['fixed_ips']:
        address_bindings.append(nsx_resources.PacketAddressClassifier(
            fixed_ip['ip_address'], port['mac_address'], None))
    # NOTE(review): assumes ADDRESS_PAIRS is always an iterable here;
    # a None value would raise - verify against callers
    for pair in port.get(addr_apidef.ADDRESS_PAIRS):
        address_bindings.append(nsx_resources.PacketAddressClassifier(
            pair['ip_address'], pair['mac_address'], None))
    return address_bindings

def _get_qos_profile_id(self, context, policy_id):
    """Return the NSX switching-profile id for a neutron QoS policy.

    Raises InvalidInput when no matching profile is found.
    """
    switch_profile_id = nsx_db.get_switch_profile_by_qos_policy(
        context.session, policy_id)
    nsxlib_qos = self.nsxlib.qos_switching_profile
    qos_profile = nsxlib_qos.get(switch_profile_id)
    if qos_profile:
        profile_ids = nsxlib_qos.build_switch_profile_ids(
            self.nsxlib.switching_profile, qos_profile)
        if profile_ids and len(profile_ids) > 0:
            # We have only 1 QoS profile, so this array is of size 1
            return profile_ids[0]
    # Didn't find it
    err_msg = _("Could not find QoS switching profile for policy "
                "%s") % policy_id
    LOG.error(err_msg)
    raise n_exc.InvalidInput(error_message=err_msg)

def _create_port_at_the_backend(self, context, port_data,
                                l2gw_port_check, psec_is_on,
                                is_ens_tz_port):
    """Create the NSX logical port matching a neutron port.

    Builds the tags, address bindings, attachment info and switching
    profiles from the neutron port data and creates the backend port.
    Returns the backend result dict; raises NsxPluginException (or
    BridgeEndpointAttachmentInUse for error code 8407) on failure.
    """
    device_owner = port_data.get('device_owner')
    device_id = port_data.get('device_id')
    if device_owner == const.DEVICE_OWNER_DHCP:
        resource_type = 'os-neutron-dport-id'
    elif device_owner == l3_db.DEVICE_OWNER_ROUTER_INTF:
        resource_type = 'os-neutron-rport-id'
    else:
        resource_type = 'os-neutron-port-id'
    tags = self.nsxlib.build_v3_tags_payload(
        port_data, resource_type=resource_type,
        project_name=context.tenant_name)
    resource_type = self._get_resource_type_for_device_id(
        device_owner, device_id)
    if resource_type:
        tags = nsxlib_utils.add_v3_tag(tags, resource_type, device_id)
    if self._is_excluded_port(device_owner, psec_is_on):
        tags.append({'scope': security.PORT_SG_SCOPE,
                     'tag': nsxlib_consts.EXCLUDE_PORT})
    else:
        # If port has no security-groups then we don't need to add any
        # security criteria tag.
        if port_data[ext_sg.SECURITYGROUPS]:
            tags += self.nsxlib.ns_group.get_lport_tags(
                port_data[ext_sg.SECURITYGROUPS] +
                port_data[provider_sg.PROVIDER_SECURITYGROUPS])
        # Add port to the default list
        if (device_owner != l3_db.DEVICE_OWNER_ROUTER_INTF and
            device_owner != const.DEVICE_OWNER_DHCP):
            tags.append({'scope': security.PORT_SG_SCOPE,
                         'tag': NSX_V3_DEFAULT_SECTION})
    address_bindings = (self._build_address_bindings(port_data)
                        if psec_is_on else [])
    if not device_owner:
        # no attachment
        attachment_type = None
        vif_uuid = None
    elif l2gw_port_check:
        # Change the attachment type for L2 gateway owned ports.
        # NSX backend requires the vif id be set to bridge endpoint id
        # for ports plugged into a Bridge Endpoint.
        # Also set port security to False, since L2GW port does not have
        # an IP address.
        vif_uuid = device_id
        attachment_type = device_owner
        psec_is_on = False
    elif device_owner == l3_db.DEVICE_OWNER_ROUTER_INTF:
        # no attachment change
        attachment_type = False
        vif_uuid = False
    else:
        # default attachment
        attachment_type = nsxlib_consts.ATTACHMENT_VIF
        vif_uuid = port_data['id']
    profiles = []
    # Add availability zone profiles first (so that specific profiles
    # will override them)
    port_az = self.get_network_az_by_net_id(context,
                                            port_data['network_id'])
    if port_az.switching_profiles_objs:
        profiles.extend(port_az.switching_profiles_objs)
    force_mac_learning = False
    if psec_is_on:
        address_pairs = port_data.get(addr_apidef.ADDRESS_PAIRS)
        if validators.is_attr_set(address_pairs) and address_pairs:
            # Force mac learning profile to allow address pairs to work
            force_mac_learning = True
        profiles.append(self._get_port_security_profile_id())
    else:
        if is_ens_tz_port:
            profiles.append(self._no_switch_security_ens)
        else:
            profiles.append(self._no_switch_security)
    if device_owner == const.DEVICE_OWNER_DHCP:
        if ((not is_ens_tz_port or self._ens_psec_supported()) and
            not cfg.CONF.nsx_v3.native_dhcp_metadata):
            profiles.append(self._dhcp_profile)
    # Add QoS switching profile, if exists
    qos_policy_id = self._get_port_qos_policy_id(
        context, None, port_data)
    if qos_policy_id:
        qos_profile_id = self._get_qos_profile_id(context,
                                                  qos_policy_id)
        profiles.append(qos_profile_id)
    # Add mac_learning profile if it exists and is configured
    port_mac_learning = (
        validators.is_attr_set(port_data.get(mac_ext.MAC_LEARNING)) and
        port_data.get(mac_ext.MAC_LEARNING) is True)
    if ((not is_ens_tz_port or self._ens_psec_supported()) and
        self._mac_learning_profile):
        if force_mac_learning or port_mac_learning:
            profiles.append(self._mac_learning_profile)
            if is_ens_tz_port:
                if self._no_switch_security_ens not in profiles:
                    profiles.append(self._no_switch_security_ens)
            else:
                if self._no_switch_security not in profiles:
                    profiles.append(self._no_switch_security)
        else:
            profiles.append(self._mac_learning_disabled_profile)
    name = self._build_port_name(context, port_data)
    nsx_net_id = self._get_network_nsx_id(context,
                                          port_data['network_id'])
    try:
        result = self.nsxlib.logical_port.create(
            nsx_net_id, vif_uuid,
            tags=tags,
            name=name,
            admin_state=port_data['admin_state_up'],
            address_bindings=address_bindings,
            attachment_type=attachment_type,
            switch_profile_ids=profiles,
            description=port_data.get('description'))
    except nsx_lib_exc.ManagerError as inst:
        # we may fail if the QoS is not supported for this port
        # (for example - transport zone with KVM)
        LOG.exception("Unable to create port on the backend: %s",
                      inst)
        if inst.error_code == 8407:
            raise nsx_exc.BridgeEndpointAttachmentInUse(
                network_id=port_data['network_id'])
        msg = _("Unable to create port on the backend")
        raise nsx_exc.NsxPluginException(err_msg=msg)
    # Attach the policy to the port in the neutron DB
    if qos_policy_id:
        qos_com_utils.update_port_policy_binding(context,
                                                 port_data['id'],
                                                 qos_policy_id)
    return result

def _get_net_tz(self, context, net_id):
    """Return the transport-zone uuid for a network.

    Returns None for external (L3_EXT) networks, where phy_uuid holds
    the tier0 id instead of a TZ.
    """
    bindings = nsx_db.get_network_bindings(context.session, net_id)
    if bindings:
        bind_type = bindings[0].binding_type
        if bind_type == utils.NsxV3NetworkTypes.NSX_NETWORK:
            # If it is NSX network, return the TZ of the backend LS
            mappings = nsx_db.get_nsx_switch_ids(context.session,
                                                 net_id)
            if mappings and mappings[0]:
                nsx_net = self.nsxlib.logical_switch.get(mappings[0])
                return nsx_net.get('transport_zone_id')
        elif bind_type == utils.NetworkTypes.L3_EXT:
            # External network has tier0 as phy_uuid
            return
        else:
            return bindings[0].phy_uuid
    else:
        # Get the default one for the network AZ
        az = self.get_network_az_by_net_id(context, net_id)
        return az._default_overlay_tz_uuid

def _is_ens_tz(self, tz_id):
    """Return True if the transport zone is in ENS host-switch mode."""
    # This call is cached on the nsxlib side
    mode = self.nsxlib.transport_zone.get_host_switch_mode(tz_id)
    return mode == self.nsxlib.transport_zone.HOST_SWITCH_MODE_ENS

def _has_native_dhcp_metadata(self):
    """Return True when native DHCP/metadata is configured."""
    return cfg.CONF.nsx_v3.native_dhcp_metadata
def _assert_on_dhcp_relay_without_router(self, context, port_data,
                                         original_port=None):
    """Reject compute ports on DHCP-relay subnets with no router.

    Prevent creating/updating a port with device owner prefix
    'compute' on a subnet with dhcp relay but no router interface.
    Raises InvalidInput when validation fails.
    """
    if not original_port:
        original_port = port_data
    device_owner = port_data.get('device_owner')
    if (device_owner is None or
        not device_owner.startswith(
            const.DEVICE_OWNER_COMPUTE_PREFIX)):
        # not a compute port
        return
    if not self.get_network_az_by_net_id(
        context,
        original_port['network_id']).dhcp_relay_service:
        # No dhcp relay for the net of this port
        return
    # get the subnet id from the fixed ips of the port
    if 'fixed_ips' in port_data and port_data['fixed_ips']:
        subnets = self._get_subnets_for_fixed_ips_on_port(context,
                                                          port_data)
    elif 'fixed_ips' in original_port and original_port['fixed_ips']:
        subnets = self._get_subnets_for_fixed_ips_on_port(
            context, original_port)
    else:
        return
    # check only dhcp enabled subnets
    # NOTE: must be a list, not a generator expression - a generator
    # object is always truthy, so the emptiness check below would
    # otherwise never trigger.
    subnets = [subnet for subnet in subnets if subnet['enable_dhcp']]
    if not subnets:
        return
    # NOTE: must be a set, not a generator - a generator would be
    # exhausted by the first membership test below, so only the first
    # fixed_ip would ever be checked against the subnet ids.
    subnet_ids = {subnet['id'] for subnet in subnets}
    # check if the subnet is attached to a router
    interfaces = self._get_network_interface_ports(
        context.elevated(), original_port['network_id'])
    for interface in interfaces:
        for fixed_ip in interface['fixed_ips']:
            if fixed_ip['subnet_id'] in subnet_ids:
                # Router exists - validation passed
                return
    err_msg = _("Neutron is configured with DHCP_Relay but no router "
                "connected to the subnet")
    LOG.warning(err_msg)
    raise n_exc.InvalidInput(error_message=err_msg)

def _disable_ens_portsec(self, port_data):
    """Force port security off for ports on an ENS TZ when configured."""
    if (cfg.CONF.nsx_v3.disable_port_security_for_ens and
        not self._ens_psec_supported()):
        LOG.warning("Disabling port security for network %s",
                    port_data['network_id'])
        port_data[psec.PORTSECURITY] = False
        port_data['security_groups'] = []

def base_create_port(self, context, port):
    """Create the neutron DB port and run extension create hooks."""
    neutron_db = super(NsxV3Plugin, self).create_port(context, port)
    self._extension_manager.process_create_port(
        context, port['port'], neutron_db)
    return neutron_db

def create_port(self, context, port, l2gw_port_check=False):
    """Create a neutron port and, for non-external nets, its NSX port.

    The DB work runs inside one transaction; backend operations run
    after it. On any backend failure the neutron port is cleaned up
    and the exception re-raised.
    """
    port_data = port['port']
    # validate the new port parameters
    self._validate_create_port(context, port_data)
    self._assert_on_dhcp_relay_without_router(context, port_data)
    is_ens_tz_port = self._is_ens_tz_port(context, port_data)
    if is_ens_tz_port:
        self._disable_ens_portsec(port_data)
    is_external_net = self._network_is_external(
        context, port_data['network_id'])
    direct_vnic_type = self._validate_port_vnic_type(
        context, port_data, port_data['network_id'],
        projectpluginmap.NsxPlugins.NSX_T)
    with db_api.CONTEXT_WRITER.using(context):
        neutron_db = self.base_create_port(context, port)
        port["port"].update(neutron_db)
        self.fix_direct_vnic_port_sec(direct_vnic_type, port_data)
        (is_psec_on, has_ip, sgids, psgids) = (
            self._create_port_preprocess_security(
                context, port, port_data, neutron_db, is_ens_tz_port))
        self._process_portbindings_create_and_update(
            context, port['port'], port_data,
            vif_type=self._vif_type_by_vnic_type(direct_vnic_type))
        self._process_port_create_extra_dhcp_opts(
            context, port_data,
            port_data.get(ext_edo.EXTRADHCPOPTS))
        # handle adding security groups to port
        self._process_port_create_security_group(
            context, port_data, sgids)
        self._process_port_create_provider_security_group(
            context, port_data, psgids)
        # add provider groups to other security groups list.
        # sgids is a set() so we need to | it in.
        if psgids:
            sgids = list(set(sgids) | set(psgids))
        # Handle port mac learning
        if validators.is_attr_set(port_data.get(mac_ext.MAC_LEARNING)):
            # Make sure mac_learning and port sec are not both enabled
            if port_data.get(mac_ext.MAC_LEARNING) and is_psec_on:
                msg = _('Mac learning requires that port security be '
                        'disabled')
                LOG.error(msg)
                raise n_exc.InvalidInput(error_message=msg)
            if (is_ens_tz_port and not self._ens_psec_supported() and
                not port_data.get(mac_ext.MAC_LEARNING)):
                msg = _('Cannot disable Mac learning for ENS TZ')
                LOG.error(msg)
                raise n_exc.InvalidInput(error_message=msg)
            # save the mac learning value in the DB
            self._create_mac_learning_state(context, port_data)
        elif mac_ext.MAC_LEARNING in port_data:
            # This is due to the fact that the default is
            # ATTR_NOT_SPECIFIED
            port_data.pop(mac_ext.MAC_LEARNING)
        # For a ENZ TZ mac learning is always enabled
        if (is_ens_tz_port and not self._ens_psec_supported() and
            mac_ext.MAC_LEARNING not in port_data):
            # Set the default and add to the DB
            port_data[mac_ext.MAC_LEARNING] = True
            self._create_mac_learning_state(context, port_data)
    # Operations to backend should be done outside of DB transaction.
    # NOTE(arosen): ports on external networks are nat rules and do
    # not result in ports on the backend.
    if not is_external_net:
        try:
            lport = self._create_port_at_the_backend(
                context, port_data, l2gw_port_check, is_psec_on,
                is_ens_tz_port)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error('Failed to create port %(id)s on NSX '
                          'backend. Exception: %(e)s',
                          {'id': neutron_db['id'], 'e': e})
                self._cleanup_port(context, neutron_db['id'], None)
        try:
            net_id = self._get_network_nsx_id(
                context, port_data['network_id'])
            nsx_db.add_neutron_nsx_port_mapping(
                context.session, neutron_db['id'],
                net_id, lport['id'])
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.debug('Failed to update mapping %s on NSX '
                          'backend. Reverting port creation. '
                          'Exception: %s', neutron_db['id'], e)
                self._cleanup_port(context, neutron_db['id'],
                                   lport['id'])
    # this extra lookup is necessary to get the
    # latest db model for the extension functions
    port_model = self._get_port(context, port_data['id'])
    resource_extend.apply_funcs('ports', port_data, port_model)
    self._extend_nsx_port_dict_binding(context, port_data)
    self._remove_provider_security_groups_from_list(port_data)
    # Add Mac/IP binding to native DHCP server and neutron DB.
    if cfg.CONF.nsx_v3.native_dhcp_metadata:
        try:
            self._add_port_mp_dhcp_binding(context, port_data)
        except nsx_lib_exc.ManagerError:
            # Rollback create port
            self.delete_port(context, port_data['id'],
                             force_delete_dhcp=True)
            msg = _('Unable to create port. Please contact admin')
            LOG.exception(msg)
            raise nsx_exc.NsxPluginException(err_msg=msg)
    if not cfg.CONF.nsx_v3.native_dhcp_metadata:
        nsx_rpc.handle_port_metadata_access(self, context, neutron_db)
    kwargs = {'context': context, 'port': neutron_db}
    registry.notify(resources.PORT, events.AFTER_CREATE, self,
                    **kwargs)
    return port_data

def _pre_delete_port_check(self, context, port_id, l2gw_port_check):
    """Perform checks prior to deleting a port."""
    try:
        # Send delete port notification to any interested service plugin
        registry.publish(
            resources.PORT, events.BEFORE_DELETE, self,
            payload=events.DBEventPayload(
                context, resource_id=port_id,
                metadata={'port_check': l2gw_port_check}))
    except callback_exc.CallbackFailure as e:
        if len(e.errors) == 1:
            raise e.errors[0].error
        raise n_exc.ServicePortInUse(port_id=port_id, reason=e)

def delete_port(self, context, port_id,
                l3_port_check=True, l2gw_port_check=True,
                force_delete_dhcp=False,
                force_delete_vpn=False):
    """Delete a port from neutron and (for non-external nets) NSX.

    The force_delete_* flags bypass the DHCP-port and VPN-port
    protection checks respectively.
    """
    # if needed, check to see if this is a port owned by
    # a l2 gateway. If so, we should prevent deletion here
    self._pre_delete_port_check(context, port_id, l2gw_port_check)
    # if needed, check to see if this is a port owned by
    # a l3 router. If so, we should prevent deletion here
    if l3_port_check:
        self.prevent_l3_port_deletion(context, port_id)
    port = self.get_port(context, port_id)
    # Prevent DHCP port deletion if native support is enabled
    if (cfg.CONF.nsx_v3.native_dhcp_metadata and
        not force_delete_dhcp and
        port['device_owner'] in [const.DEVICE_OWNER_DHCP]):
        msg = (_('Can not delete DHCP port %s') % port['id'])
        raise n_exc.BadRequest(resource='port', msg=msg)
    if not force_delete_vpn:
        self._assert_on_vpn_port_change(port)
    if not self._network_is_external(context, port['network_id']):
        _net_id, nsx_port_id = nsx_db.get_nsx_switch_and_port_id(
            context.session, port_id)
        self.nsxlib.logical_port.delete(nsx_port_id)
    self.disassociate_floatingips(context, port_id)
    # Remove Mac/IP binding from native DHCP server and neutron DB.
    if cfg.CONF.nsx_v3.native_dhcp_metadata:
        self._delete_port_mp_dhcp_binding(context, port)
    else:
        nsx_rpc.handle_port_metadata_access(self, context, port,
                                            is_delete=True)
    super(NsxV3Plugin, self).delete_port(context, port_id)

def _get_resource_type_for_device_id(self, device_owner, device_id):
    """Map a device owner to the NSX tag scope for its device id.

    Returns None (implicitly) for device owners that are neither
    router interfaces nor compute instances.
    """
    if device_owner in const.ROUTER_INTERFACE_OWNERS:
        return 'os-router-uuid'
    elif device_owner.startswith(const.DEVICE_OWNER_COMPUTE_PREFIX):
        return 'os-instance-uuid'

def _update_port_on_backend(self, context, lport_id,
                            original_port, updated_port,
                            address_bindings,
                            switch_profile_ids,
                            is_ens_tz_port):
    """Push a neutron port update to the NSX logical port.

    Recomputes tags, attachment, switching profiles and QoS binding
    from the original/updated port dicts, then updates the backend.
    Raises NsxPluginException on backend failure.
    """
    original_device_owner = original_port.get('device_owner')
    original_device_id = original_port.get('device_id')
    updated_device_owner = updated_port.get('device_owner')
    updated_device_id = updated_port.get('device_id')
    tags_update = []
    if original_device_id != updated_device_id:
        # Determine if we need to update or drop the tag. If the
        # updated_device_id exists then the tag will be updated. This
        # is done using the updated port. If the updated_device_id does
        # not exist then we need to get the original resource type
        # from original_device_owner. This enables us to drop the tag.
        if updated_device_id:
            resource_type = self._get_resource_type_for_device_id(
                updated_device_owner, updated_device_id)
        else:
            resource_type = self._get_resource_type_for_device_id(
                original_device_owner, updated_device_id)
        if resource_type:
            tags_update = nsxlib_utils.add_v3_tag(
                tags_update, resource_type, updated_device_id)
    if updated_device_owner in (original_device_owner,
                                l3_db.DEVICE_OWNER_ROUTER_INTF,
                                nsxlib_consts.BRIDGE_ENDPOINT):
        # no attachment change
        attachment_type = False
        vif_uuid = False
    elif updated_device_owner:
        # default attachment
        attachment_type = nsxlib_consts.ATTACHMENT_VIF
        vif_uuid = updated_port['id']
    else:
        # no attachment
        attachment_type = None
        vif_uuid = None
    name = self._build_port_name(context, updated_port)
    # Update exclude list if necessary
    updated_ps = updated_port.get('port_security_enabled')
    updated_excluded = self._is_excluded_port(updated_device_owner,
                                              updated_ps)
    original_ps = original_port.get('port_security_enabled')
    original_excluded = self._is_excluded_port(original_device_owner,
                                               original_ps)
    if updated_excluded != original_excluded:
        if updated_excluded:
            tags_update.append({'scope': security.PORT_SG_SCOPE,
                                'tag': nsxlib_consts.EXCLUDE_PORT})
        else:
            tags_update.append({'scope': security.PORT_SG_SCOPE,
                                'tag': None})
    tags_update += self.nsxlib.ns_group.get_lport_tags(
        updated_port.get(ext_sg.SECURITYGROUPS, []) +
        updated_port.get(provider_sg.PROVIDER_SECURITYGROUPS, []))
    # Only set the default section tag if there is no port security
    if not updated_excluded:
        tags_update.append({'scope': security.PORT_SG_SCOPE,
                            'tag': NSX_V3_DEFAULT_SECTION})
    else:
        # Ensure that the 'exclude' tag is set
        tags_update.append({'scope': security.PORT_SG_SCOPE,
                            'tag': nsxlib_consts.EXCLUDE_PORT})
    # Add availability zone profiles first (so that specific profiles
    # will override them)
    port_az = self.get_network_az_by_net_id(
        context, updated_port['network_id'])
    if port_az.switching_profiles_objs:
        switch_profile_ids = (port_az.switching_profiles_objs +
                              switch_profile_ids)
    # Update the DHCP profile
    if (updated_device_owner == const.DEVICE_OWNER_DHCP and
        (not is_ens_tz_port or self._ens_psec_supported()) and
        not cfg.CONF.nsx_v3.native_dhcp_metadata):
        switch_profile_ids.append(self._dhcp_profile)
    # Update QoS switch profile
    qos_policy_id, qos_profile_id = self._get_port_qos_ids(
        context, original_port, updated_port)
    if qos_profile_id is not None:
        switch_profile_ids.append(qos_profile_id)
    psec_is_on = (self._get_port_security_profile_id() in
                  switch_profile_ids)
    address_pairs = updated_port.get(addr_apidef.ADDRESS_PAIRS)
    force_mac_learning = (
        validators.is_attr_set(address_pairs) and address_pairs and
        psec_is_on)
    port_mac_learning = (
        updated_port.get(mac_ext.MAC_LEARNING) is True)
    # Add mac_learning profile if it exists and is configured
    if ((not is_ens_tz_port or self._ens_psec_supported()) and
        self._mac_learning_profile):
        if force_mac_learning or port_mac_learning:
            switch_profile_ids.append(self._mac_learning_profile)
            if is_ens_tz_port:
                if (self._no_switch_security_ens not in
                        switch_profile_ids):
                    switch_profile_ids.append(
                        self._no_switch_security_ens)
            else:
                if self._no_switch_security not in switch_profile_ids:
                    switch_profile_ids.append(self._no_switch_security)
        else:
            switch_profile_ids.append(
                self._mac_learning_disabled_profile)
    try:
        self.nsxlib.logical_port.update(
            lport_id, vif_uuid, name=name,
            attachment_type=attachment_type,
            admin_state=updated_port.get('admin_state_up'),
            address_bindings=address_bindings,
            switch_profile_ids=switch_profile_ids,
            tags_update=tags_update,
            description=updated_port.get('description'))
    except nsx_lib_exc.ManagerError as inst:
        # we may fail if the QoS is not supported for this port
        # (for example - transport zone with KVM)
        LOG.exception("Unable to update port on the backend: %s",
                      inst)
        msg = _("Unable to update port on the backend")
        raise nsx_exc.NsxPluginException(err_msg=msg)
    # Attach/Detach the QoS policies to the port in the neutron DB
    qos_com_utils.update_port_policy_binding(context,
                                             updated_port['id'],
                                             qos_policy_id)

def _get_port_qos_ids(self, context, original_port, updated_port):
    """Return (qos_policy_id, nsx_profile_id) for a port update."""
    qos_policy_id = self._get_port_qos_policy_id(
        context, original_port, updated_port)
    profile_id = None
    if qos_policy_id is not None:
        profile_id = self._get_qos_profile_id(context, qos_policy_id)
    return qos_policy_id, profile_id

def update_port(self, context, id, port):
    """Update a neutron port and mirror the change to the NSX port.

    DB changes run in a transaction; the backend update runs after
    it and, on failure, the neutron update is reverted before the
    exception is translated and re-raised.
    """
    with db_api.CONTEXT_WRITER.using(context):
        # get the original port, and keep it honest as it is later used
        # for notifications
        original_port = super(NsxV3Plugin, self).get_port(context, id)
        self._extend_get_port_dict_qos_and_binding(context,
                                                   original_port)
        self._remove_provider_security_groups_from_list(original_port)
        port_data = port['port']
        validate_port_sec = (
            self._should_validate_port_sec_on_update_port(port_data))
        nsx_lswitch_id, nsx_lport_id = (
            nsx_db.get_nsx_switch_and_port_id(context.session, id))
        # Validate the changes
        self._validate_update_port(context, id, original_port,
                                   port_data)
        self._assert_on_dhcp_relay_without_router(context, port_data,
                                                  original_port)
        is_ens_tz_port = self._is_ens_tz_port(context, original_port)
        direct_vnic_type = self._validate_port_vnic_type(
            context, port_data, original_port['network_id'])
        # Update the neutron port
        updated_port = super(NsxV3Plugin, self).update_port(context,
                                                            id, port)
        self._extension_manager.process_update_port(
            context, port_data, updated_port)
        # copy values over - except fixed_ips as
        # they've already been processed
        port_data.pop('fixed_ips', None)
        updated_port.update(port_data)
        updated_port = self._update_port_preprocess_security(
            context, port, id, updated_port, is_ens_tz_port,
            validate_port_sec=validate_port_sec,
            direct_vnic_type=direct_vnic_type)
        self._update_extra_dhcp_opts_on_port(context, id, port,
                                             updated_port)
        sec_grp_updated = self.update_security_group_on_port(
            context, id, port, original_port, updated_port)
        self._process_port_update_provider_security_group(
            context, port, original_port, updated_port)
        (port_security, has_ip) = (
            self._determine_port_security_and_has_ip(context,
                                                     updated_port))
        self._process_portbindings_create_and_update(
            context, port_data, updated_port,
            vif_type=self._vif_type_by_vnic_type(direct_vnic_type))
        self._extend_nsx_port_dict_binding(context, updated_port)
        mac_learning_state = updated_port.get(mac_ext.MAC_LEARNING)
        if mac_learning_state is not None:
            if (not mac_learning_state and is_ens_tz_port and
                not self._ens_psec_supported()):
                msg = _('Mac learning cannot be disabled with ENS TZ')
                LOG.error(msg)
                raise n_exc.InvalidInput(error_message=msg)
            if port_security and mac_learning_state:
                msg = _('Mac learning requires that port security be '
                        'disabled')
                LOG.error(msg)
                raise n_exc.InvalidInput(error_message=msg)
            self._update_mac_learning_state(context, id,
                                            mac_learning_state)
        self._remove_provider_security_groups_from_list(updated_port)
    address_bindings = self._build_address_bindings(updated_port)
    if port_security and address_bindings:
        switch_profile_ids = [self._get_port_security_profile_id()]
    else:
        switch_profile_ids = [self._no_psec_profile_id]
        if is_ens_tz_port:
            switch_profile_ids.append(self._no_switch_security_ens)
        else:
            switch_profile_ids.append(self._no_switch_security)
        address_bindings = []
    # update the port in the backend, only if it exists in the DB
    # (i.e not external net)
    if nsx_lport_id is not None:
        try:
            self._update_port_on_backend(context, nsx_lport_id,
                                         original_port, updated_port,
                                         address_bindings,
                                         switch_profile_ids,
                                         is_ens_tz_port)
        except (nsx_lib_exc.ManagerError,
                nsx_lib_exc.SecurityGroupMaximumCapacityReached) as e:
            # In case if there is a failure on NSX-v3 backend, rollback
            # the previous update operation on neutron side.
            LOG.exception("Unable to update NSX backend, rolling back "
                          "changes on neutron")
            with excutils.save_and_reraise_exception(reraise=False):
                with db_api.CONTEXT_WRITER.using(context):
                    self._revert_neutron_port_update(
                        context, id, original_port, updated_port,
                        port_security, sec_grp_updated)
                # NOTE(arosen): this is to translate between nsxlib
                # exceptions and the plugin exceptions. This should be
                # later refactored.
                if (e.__class__ is
                        nsx_lib_exc.SecurityGroupMaximumCapacityReached):
                    raise nsx_exc.SecurityGroupMaximumCapacityReached(
                        err_msg=e.msg)
                else:
                    raise e
    # Update DHCP bindings.
    if cfg.CONF.nsx_v3.native_dhcp_metadata:
        self._update_port_mp_dhcp_binding(
            context, original_port, updated_port)
    # Make sure the port revision is updated
    if 'revision_number' in updated_port:
        port_model = self._get_port(context, id)
        updated_port['revision_number'] = port_model.revision_number
    # Notifications must be sent after the above transaction is complete
    kwargs = {
        'context': context,
        'port': updated_port,
        'mac_address_updated': False,
        'original_port': original_port,
    }
    registry.notify(resources.PORT, events.AFTER_UPDATE, self,
                    **kwargs)
    return updated_port

def _extend_get_port_dict_qos_and_binding(self, context, port):
    # Not using the register api for this because we need the context
    self._extend_nsx_port_dict_binding(context, port)
    self._extend_qos_port_dict_binding(context, port)

def get_port(self, context, id, fields=None):
    """Return a port dict with NSX/QoS extensions applied.

    The parent is queried with fields=None so the extension hooks see
    the full dict; field filtering happens afterwards.
    """
    port = super(NsxV3Plugin, self).get_port(context, id, fields=None)
    self._extend_get_port_dict_qos_and_binding(context, port)
    self._remove_provider_security_groups_from_list(port)
    return db_utils.resource_fields(port, fields)

def get_ports(self, context, filters=None, fields=None,
              sorts=None, limit=None, marker=None,
              page_reverse=False):
    """Return ports with NSX/QoS extensions applied to each entry."""
    filters = filters or {}
    self._update_filters_with_sec_group(context, filters)
    with db_api.CONTEXT_READER.using(context):
        ports = (
            super(NsxV3Plugin, self).get_ports(
                context, filters, fields, sorts,
                limit, marker, page_reverse))
        self._log_get_ports(ports, filters)
        # Add port extensions
        for port in ports[:]:
            self._extend_get_port_dict_qos_and_binding(context, port)
            self._remove_provider_security_groups_from_list(port)
    return (ports if not fields else
            [db_utils.resource_fields(port, fields)
             for port in ports])

def _validate_router_tz(self, context, tier0_uuid, subnets):
    """Validate tier0 TZ membership against the attached subnets."""
    # make sure the related GW (Tier0 router) belongs to the same TZ
    # as the subnets attached to the Tier1 router
    if not subnets:
        return
    tier0_tzs = self.nsxlib.router.get_tier0_router_tz(tier0_uuid)
    if not tier0_tzs:
        return
    for sub in subnets:
        tz_uuid = self._get_net_tz(context, sub['network_id'])
        if tz_uuid not in tier0_tzs:
            msg = (_("Tier0 router %(rtr)s transport zone should match "
                     "transport zone %(tz)s of the network %(net)s") % {
                'rtr': tier0_uuid,
                'tz': tz_uuid,
                'net': sub['network_id']})
            raise n_exc.InvalidInput(error_message=msg)

def verify_sr_at_backend(self, context, router_id):
    """Return True if the NSX router has a service router."""
    nsx_router_id = nsx_db.get_nsx_router_id(context.session,
                                             router_id)
    return self.nsxlib.router.has_service_router(nsx_router_id)

def service_router_has_services(self, context, router_id):
    """Return True if the router uses SNAT, LB or edge FW services."""
    router = self._get_router(context, router_id)
    snat_exist = router.enable_snat
    lb_exist = self.service_router_has_loadbalancers(context,
                                                     router_id)
    fw_exist = self._router_has_edge_fw_rules(context, router)
    # Normalize to a real bool so callers always get True/False
    # (previously a falsy non-bool such as None could leak out of the
    # redundant second return statement).
    return bool(snat_exist or lb_exist or fw_exist)

def service_router_has_loadbalancers(self, context, router_id):
    """Return whether any LBaaS loadbalancer is bound to the router."""
    nsx_router_id = nsx_db.get_nsx_router_id(context.session,
                                             router_id)
    return nsx_db.has_nsx_lbaas_loadbalancer_binding_by_router(
        context.session, nsx_router_id)

def create_service_router(self, context, router_id, router=None,
                          update_firewall=True):
    """Create a service router and enable standby relocation"""
    if not router:
        router = self._get_router(context, router_id)
    tier0_uuid = self._get_tier0_uuid_by_router(context, router)
    if not tier0_uuid:
        err_msg = (_("Cannot create service router for %s without a "
                     "gateway") % router_id)
        raise n_exc.InvalidInput(error_message=err_msg)
    edge_cluster_uuid = self._get_edge_cluster(tier0_uuid, router)
    nsx_router_id = nsx_db.get_nsx_router_id(context.session,
                                             router_id)
    self.nsxlib.logical_router.update(
        nsx_router_id,
        edge_cluster_id=edge_cluster_uuid,
        enable_standby_relocation=True)
    LOG.info("Created service router for %s (NSX logical router %s)",
             router_id, nsx_router_id)
    # update firewall rules (there might be FW group waiting for a
    # service router)
    if update_firewall:
        self.update_router_firewall(context, router_id)

def delete_service_router(self, context, router_id):
    """Disable the edge firewall and remove the service router."""
    nsx_router_id = nsx_db.get_nsx_router_id(context.session,
                                             router_id)
    self.nsxlib.router.change_edge_firewall_status(
        nsx_router_id, nsxlib_consts.FW_DISABLE)
    self.nsxlib.logical_router.update(
        nsx_router_id,
        edge_cluster_id=None,
        enable_standby_relocation=False)
    LOG.info("Deleted service router for %s (NSX logical router %s)",
             router_id, nsx_router_id)

def _update_router_gw_info(self, context, router_id, info):
    # NOTE(review): this method continues beyond this chunk of the
    # file; only its validation prologue is visible here.
    router = self._get_router(context, router_id)
    org_tier0_uuid = self._get_tier0_uuid_by_router(context, router)
    org_enable_snat = router.enable_snat
    orgaddr, orgmask, _orgnexthop = (
        self._get_external_attachment_info(
            context, router))
    router_subnets = self._load_router_subnet_cidrs_from_db(
        context.elevated(), router_id)
    self._validate_router_gw_and_tz(context, router_id, info,
                                    org_enable_snat, router_subnets)
    # Interface subnets cannot overlap with the GW external subnet
    if info and info.get('network_id'):
        self._validate_gw_overlap_interfaces(
            context, info['network_id'],
            [sub['network_id'] for sub in router_subnets])
    # TODO(berlin): For nonat use case, we actually don't need a gw port
    # which consumes one external ip. But after looking at the DB logic
    # and we need to make a big change so don't touch it at present.
super(NsxV3Plugin, self)._update_router_gw_info( context, router_id, info, router=router) new_tier0_uuid = self._get_tier0_uuid_by_router(context, router) new_enable_snat = router.enable_snat newaddr, newmask, _newnexthop = ( self._get_external_attachment_info( context, router)) nsx_router_id = nsx_db.get_nsx_router_id(context.session, router_id) lb_exist = nsx_db.has_nsx_lbaas_loadbalancer_binding_by_router( context.session, nsx_router_id) fw_exist = self._router_has_edge_fw_rules(context, router) tier1_services_exist = lb_exist or fw_exist sr_currently_exists = self.verify_sr_at_backend(context, router_id) actions = self._get_update_router_gw_actions( org_tier0_uuid, orgaddr, org_enable_snat, new_tier0_uuid, newaddr, new_enable_snat, tier1_services_exist, sr_currently_exists) if actions['add_service_router']: self.create_service_router(context, router_id, router=router) if actions['revocate_bgp_announce']: # TODO(berlin): revocate bgp announce on org tier0 router pass if actions['remove_snat_rules']: self.nsxlib.router.delete_gw_snat_rules(nsx_router_id, orgaddr) if actions['remove_no_dnat_rules']: for subnet in router_subnets: self._del_subnet_no_dnat_rule(context, nsx_router_id, subnet) if actions['remove_router_link_port']: # remove the link port and reset the router transport zone self.nsxlib.router.remove_router_link_port(nsx_router_id) self.nsxlib.router.update_router_transport_zone( nsx_router_id, None) if actions['add_router_link_port']: # Add the overlay transport zone to the router config tz_uuid = self.nsxlib.router.get_tier0_router_overlay_tz( new_tier0_uuid) if tz_uuid: self.nsxlib.router.update_router_transport_zone( nsx_router_id, tz_uuid) tags = self.nsxlib.build_v3_tags_payload( router, resource_type='os-neutron-rport', project_name=context.tenant_name) self.nsxlib.router.add_router_link_port(nsx_router_id, new_tier0_uuid, tags=tags) if actions['add_snat_rules']: # Add SNAT rules for all the subnets which are in different scope # than the gw 
gw_address_scope = self._get_network_address_scope( context, router.gw_port.network_id) for subnet in router_subnets: self._add_subnet_snat_rule(context, router_id, nsx_router_id, subnet, gw_address_scope, newaddr) if actions['add_no_dnat_rules']: for subnet in router_subnets: self._add_subnet_no_dnat_rule(context, nsx_router_id, subnet) if actions['bgp_announce']: # TODO(berlin): bgp announce on new tier0 router pass self.nsxlib.router.update_advertisement( nsx_router_id, actions['advertise_route_nat_flag'], actions['advertise_route_connected_flag']) if actions['remove_service_router']: self.delete_service_router(context, router_id) def _add_subnet_snat_rule(self, context, router_id, nsx_router_id, subnet, gw_address_scope, gw_ip): if not self._need_router_snat_rules(context, router_id, subnet, gw_address_scope): return self.nsxlib.router.add_gw_snat_rule(nsx_router_id, gw_ip, source_net=subnet['cidr'], bypass_firewall=False) def _add_subnet_no_dnat_rule(self, context, nsx_router_id, subnet): if not self._need_router_no_dnat_rules(subnet): return # Add NO-DNAT rule to allow internal traffic between VMs, even if # they have floating ips (Only for routers with snat enabled) self.nsxlib.logical_router.add_nat_rule( nsx_router_id, "NO_DNAT", None, dest_net=subnet['cidr'], rule_priority=nsxlib_router.GW_NAT_PRI) def _del_subnet_no_dnat_rule(self, context, nsx_router_id, subnet): # Delete the previously created NO-DNAT rules self.nsxlib.logical_router.delete_nat_rule_by_values( nsx_router_id, action="NO_DNAT", match_destination_network=subnet['cidr']) def validate_router_dhcp_relay(self, context): """Fail router creation dhcp relay is configured without IPAM""" if (self._availability_zones_data.dhcp_relay_configured() and cfg.CONF.ipam_driver == 'internal'): err_msg = _("Neutron is configured with DHCP_Relay but no IPAM " "plugin configured") LOG.warning(err_msg) raise n_exc.InvalidInput(error_message=err_msg) def create_router(self, context, router): r = 
router['router'] self.validate_router_dhcp_relay(context) # validate the availability zone self._validate_obj_az_on_creation(context, r, 'router') gw_info = self._extract_external_gw(context, router, is_extract=True) r['id'] = (r.get('id') or uuidutils.generate_uuid()) tags = self.nsxlib.build_v3_tags_payload( r, resource_type='os-neutron-router-id', project_name=context.tenant_name) router = super(NsxV3Plugin, self).create_router(context, router) self._add_az_to_router(context, router['id'], r) router_db = self._get_router(context, r['id']) with db_api.CONTEXT_WRITER.using(context): self._process_extra_attr_router_create(context, router_db, r) # Create backend entries here in case neutron DB exception # occurred during super.create_router(), which will cause # API retry and leaves dangling backend entries. try: result = self.nsxlib.logical_router.create( display_name=utils.get_name_and_uuid( router['name'] or 'router', router['id']), description=router.get('description'), tags=tags) except nsx_lib_exc.ManagerError: with excutils.save_and_reraise_exception(): LOG.error("Unable to create logical router for " "neutron router %s", router['id']) self.delete_router(context, router['id']) try: nsx_db.add_neutron_nsx_router_mapping( context.session, router['id'], result['id']) except db_exc.DBError: with excutils.save_and_reraise_exception(): LOG.error("Unable to create router mapping for " "router %s", router['id']) self.delete_router(context, router['id']) if gw_info and gw_info != const.ATTR_NOT_SPECIFIED: try: self._update_router_gw_info(context, router['id'], gw_info) except (db_exc.DBError, nsx_lib_exc.ManagerError): with excutils.save_and_reraise_exception(): LOG.error("Failed to set gateway info for router " "being created: %s - removing router", router['id']) self.delete_router(context, router['id']) LOG.info("Create router failed while setting external " "gateway. 
Router:%s has been removed from " "DB and backend", router['id']) return self.get_router(context, router['id']) def delete_router(self, context, router_id): if not cfg.CONF.nsx_v3.native_dhcp_metadata: nsx_rpc.handle_router_metadata_access(self, context, router_id, interface=None) gw_info = self._get_router_gw_info(context, router_id) if gw_info: self._update_router_gw_info(context, router_id, {}) nsx_router_id = nsx_db.get_nsx_router_id(context.session, router_id) ret_val = super(NsxV3Plugin, self).delete_router(context, router_id) # if delete was called due to create error, there might not be a # backend id if not nsx_router_id: return ret_val # Remove logical router from the NSX backend # It is safe to do now as db-level checks for resource deletion were # passed (and indeed the resource was removed from the Neutron DB try: self.nsxlib.logical_router.delete(nsx_router_id, force=True) except nsx_lib_exc.ResourceNotFound: # If the logical router was not found on the backend do not worry # about it. The conditions has already been logged, so there is no # need to do further logging pass except nsx_lib_exc.ManagerError: # if there is a failure in deleting the router do not fail the # operation, especially since the router object has already been # removed from the neutron DB. Take corrective steps to ensure the # resulting zombie object does not forward any traffic and is # eventually removed. LOG.warning("Backend router deletion for neutron router %s " "failed. 
The object was however removed from the " "Neutron database", router_id) return ret_val @nsx_plugin_common.api_replay_mode_wrapper def update_router(self, context, router_id, router): gw_info = self._extract_external_gw(context, router, is_extract=False) router_data = router['router'] self._assert_on_router_admin_state(router_data) if validators.is_attr_set(gw_info): self._validate_update_router_gw(context, router_id, gw_info) router_ports = self._get_router_interfaces(context, router_id) for port in router_ports: # if setting this router as no-snat, make sure gw address scope # match those of the subnets if not gw_info.get('enable_snat', cfg.CONF.enable_snat_by_default): for fip in port['fixed_ips']: self._validate_address_scope_for_router_interface( context.elevated(), router_id, gw_info['network_id'], fip['subnet_id']) # If the network attached to a router is a VLAN backed network # then it must be attached to an edge cluster if (not gw_info and not self._is_overlay_network(context, port['network_id'])): msg = _("A router attached to a VLAN backed network " "must have an external network assigned") raise n_exc.InvalidInput(error_message=msg) # VPNaaS need to be notified on router GW changes (there is # currently no matching upstream registration for this) vpn_plugin = directory.get_plugin(plugin_const.VPN) if vpn_plugin: vpn_driver = vpn_plugin.drivers[vpn_plugin.default_provider] vpn_driver.validate_router_gw_info(context, router_id, gw_info) nsx_router_id = None routes_added = [] routes_removed = [] try: if 'routes' in router_data: routes_added, routes_removed = self._get_static_routes_diff( context, router_id, gw_info, router_data) nsx_router_id = nsx_db.get_nsx_router_id(context.session, router_id) for route in routes_removed: self.nsxlib.router.delete_static_routes(nsx_router_id, route) for route in routes_added: self.nsxlib.router.add_static_routes(nsx_router_id, route) if 'name' in router_data: # Update the name of logical router. 
router_name = router_data['name'] or 'router' display_name = utils.get_name_and_uuid(router_name, router_id) nsx_router_id = nsx_router_id or nsx_db.get_nsx_router_id( context.session, router_id) self.nsxlib.logical_router.update(nsx_router_id, display_name=display_name) # Update the name of associated logical ports. filters = {'device_id': [router_id], 'device_owner': const.ROUTER_INTERFACE_OWNERS} ports = self.get_ports(context, filters=filters) for port in ports: nsx_s_id, nsx_port_id = nsx_db.get_nsx_switch_and_port_id( context.session, port['id']) if nsx_port_id: name = utils.get_name_and_uuid( router_name, port['id'], tag='port') try: self.nsxlib.logical_port.update(nsx_port_id, None, name=name) except Exception as e: LOG.error("Unable to update port %(port_id)s. " "Reason: %(e)s", {'port_id': nsx_port_id, 'e': e}) if 'description' in router_data: nsx_router_id = nsx_db.get_nsx_router_id(context.session, router_id) self.nsxlib.logical_router.update( nsx_router_id, description=router_data['description']) return super(NsxV3Plugin, self).update_router( context, router_id, router) except nsx_lib_exc.ResourceNotFound: with db_api.CONTEXT_WRITER.using(context): router_db = self._get_router(context, router_id) router_db['status'] = const.NET_STATUS_ERROR raise nsx_exc.NsxPluginException( err_msg=(_("logical router %s not found at the backend") % router_id)) except nsx_lib_exc.ManagerError: with excutils.save_and_reraise_exception(): router_db = self._get_router(context, router_id) curr_status = router_db['status'] router_db['status'] = const.NET_STATUS_ERROR if nsx_router_id: for route in routes_added: self.nsxlib.router.delete_static_routes( nsx_router_id, route) for route in routes_removed: self.nsxlib.router.add_static_routes(nsx_router_id, route) router_db['status'] = curr_status def _get_nsx_router_and_fw_section(self, context, router_id): # find the backend router id in the DB nsx_router_id = nsx_db.get_nsx_router_id(context.session, router_id) if 
nsx_router_id is None: msg = _("Didn't find nsx router for router %s") % router_id LOG.error(msg) raise nsx_exc.NsxPluginException(err_msg=msg) # get the FW section id of the backend router try: section_id = self.nsxlib.logical_router.get_firewall_section_id( nsx_router_id) except Exception as e: msg = (_("Failed to find router firewall section for router " "%(id)s: %(e)s") % {'id': router_id, 'e': e}) LOG.error(msg) raise nsx_exc.NsxPluginException(err_msg=msg) if section_id is None: msg = (_("Failed to find router firewall section for router " "%(id)s.") % {'id': router_id}) LOG.error(msg) raise nsx_exc.NsxPluginException(err_msg=msg) return nsx_router_id, section_id def update_router_firewall(self, context, router_id, from_fw=False): """Rewrite all the rules in the router edge firewall This method should be called on FWaaS v2 updates, and on router interfaces changes. When FWaaS is disabled, there is no need to update the NSX router FW, as the default rule is allow-all. """ if (self.fwaas_callbacks and self.fwaas_callbacks.fwaas_enabled): # find all the relevant ports of the router for FWaaS v2 # TODO(asarfaty): Add vm ports as well ports = self._get_router_interfaces(context, router_id) nsx_router_id, section_id = self._get_nsx_router_and_fw_section( context, router_id) # let the fwaas callbacks update the router FW return self.fwaas_callbacks.update_router_firewall( context, self.nsxlib, router_id, ports, nsx_router_id, section_id, from_fw=from_fw) def _get_port_relay_servers(self, context, port_id, network_id=None): if not network_id: port = self.get_port(context, port_id) network_id = port['network_id'] net_az = self.get_network_az_by_net_id(context, network_id) return net_az.dhcp_relay_servers def _get_port_relay_services(self): # DHCP services: UDP 67, 68, 2535 #TODO(asarfaty): use configurable ports service1 = self.nsxlib.firewall_section.get_nsservice( nsxlib_consts.L4_PORT_SET_NSSERVICE, l4_protocol=nsxlib_consts.UDP, destination_ports=['67-68']) 
service2 = self.nsxlib.firewall_section.get_nsservice( nsxlib_consts.L4_PORT_SET_NSSERVICE, l4_protocol=nsxlib_consts.UDP, destination_ports=['2535']) return [service1, service2] def get_extra_fw_rules(self, context, router_id, port_id=None): """Return firewall rules that should be added to the router firewall This method should return a list of allow firewall rules that are required in order to enable different plugin features with north/south traffic. The returned rules will be added after the FWaaS rules, and before the default drop rule. if port_id is specified, only rules relevant for this router interface port should be returned, and the rules should be ingress/egress (but not both) and include the source/dest nsx logical port. """ # TODO(asarfaty) support only cases with port_id, as FWaaS v1 is no # longer supported extra_rules = [] # DHCP relay rules: # get the list of relevant relay servers elv_ctx = context.elevated() if port_id: relay_servers = self._get_port_relay_servers(elv_ctx, port_id) else: relay_servers = [] filters = {'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF], 'device_id': [router_id]} ports = self.get_ports(elv_ctx, filters=filters) for port in ports: port_relay_servers = self._get_port_relay_servers( elv_ctx, port['id'], network_id=port['network_id']) if port_relay_servers: relay_servers.extend(port_relay_servers) # Add rules to allow dhcp traffic relay servers if relay_servers: # if it is a single port, the source/dest is this logical switch if port_id: nsx_ls_id, _nsx_port_id = nsx_db.get_nsx_switch_and_port_id( context.session, port_id) port_target = [{'target_type': 'LogicalSwitch', 'target_id': nsx_ls_id}] else: port_target = None # translate the relay server ips to the firewall format relay_target = [] if self.fwaas_callbacks: relay_target = (self.fwaas_callbacks.fwaas_driver. 
translate_addresses_to_target(set(relay_servers), self.plugin_type())) dhcp_services = self._get_port_relay_services() # ingress rule extra_rules.append({ 'display_name': "DHCP Relay ingress traffic", 'action': nsxlib_consts.FW_ACTION_ALLOW, 'sources': relay_target, 'destinations': port_target, 'services': dhcp_services, 'direction': 'IN'}) # egress rule extra_rules.append({ 'display_name': "DHCP Relay egress traffic", 'action': nsxlib_consts.FW_ACTION_ALLOW, 'destinations': relay_target, 'sources': port_target, 'services': dhcp_services, 'direction': 'OUT'}) # VPN rules: vpn_plugin = directory.get_plugin(plugin_const.VPN) if vpn_plugin: vpn_driver = vpn_plugin.drivers[vpn_plugin.default_provider] vpn_rules = ( vpn_driver._generate_ipsecvpn_firewall_rules( self.plugin_type(), context, router_id=router_id)) if vpn_rules: extra_rules.extend(vpn_rules) return extra_rules def _get_ports_and_address_groups(self, context, router_id, network_id, exclude_sub_ids=None): exclude_sub_ids = [] if not exclude_sub_ids else exclude_sub_ids address_groups = [] network_ports = self._get_router_interface_ports_by_network( context, router_id, network_id) ports = [] for port in network_ports: if port['fixed_ips']: add_port = False for fip in port['fixed_ips']: if fip['subnet_id'] not in exclude_sub_ids: add_port = True if add_port: ports.append(port) for port in ports: for fip in port['fixed_ips']: address_group = {} gateway_ip = fip['ip_address'] subnet = self.get_subnet(context, fip['subnet_id']) prefixlen = str(netaddr.IPNetwork(subnet['cidr']).prefixlen) address_group['ip_addresses'] = [gateway_ip] address_group['prefix_length'] = prefixlen address_groups.append(address_group) return (ports, address_groups) @nsx_plugin_common.api_replay_mode_wrapper def add_router_interface(self, context, router_id, interface_info): # In case on dual stack, neutron creates a separate interface per # IP version subnet = self._get_interface_subnet(context, interface_info) network_id = 
self._get_interface_network_id(context, interface_info, subnet=subnet) extern_net = self._network_is_external(context, network_id) overlay_net = self._is_overlay_network(context, network_id) router_db = self._get_router(context, router_id) gw_network_id = (router_db.gw_port.network_id if router_db.gw_port else None) with locking.LockManager.get_lock(str(network_id)): # disallow more than one subnets belong to same network being # attached to routers self._validate_multiple_subnets_routers( context, router_id, network_id, subnet) # A router interface cannot be an external network if extern_net: msg = _("An external network cannot be attached as " "an interface to a router") raise n_exc.InvalidInput(error_message=msg) # Non overlay networks should be configured with a centralized # router, which is allowed only if GW network is attached if not overlay_net and not gw_network_id: msg = _("A router attached to a VLAN backed network " "must have an external network assigned") raise n_exc.InvalidInput(error_message=msg) # Interface subnets cannot overlap with the GW external subnet self._validate_gw_overlap_interfaces(context, gw_network_id, [network_id]) # Update the interface of the neutron router info = super(NsxV3Plugin, self).add_router_interface( context, router_id, interface_info) try: nsx_net_id, nsx_port_id = nsx_db.get_nsx_switch_and_port_id( context.session, info['port_id']) # If it is a no-snat router, interface address scope must be the # same as the gateways self._validate_interface_address_scope(context, router_db, subnet) nsx_router_id = nsx_db.get_nsx_router_id(context.session, router_id) _ports, address_groups = self._get_ports_and_address_groups( context, router_id, network_id) display_name = utils.get_name_and_uuid( subnet['name'] or 'subnet', subnet['id']) tags = self.nsxlib.build_v3_tags_payload( {'id': info['port_id'], 'project_id': context.project_id}, resource_type='os-neutron-rport-id', project_name=context.tenant_name) tags.append({'scope': 
'os-subnet-id', 'tag': subnet['id']}) # Add the dhcp relay service to the NSX interface relay_service = None if subnet['enable_dhcp']: net_az = self.get_network_az_by_net_id(context, network_id) relay_service = net_az.dhcp_relay_service resource_type = (None if overlay_net else nsxlib_consts.LROUTERPORT_CENTRALIZED) # Validate the TZ of the new subnet match the one of the router tier0_uuid = self._get_tier0_uuid_by_router(context.elevated(), router_db) self._validate_router_tz(context.elevated(), tier0_uuid, [subnet]) # create the interface ports on the NSX self.nsxlib.router.create_logical_router_intf_port_by_ls_id( logical_router_id=nsx_router_id, display_name=display_name, tags=tags, ls_id=nsx_net_id, logical_switch_port_id=nsx_port_id, address_groups=address_groups, relay_service_uuid=relay_service, resource_type=resource_type) if router_db.gw_port and not router_db.enable_snat: # TODO(berlin): Announce the subnet on tier0 if enable_snat # is False pass if not cfg.CONF.nsx_v3.native_dhcp_metadata: # Ensure the NSX logical router has a connection to a # 'metadata access' network (with a proxy listening on # its DHCP port), by creating it if needed. 
nsx_rpc.handle_router_metadata_access(self, context, router_id, interface=info) # add the SNAT/NO_DNAT rules for this interface if router_db.enable_snat and gw_network_id: if router_db.gw_port.get('fixed_ips'): gw_address_scope = self._get_network_address_scope( context, gw_network_id) for fip in router_db.gw_port['fixed_ips']: gw_ip = fip['ip_address'] self._add_subnet_snat_rule( context, router_id, nsx_router_id, subnet, gw_address_scope, gw_ip) self._add_subnet_no_dnat_rule(context, nsx_router_id, subnet) # update firewall rules self.update_router_firewall(context, router_id) except Exception: with excutils.save_and_reraise_exception(): LOG.error("Neutron failed to add_router_interface on " "router %s, and would try to rollback.", router_id) try: self.remove_router_interface( context, router_id, interface_info) except Exception: # rollback also failed LOG.error("Neutron rollback failed to remove router " "interface on router %s.", router_id) return info def remove_router_interface(self, context, router_id, interface_info): self._validate_interface_info(interface_info, for_removal=True) # Get the interface port & subnet subnet = None subnet_id = None port_id = None network_id = None if 'port_id' in interface_info: port_id = interface_info['port_id'] # Find subnet_id which is needed for removing the SNAT rule port = self._get_port(context, port_id) network_id = port['network_id'] if port.get('fixed_ips'): for fip in port['fixed_ips']: subnet_id = fip['subnet_id'] self._confirm_router_interface_not_in_use( context, router_id, subnet_id) if not (port['device_owner'] in const.ROUTER_INTERFACE_OWNERS and port['device_id'] == router_id): raise l3_exc.RouterInterfaceNotFound( router_id=router_id, port_id=port_id) elif 'subnet_id' in interface_info: subnet_id = interface_info['subnet_id'] self._confirm_router_interface_not_in_use( context, router_id, subnet_id) subnet = self._get_subnet(context, subnet_id) network_id = subnet['network_id'] ports = 
self._get_router_interface_ports_by_network( context, router_id, network_id) for p in ports: fip_subnet_ids = [fixed_ip['subnet_id'] for fixed_ip in p['fixed_ips']] if subnet_id in fip_subnet_ids: port_id = p['id'] break else: raise l3_exc.RouterInterfaceNotFoundForSubnet( router_id=router_id, subnet_id=subnet_id) try: # TODO(berlin): Revocate announce the subnet on tier0 if # enable_snat is False router_db = self._get_router(context, router_id) if router_db.gw_port and not router_db.enable_snat: pass nsx_net_id, _nsx_port_id = nsx_db.get_nsx_switch_and_port_id( context.session, port_id) if not subnet: subnet = self._get_subnet(context, subnet_id) ports, address_groups = self._get_ports_and_address_groups( context, router_id, network_id, exclude_sub_ids=[subnet_id]) nsx_router_id = nsx_db.get_nsx_router_id( context.session, router_id) if len(ports) >= 1: new_using_port_id = ports[0]['id'] _net_id, new_nsx_port_id = nsx_db.get_nsx_switch_and_port_id( context.session, new_using_port_id) self.nsxlib.logical_router_port.update_by_lswitch_id( nsx_router_id, nsx_net_id, linked_logical_switch_port_id={ 'target_id': new_nsx_port_id}, subnets=address_groups) else: self.nsxlib.logical_router_port.delete_by_lswitch_id( nsx_net_id) # try to delete the SNAT/NO_DNAT rules of this subnet if router_db.gw_port and router_db.enable_snat: if router_db.gw_port.get('fixed_ips'): for fixed_ip in router_db.gw_port['fixed_ips']: gw_ip = fixed_ip['ip_address'] self.nsxlib.router.delete_gw_snat_rule_by_source( nsx_router_id, gw_ip, subnet['cidr'], skip_not_found=True) self._del_subnet_no_dnat_rule(context, nsx_router_id, subnet) except nsx_lib_exc.ResourceNotFound: LOG.error("router port on router %(router_id)s for net " "%(net_id)s not found at the backend", {'router_id': router_id, 'net_id': network_id}) # inform the FWaaS that interface port was removed if self.fwaas_callbacks: self.fwaas_callbacks.delete_port(context, port_id) info = super(NsxV3Plugin, self).remove_router_interface( 
context, router_id, interface_info) if not cfg.CONF.nsx_v3.native_dhcp_metadata: # Ensure the connection to the 'metadata access network' is removed # (with the network) if this is the last DHCP-disabled subnet on # the router. nsx_rpc.handle_router_metadata_access(self, context, router_id) # update firewall rules self.update_router_firewall(context, router_id) return info def _update_lb_vip(self, port, vip_address): # update the load balancer virtual server's VIP with # floating ip, but don't add NAT rules device_id = port['device_id'] if device_id.startswith(oct_const.DEVICE_ID_PREFIX): device_id = device_id[len(oct_const.DEVICE_ID_PREFIX):] lb_tag = [{'scope': 'os-lbaas-lb-id', 'tag': device_id}] vs_list = self.nsxlib.search_by_tags( tags=lb_tag, resource_type='LbVirtualServer') if vs_list['results']: vs_client = self.nsxlib.load_balancer.virtual_server for vs in vs_list['results']: vs_client.update_virtual_server_with_vip(vs['id'], vip_address) def create_floatingip(self, context, floatingip): # First do some validations fip_data = floatingip['floatingip'] port_id = fip_data.get('port_id') if port_id: port_data = self.get_port(context, port_id) self._assert_on_assoc_floatingip_to_special_ports( fip_data, port_data) # create the neutron fip new_fip = self._create_floating_ip_wrapper(context, floatingip) router_id = new_fip['router_id'] if not router_id: return new_fip if port_id: device_owner = port_data.get('device_owner') fip_address = new_fip['floating_ip_address'] if (device_owner == const.DEVICE_OWNER_LOADBALANCERV2 or device_owner == oct_const.DEVICE_OWNER_OCTAVIA or device_owner == lb_const.VMWARE_LB_VIP_OWNER): try: self._update_lb_vip(port_data, fip_address) except nsx_lib_exc.ManagerError: with excutils.save_and_reraise_exception(): super(NsxV3Plugin, self).delete_floatingip( context, new_fip['id']) return new_fip try: nsx_router_id = nsx_db.get_nsx_router_id(context.session, router_id) self.nsxlib.router.add_fip_nat_rules( nsx_router_id, 
new_fip['floating_ip_address'], new_fip['fixed_ip_address'], bypass_firewall=False) except nsx_lib_exc.ManagerError: with excutils.save_and_reraise_exception(): self.delete_floatingip(context, new_fip['id']) return new_fip def delete_floatingip(self, context, fip_id): fip = self.get_floatingip(context, fip_id) router_id = fip['router_id'] port_id = fip['port_id'] is_lb_port = False if port_id: port_data = self.get_port(context, port_id) device_owner = port_data.get('device_owner') fixed_ip_address = fip['fixed_ip_address'] if (device_owner == const.DEVICE_OWNER_LOADBALANCERV2 or device_owner == oct_const.DEVICE_OWNER_OCTAVIA or device_owner == lb_const.VMWARE_LB_VIP_OWNER): # If the port is LB VIP port, after deleting the FIP, # update the virtual server VIP back to fixed IP. is_lb_port = True try: self._update_lb_vip(port_data, fixed_ip_address) except nsx_lib_exc.ManagerError as e: LOG.error("Exception when updating vip ip_address" "on vip_port %(port)s: %(err)s", {'port': port_id, 'err': e}) if router_id and not is_lb_port: try: nsx_router_id = nsx_db.get_nsx_router_id(context.session, router_id) self.nsxlib.router.delete_fip_nat_rules( nsx_router_id, fip['floating_ip_address'], fip['fixed_ip_address']) except nsx_lib_exc.ResourceNotFound: LOG.warning("Backend NAT rules for fip: %(fip_id)s " "(ext_ip: %(ext_ip)s int_ip: %(int_ip)s) " "not found", {'fip_id': fip_id, 'ext_ip': fip['floating_ip_address'], 'int_ip': fip['fixed_ip_address']}) super(NsxV3Plugin, self).delete_floatingip(context, fip_id) def update_floatingip(self, context, fip_id, floatingip): fip_data = floatingip['floatingip'] old_fip = self.get_floatingip(context, fip_id) old_port_id = old_fip['port_id'] new_status = (const.FLOATINGIP_STATUS_ACTIVE if fip_data.get('port_id') else const.FLOATINGIP_STATUS_DOWN) updated_port_id = fip_data.get('port_id') if updated_port_id: updated_port_data = self.get_port(context, updated_port_id) self._assert_on_assoc_floatingip_to_special_ports( fip_data, 
updated_port_data) new_fip = super(NsxV3Plugin, self).update_floatingip( context, fip_id, floatingip) router_id = new_fip['router_id'] new_port_id = new_fip['port_id'] try: is_lb_port = False if old_port_id: old_port_data = self.get_port(context, old_port_id) old_device_owner = old_port_data['device_owner'] old_fixed_ip = old_fip['fixed_ip_address'] if (old_device_owner == const.DEVICE_OWNER_LOADBALANCERV2 or old_device_owner == oct_const.DEVICE_OWNER_OCTAVIA or old_device_owner == lb_const.VMWARE_LB_VIP_OWNER): is_lb_port = True self._update_lb_vip(old_port_data, old_fixed_ip) # Delete old router's fip rules if old_router_id is not None. if old_fip['router_id'] and not is_lb_port: try: old_nsx_router_id = nsx_db.get_nsx_router_id( context.session, old_fip['router_id']) self.nsxlib.router.delete_fip_nat_rules( old_nsx_router_id, old_fip['floating_ip_address'], old_fip['fixed_ip_address']) except nsx_lib_exc.ResourceNotFound: LOG.warning("Backend NAT rules for fip: %(fip_id)s " "(ext_ip: %(ext_ip)s int_ip: %(int_ip)s) " "not found", {'fip_id': old_fip['id'], 'ext_ip': old_fip['floating_ip_address'], 'int_ip': old_fip['fixed_ip_address']}) # Update LB VIP if the new port is LB port is_lb_port = False if new_port_id: new_port_data = self.get_port(context, new_port_id) new_dev_own = new_port_data['device_owner'] new_fip_address = new_fip['floating_ip_address'] if (new_dev_own == const.DEVICE_OWNER_LOADBALANCERV2 or new_dev_own == oct_const.DEVICE_OWNER_OCTAVIA or new_dev_own == lb_const.VMWARE_LB_VIP_OWNER): is_lb_port = True self._update_lb_vip(new_port_data, new_fip_address) # TODO(berlin): Associating same FIP to different internal IPs # would lead to creating multiple times of FIP nat rules at the # backend. Let's see how to fix the problem latter. # Update current router's nat rules if router_id is not None. 
if router_id and not is_lb_port: nsx_router_id = nsx_db.get_nsx_router_id(context.session, router_id) self.nsxlib.router.add_fip_nat_rules( nsx_router_id, new_fip['floating_ip_address'], new_fip['fixed_ip_address'], bypass_firewall=False) except nsx_lib_exc.ManagerError: with excutils.save_and_reraise_exception(): super(NsxV3Plugin, self).update_floatingip( context, fip_id, {'floatingip': {'port_id': old_port_id}}) self.update_floatingip_status(context, fip_id, const.FLOATINGIP_STATUS_ERROR) if new_fip['status'] != new_status: new_fip['status'] = new_status self.update_floatingip_status(context, fip_id, new_status) return new_fip def disassociate_floatingips(self, context, port_id): fip_qry = context.session.query(l3_db_models.FloatingIP) fip_dbs = fip_qry.filter_by(fixed_port_id=port_id) for fip_db in fip_dbs: if not fip_db.router_id: continue try: nsx_router_id = nsx_db.get_nsx_router_id(context.session, fip_db.router_id) self.nsxlib.router.delete_fip_nat_rules( nsx_router_id, fip_db.floating_ip_address, fip_db.fixed_ip_address) except nsx_lib_exc.ResourceNotFound: LOG.warning("Backend NAT rules for fip: %(fip_id)s " "(ext_ip: %(ext_ip)s int_ip: %(int_ip)s) " "not found", {'fip_id': fip_db.id, 'ext_ip': fip_db.floating_ip_address, 'int_ip': fip_db.fixed_ip_address}) self.update_floatingip_status(context, fip_db.id, const.FLOATINGIP_STATUS_DOWN) super(NsxV3Plugin, self).disassociate_floatingips( context, port_id, do_notify=False) def _create_fw_section_for_secgroup(self, nsgroup, is_provider): # NOTE(arosen): if a security group is provider we want to # insert our rules at the top. operation = (nsxlib_consts.FW_INSERT_TOP if is_provider else nsxlib_consts.FW_INSERT_BEFORE) # security-group rules are located in a dedicated firewall section. 
firewall_section = ( self.nsxlib.firewall_section.create_empty( nsgroup.get('display_name'), nsgroup.get('description'), [nsgroup.get('id')], nsgroup.get('tags'), operation=operation, other_section=self.default_section)) return firewall_section def _create_security_group_backend_resources(self, secgroup): tags = self.nsxlib.build_v3_tags_payload( secgroup, resource_type='os-neutron-secgr-id', project_name=secgroup['tenant_id']) name = self.nsxlib.ns_group.get_name(secgroup) tag_expression = ( self.nsxlib.ns_group.get_port_tag_expression( security.PORT_SG_SCOPE, secgroup['id'])) ns_group = self.nsxlib.ns_group.create( name, secgroup['description'], tags, tag_expression) # security-group rules are located in a dedicated firewall section. firewall_section = self._create_fw_section_for_secgroup( ns_group, secgroup.get(provider_sg.PROVIDER)) return ns_group, firewall_section def _create_firewall_rules(self, context, section_id, nsgroup_id, logging_enabled, action, sg_rules): # since the nsxlib does not have access to the nsx db, # we need to provide a mapping for the remote nsgroup ids. 
ruleid_2_remote_nsgroup_map = {} _sg_rules = copy.deepcopy(sg_rules) for sg_rule in _sg_rules: self._fix_sg_rule_dict_ips(sg_rule) remote_nsgroup_id = None remote_group_id = sg_rule.get('remote_group_id') # skip unnecessary db access when possible if remote_group_id == sg_rule['security_group_id']: remote_nsgroup_id = nsgroup_id elif remote_group_id: remote_nsgroup_id = nsx_db.get_nsx_security_group_id( context.session, remote_group_id) ruleid_2_remote_nsgroup_map[sg_rule['id']] = remote_nsgroup_id return self.nsxlib.firewall_section.create_section_rules( section_id, nsgroup_id, logging_enabled, action, _sg_rules, ruleid_2_remote_nsgroup_map) def create_security_group(self, context, security_group, default_sg=False): secgroup = security_group['security_group'] secgroup['id'] = secgroup.get('id') or uuidutils.generate_uuid() ns_group = {} firewall_section = {} if not default_sg: tenant_id = secgroup['tenant_id'] self._ensure_default_security_group(context, tenant_id) try: ns_group, firewall_section = ( self._create_security_group_backend_resources(secgroup)) # REVISIT(roeyc): Ideally, at this point we need not be under an # open db transactions, however, unittests fail if omitting # subtransactions=True. with db_api.CONTEXT_WRITER.using(context): # NOTE(arosen): a neutron security group be default adds rules # that allow egress traffic. 
We do not want this behavior for # provider security_groups if secgroup.get(provider_sg.PROVIDER) is True: secgroup_db = self.create_provider_security_group( context, security_group) else: secgroup_db = ( super(NsxV3Plugin, self).create_security_group( context, security_group, default_sg)) nsx_db.save_sg_mappings(context, secgroup_db['id'], ns_group['id'], firewall_section['id']) self._process_security_group_properties_create(context, secgroup_db, secgroup, default_sg) if cfg.CONF.api_replay_mode: self._handle_api_replay_default_sg(context, secgroup_db) except nsx_lib_exc.ManagerError: with excutils.save_and_reraise_exception(): LOG.exception("Unable to create security-group on the " "backend.") if ns_group: self.nsxlib.ns_group.delete(ns_group['id']) except Exception: with excutils.save_and_reraise_exception(): section_id = firewall_section.get('id') nsgroup_id = ns_group.get('id') LOG.debug("Neutron failed to create security-group, " "deleting backend resources: " "section %s, ns-group %s.", section_id, nsgroup_id) if nsgroup_id: self.nsxlib.ns_group.delete(nsgroup_id) if section_id: self.nsxlib.firewall_section.delete(section_id) try: sg_rules = secgroup_db['security_group_rules'] # skip if there are no rules in group. i.e provider case if sg_rules: # translate and creates firewall rules. logging = ( cfg.CONF.nsx_v3.log_security_groups_allowed_traffic or secgroup.get(sg_logging.LOGGING, False)) action = (nsxlib_consts.FW_ACTION_DROP if secgroup.get(provider_sg.PROVIDER) else nsxlib_consts.FW_ACTION_ALLOW) rules = self._create_firewall_rules( context, firewall_section['id'], ns_group['id'], logging, action, sg_rules) self.save_security_group_rule_mappings(context, rules['rules']) except nsx_lib_exc.ManagerError as ex: msg = ("Failed to create backend firewall rules " "for security-group %(name)s (%(id)s), " "rolling back changes." 
% secgroup_db) LOG.exception(msg) # default security group deletion requires admin context if default_sg: context = context.elevated() super(NsxV3Plugin, self).delete_security_group( context, secgroup_db['id']) self.nsxlib.ns_group.delete(ns_group['id']) self.nsxlib.firewall_section.delete(firewall_section['id']) if ex.__class__ is nsx_lib_exc.ResourceNotFound: # This may happen due to race condition during # backend reboot. The exception raised should reflect # short-term availability issue (500) rather than 404 raise nsx_exc.NsxPluginTemporaryError(err_msg=msg) else: raise ex return secgroup_db def _prevent_nsx_internal_sg_modification(self, sg_id): if sg_id == v3_utils.NSX_V3_OS_DFW_UUID: msg = _("Cannot modify NSX internal security group") raise n_exc.InvalidInput(error_message=msg) def update_security_group(self, context, id, security_group): orig_secgroup = self.get_security_group( context, id, fields=['id', 'name', 'description']) self._prevent_non_admin_edit_provider_sg(context, id) self._prevent_nsx_internal_sg_modification(id) with db_api.CONTEXT_WRITER.using(context): secgroup_res = ( super(NsxV3Plugin, self).update_security_group(context, id, security_group)) self._process_security_group_properties_update( context, secgroup_res, security_group['security_group']) try: nsgroup_id, section_id = nsx_db.get_sg_mappings( context.session, id) self.nsxlib.ns_group.update_nsgroup_and_section( secgroup_res, nsgroup_id, section_id, cfg.CONF.nsx_v3.log_security_groups_allowed_traffic) except nsx_lib_exc.ManagerError: with excutils.save_and_reraise_exception(): LOG.exception("Failed to update security-group %(name)s " "(%(id)s), rolling back changes in " "Neutron.", orig_secgroup) super(NsxV3Plugin, self).update_security_group( context, id, {'security_group': orig_secgroup}) return secgroup_res def delete_security_group(self, context, id): self._prevent_non_admin_edit_provider_sg(context, id) self._prevent_nsx_internal_sg_modification(id) nsgroup_id, section_id = 
nsx_db.get_sg_mappings( context.session, id) super(NsxV3Plugin, self).delete_security_group(context, id) self.nsxlib.firewall_section.delete(section_id) self.nsxlib.ns_group.delete(nsgroup_id) def create_security_group_rule(self, context, security_group_rule): bulk_rule = {'security_group_rules': [security_group_rule]} return self.create_security_group_rule_bulk(context, bulk_rule)[0] def create_security_group_rule_bulk(self, context, security_group_rules): sg_rules = security_group_rules['security_group_rules'] for r in sg_rules: self._check_local_ip_prefix(context, r['security_group_rule']) # Generate id for security group rule or use one specified, # if specified we are running in api-replay as server doesn't # allow id to be specified by default r['security_group_rule']['id'] = ( r['security_group_rule'].get('id') or uuidutils.generate_uuid()) with db_api.CONTEXT_WRITER.using(context): rules_db = (super(NsxV3Plugin, self).create_security_group_rule_bulk_native( context, security_group_rules)) for i, r in enumerate(sg_rules): self._process_security_group_rule_properties( context, rules_db[i], r['security_group_rule']) # NOTE(arosen): here are assuming that all of the security # group rules being added are part of the same security # group. We should be validating that this is the case though... sg_id = sg_rules[0]['security_group_rule']['security_group_id'] self._prevent_non_admin_edit_provider_sg(context, sg_id) self._prevent_nsx_internal_sg_modification(sg_id) security_group = self.get_security_group( context, sg_id) action = nsxlib_consts.FW_ACTION_ALLOW if security_group.get(provider_sg.PROVIDER) is True: # provider security groups are drop rules. 
action = nsxlib_consts.FW_ACTION_DROP sg_id = rules_db[0]['security_group_id'] nsgroup_id, section_id = nsx_db.get_sg_mappings(context.session, sg_id) logging_enabled = ( cfg.CONF.nsx_v3.log_security_groups_allowed_traffic or self._is_security_group_logged(context, sg_id)) try: rules = self._create_firewall_rules( context, section_id, nsgroup_id, logging_enabled, action, rules_db) except nsx_lib_exc.ManagerError: with excutils.save_and_reraise_exception(): for rule in rules_db: super(NsxV3Plugin, self).delete_security_group_rule( context, rule['id']) self.save_security_group_rule_mappings(context, rules['rules']) return rules_db def delete_security_group_rule(self, context, id): rule_db = self._get_security_group_rule(context, id) sg_id = rule_db['security_group_id'] self._prevent_non_admin_edit_provider_sg(context, sg_id) self._prevent_nsx_internal_sg_modification(sg_id) nsgroup_id, section_id = nsx_db.get_sg_mappings(context.session, sg_id) fw_rule_id = nsx_db.get_sg_rule_mapping(context.session, id) self.nsxlib.firewall_section.delete_rule(section_id, fw_rule_id) super(NsxV3Plugin, self).delete_security_group_rule(context, id) def save_security_group_rule_mappings(self, context, firewall_rules): rules = [(rule['display_name'], rule['id']) for rule in firewall_rules] nsx_db.save_sg_rule_mappings(context.session, rules) def recalculate_snat_rules_for_router(self, context, router, subnets): """Recalculate router snat rules for specific subnets. Invoked when subnetpool address scope changes. 
""" nsx_router_id = nsx_db.get_nsx_router_id(context.session, router['id']) if not router['external_gateway_info']: return LOG.info("Recalculating snat rules for router %s", router['id']) fips = router['external_gateway_info']['external_fixed_ips'] ext_addrs = [fip['ip_address'] for fip in fips] gw_address_scope = self._get_network_address_scope( context, router['external_gateway_info']['network_id']) # TODO(annak): improve amount of backend calls by rebuilding all # snat rules when API is available for subnet in subnets: if gw_address_scope: subnet_address_scope = self._get_subnetpool_address_scope( context, subnet['subnetpool_id']) LOG.info("Deleting SNAT rule for %(router)s " "and subnet %(subnet)s", {'router': router['id'], 'subnet': subnet['id']}) # Delete rule for this router/subnet pair if it exists for ext_addr in ext_addrs: self.nsxlib.router.delete_gw_snat_rule_by_source( nsx_router_id, ext_addr, subnet['cidr'], skip_not_found=True) if (gw_address_scope != subnet_address_scope): # subnet is no longer under same address scope with GW LOG.info("Adding SNAT rule for %(router)s " "and subnet %(subnet)s", {'router': router['id'], 'subnet': subnet['id']}) self.nsxlib.router.add_gw_snat_rule( nsx_router_id, ext_addr, source_net=subnet['cidr'], bypass_firewall=False) def _get_tier0_uplink_cidrs(self, tier0_id): # return a list of tier0 uplink ip/prefix addresses return self.nsxlib.logical_router_port.get_tier0_uplink_cidrs( tier0_id) def _get_neutron_net_ids_by_nsx_id(self, context, lswitch_id): return nsx_db.get_net_ids(context.session, lswitch_id) def _get_net_dhcp_relay(self, context, net_id): return self.get_network_az_by_net_id( context, net_id).dhcp_relay_service def update_port_nsx_tags(self, context, port_id, tags, is_delete=False): """Update backend NSX port with tags from the tagging plugin""" ctx = q_context.get_admin_context() _, nsx_lport_id = nsx_db.get_nsx_switch_and_port_id( ctx.session, port_id) if not nsx_lport_id: LOG.info("Ignoring tags on 
port %s: this port has no backend " "NSX logical port", port_id) return # Get the current tags on this port lport = self.nsxlib.logical_port.get(nsx_lport_id) port_tags = lport.get('tags') orig_len = len(port_tags) # Update and validate the list of tags extra_tags = self._translate_external_tags(tags, port_id) if is_delete: port_tags = [tag for tag in port_tags if tag not in extra_tags] else: port_tags.extend( [tag for tag in extra_tags if tag not in port_tags]) if len(port_tags) > nsxlib_utils.MAX_TAGS: LOG.warning("Cannot add external tags to port %s: " "too many tags", port_id) # Update the NSX port if len(port_tags) != orig_len: self.nsxlib.logical_port.update( nsx_lport_id, False, tags=port_tags) def _validate_net_mdproxy_tz(self, az, tz_uuid, mdproxy_uuid): """Validate that the network TZ matches the mdproxy edge cluster""" mdproxy_obj = self.nsxlib.native_md_proxy.get(mdproxy_uuid) ec_id = mdproxy_obj['edge_cluster_id'] ec_nodes = self.nsxlib.edge_cluster.get_transport_nodes(ec_id) ec_tzs = [] for tn_uuid in ec_nodes: ec_tzs.extend(self.nsxlib.transport_node.get_transport_zones( tn_uuid)) if tz_uuid not in ec_tzs: return False return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/plugins/nsx_v3/utils.py0000644000175000017500000005327100000000000024013 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import os import random import re from oslo_config import cfg from oslo_context import context as context_utils from oslo_log import log as logging from oslo_utils import fileutils from sqlalchemy.orm import exc from neutron.db.models import securitygroup from neutron import version as n_version from neutron_lib.api.definitions import allowedaddresspairs as addr_apidef from neutron_lib import constants as const from neutron_lib import context as q_context from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.db import db as nsx_db from vmware_nsx.db import nsx_models from vmware_nsx.plugins.nsx_v3 import cert_utils from vmware_nsx.services.qos.common import utils as qos_utils from vmware_nsxlib import v3 from vmware_nsxlib.v3 import client_cert from vmware_nsxlib.v3 import config from vmware_nsxlib.v3 import core_resources from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3 import nsx_constants from vmware_nsxlib.v3 import policy NSX_NEUTRON_PLUGIN = 'NSX Neutron plugin' OS_NEUTRON_ID_SCOPE = 'os-neutron-id' NSX_V3_PSEC_PROFILE_NAME = 'neutron_port_spoof_guard_profile' NSX_V3_DHCP_PROFILE_NAME = 'neutron_port_dhcp_profile' PORT_ERROR_TYPE_MISSING = "Missing port" PORT_ERROR_TYPE_PROFILE = "Wrong switching profiles" PORT_ERROR_TYPE_BINDINGS = "Wrong address binding" # Default UUID for the global OS rule NSX_V3_OS_DFW_UUID = '00000000-def0-0000-0fed-000000000000' LOG = logging.getLogger(__name__) class DbCertProvider(client_cert.ClientCertProvider): """Write cert data from DB to file and delete after use New provider object with random filename is created for each request. This is not most efficient, but the safest way to avoid race conditions, since backend connections can occur both before and after neutron fork, and several concurrent requests can occupy the same thread. 
Note that new cert filename for each request does not result in new connection for each request (at least for now..) """ EXPIRATION_ALERT_DAYS = 30 # days prior to expiration def __init__(self): super(DbCertProvider, self).__init__(None) random.seed() self._filename = '/tmp/.' + str(random.randint(1, 10000000)) def _check_expiration(self, expires_in_days): if expires_in_days > self.EXPIRATION_ALERT_DAYS: return if expires_in_days < 0: LOG.error("Client certificate has expired %d days ago.", expires_in_days * -1) else: LOG.warning("Client certificate expires in %d days. " "Once expired, service will become unavailable.", expires_in_days) def __enter__(self): try: context = q_context.get_admin_context() db_storage_driver = cert_utils.DbCertificateStorageDriver( context) with client_cert.ClientCertificateManager( cert_utils.NSX_OPENSTACK_IDENTITY, None, db_storage_driver) as cert_manager: if not cert_manager.exists(): msg = _("Unable to load from nsx-db") raise nsx_exc.ClientCertificateException(err_msg=msg) filename = self._filename if not os.path.exists(os.path.dirname(filename)): if len(os.path.dirname(filename)) > 0: fileutils.ensure_tree(os.path.dirname(filename)) cert_manager.export_pem(filename) expires_in_days = cert_manager.expires_in_days() self._check_expiration(expires_in_days) except Exception as e: self._on_exit() raise e return self def _on_exit(self): if os.path.isfile(self._filename): os.remove(self._filename) self._filename = None def __exit__(self, type, value, traceback): self._on_exit() def filename(self): return self._filename def get_client_cert_provider(conf_path=cfg.CONF.nsx_v3): if not conf_path.nsx_use_client_auth: return None if conf_path.nsx_client_cert_storage.lower() == 'none': # Admin is responsible for providing cert file, the plugin # should not touch it return client_cert.ClientCertProvider( conf_path.nsx_client_cert_file) if conf_path.nsx_client_cert_storage.lower() == 'nsx-db': # Cert data is stored in DB, and written to file 
system only # when new connection is opened, and deleted immediately after. return DbCertProvider def get_nsxlib_wrapper(nsx_username=None, nsx_password=None, basic_auth=False, plugin_conf=None, allow_overwrite_header=False): client_cert_provider = None if not basic_auth: # if basic auth requested, dont use cert file even if provided client_cert_provider = get_client_cert_provider() if not plugin_conf: plugin_conf = cfg.CONF.nsx_v3 nsxlib_config = config.NsxLibConfig( username=nsx_username or plugin_conf.nsx_api_user, password=nsx_password or plugin_conf.nsx_api_password, client_cert_provider=client_cert_provider, retries=plugin_conf.http_retries, insecure=plugin_conf.insecure, ca_file=plugin_conf.ca_file, concurrent_connections=plugin_conf.concurrent_connections, http_timeout=plugin_conf.http_timeout, http_read_timeout=plugin_conf.http_read_timeout, conn_idle_timeout=plugin_conf.conn_idle_timeout, http_provider=None, max_attempts=plugin_conf.retries, nsx_api_managers=plugin_conf.nsx_api_managers, plugin_scope=OS_NEUTRON_ID_SCOPE, plugin_tag=NSX_NEUTRON_PLUGIN, plugin_ver=n_version.version_info.release_string(), dns_nameservers=cfg.CONF.nsx_v3.nameservers, dns_domain=cfg.CONF.nsx_v3.dns_domain, allow_overwrite_header=allow_overwrite_header) return v3.NsxLib(nsxlib_config) def get_nsxpolicy_wrapper(nsx_username=None, nsx_password=None, basic_auth=False): #TODO(asarfaty) move to a different file? 
(under common_v3) client_cert_provider = None if not basic_auth: # if basic auth requested, dont use cert file even if provided client_cert_provider = get_client_cert_provider( conf_path=cfg.CONF.nsx_p) nsxlib_config = config.NsxLibConfig( username=nsx_username or cfg.CONF.nsx_p.nsx_api_user, password=nsx_password or cfg.CONF.nsx_p.nsx_api_password, client_cert_provider=client_cert_provider, retries=cfg.CONF.nsx_p.http_retries, insecure=cfg.CONF.nsx_p.insecure, ca_file=cfg.CONF.nsx_p.ca_file, concurrent_connections=cfg.CONF.nsx_p.concurrent_connections, http_timeout=cfg.CONF.nsx_p.http_timeout, http_read_timeout=cfg.CONF.nsx_p.http_read_timeout, conn_idle_timeout=cfg.CONF.nsx_p.conn_idle_timeout, http_provider=None, max_attempts=cfg.CONF.nsx_p.retries, nsx_api_managers=cfg.CONF.nsx_p.nsx_api_managers, plugin_scope=OS_NEUTRON_ID_SCOPE, plugin_tag=NSX_NEUTRON_PLUGIN, plugin_ver=n_version.version_info.release_string(), allow_passthrough=cfg.CONF.nsx_p.allow_passthrough, realization_max_attempts=cfg.CONF.nsx_p.realization_max_attempts, realization_wait_sec=cfg.CONF.nsx_p.realization_wait_sec) return policy.NsxPolicyLib(nsxlib_config) def get_orphaned_dhcp_servers(context, plugin, nsxlib, dhcp_profile_uuid=None): # An orphaned DHCP server means the associated neutron network # does not exist or has no DHCP-enabled subnet. orphaned_servers = [] server_net_pairs = [] # Find matching DHCP servers (for a given dhcp_profile_uuid). response = nsxlib.dhcp_server.list() for dhcp_server in response['results']: if (dhcp_profile_uuid and dhcp_server['dhcp_profile_id'] != dhcp_profile_uuid): continue found = False neutron_obj = False for tag in dhcp_server.get('tags', []): if tag['scope'] == 'os-neutron-net-id': dhcp_server['neutron_net_id'] = tag['tag'] server_net_pairs.append((dhcp_server, tag['tag'])) found = True if tag['scope'] == 'os-api-version': neutron_obj = True if not found and neutron_obj: # The associated neutron network is not defined. 
dhcp_server['neutron_net_id'] = None orphaned_servers.append(dhcp_server) # Check if there is DHCP-enabled subnet in each network. for dhcp_server, net_id in server_net_pairs: try: network = plugin.get_network(context, net_id) except Exception: # The associated neutron network is not found in DB. orphaned_servers.append(dhcp_server) continue dhcp_enabled = False for subnet_id in network['subnets']: subnet = plugin.get_subnet(context, subnet_id) if subnet['enable_dhcp']: dhcp_enabled = True break if not dhcp_enabled: orphaned_servers.append(dhcp_server) return orphaned_servers def delete_orphaned_dhcp_server(context, nsxlib, server): # Delete an orphaned DHCP server: # (1) delete the attached logical DHCP port, # (2) delete the logical DHCP server, # (3) clean corresponding neutron DB entry. # Return True if it was deleted, or false + error if not try: response = nsxlib.logical_port.get_by_attachment('DHCP_SERVICE', server['id']) if response and response['result_count'] > 0: nsxlib.logical_port.delete(response['results'][0]['id']) nsxlib.dhcp_server.delete(server['id']) net_id = server.get('neutron_net_id') if net_id: # Delete neutron_net_id -> dhcp_service_id mapping from the DB. 
nsx_db.delete_neutron_nsx_service_binding( context.session, net_id, nsx_constants.SERVICE_DHCP) return True, None except Exception as e: return False, e def get_orphaned_networks(context, nsxlib): nsx_switches = nsxlib.logical_switch.list()['results'] missing_networks = [] for nsx_switch in nsx_switches: # check if it exists in the neutron DB net_ids = nsx_db.get_net_ids(context.session, nsx_switch['id']) if not net_ids: # Skip non-neutron networks, by tags neutron_net = False for tag in nsx_switch.get('tags', []): if tag.get('scope') == 'os-neutron-net-id': neutron_net = True nsx_switch['neutron_net_id'] = tag['tag'] break if neutron_net: missing_networks.append(nsx_switch) return missing_networks def get_orphaned_routers(context, nsxlib): nsx_routers = nsxlib.logical_router.list()['results'] missing_routers = [] for nsx_router in nsx_routers: # check if it exists in the neutron DB neutron_id = nsx_db.get_neutron_from_nsx_router_id(context.session, nsx_router['id']) if not neutron_id: # Skip non-neutron routers, by tags for tag in nsx_router.get('tags', []): if tag.get('scope') == 'os-neutron-router-id': nsx_router['neutron_router_id'] = tag['tag'] missing_routers.append(nsx_router) break return missing_routers def delete_orphaned_router(nsxlib, nsx_id): # Delete an orphaned logical router from the NSX: # (1) delete the attached ports, # (2) delete the logical router # Return True if it was deleted, or false + error if not try: # first delete its ports ports = nsxlib.logical_router_port.get_by_router_id(nsx_id) for port in ports: nsxlib.logical_router_port.delete(port['id']) nsxlib.logical_router.delete(nsx_id) except Exception as e: return False, e else: return True, None def get_security_groups_mappings(context): q = context.session.query( securitygroup.SecurityGroup.name, securitygroup.SecurityGroup.id, nsx_models.NeutronNsxFirewallSectionMapping.nsx_id, nsx_models.NeutronNsxSecurityGroupMapping.nsx_id).join( nsx_models.NeutronNsxFirewallSectionMapping, 
nsx_models.NeutronNsxSecurityGroupMapping).all() sg_mappings = [{'name': mapp[0], 'id': mapp[1], 'section-id': mapp[2], 'nsx-securitygroup-id': mapp[3]} for mapp in q] return sg_mappings def get_orphaned_firewall_sections(context, nsxlib): fw_sections = nsxlib.firewall_section.list() sg_mappings = get_security_groups_mappings(context) orphaned_sections = [] for fw_section in fw_sections: for sg_db in sg_mappings: if fw_section['id'] == sg_db['section-id']: break else: # Skip non-neutron sections, by tags neutron_obj = False for tag in fw_section.get('tags', []): if tag['scope'] == 'os-api-version': neutron_obj = True if tag.get('scope') == 'os-neutron-secgr-id': fw_section['neutron_sg_id'] = tag['tag'] if neutron_obj: orphaned_sections.append(fw_section) return orphaned_sections def get_security_group_rules_mappings(context): q = context.session.query( securitygroup.SecurityGroupRule.id, nsx_models.NeutronNsxRuleMapping.nsx_id).join( nsx_models.NeutronNsxRuleMapping).all() sg_mappings = [{'rule_id': mapp[0], 'nsx_rule_id': mapp[1]} for mapp in q] return sg_mappings def get_orphaned_firewall_section_rules(context, nsxlib): fw_sections = nsxlib.firewall_section.list() sg_mappings = get_security_groups_mappings(context) rules_mappings = get_security_group_rules_mappings(context) orphaned_rules = [] nsx_rules_in_mappings = [r['nsx_rule_id'] for r in rules_mappings] for fw_section in fw_sections: for sg_db in sg_mappings: if (fw_section['id'] == sg_db['section-id'] and sg_db['id'] != NSX_V3_OS_DFW_UUID): # found the right neutron SG section_rules = nsxlib.firewall_section.get_rules( fw_section['id'])['results'] for nsx_rule in section_rules: if nsx_rule['id'] not in nsx_rules_in_mappings: # orphaned rule orphaned_rules.append( {'security-group-name': sg_db['name'], 'security-group-id': sg_db['id'], 'section-id': fw_section['id'], 'rule-id': nsx_rule['id']}) return orphaned_rules def get_dhcp_profile_id(nsxlib): profiles = nsxlib.switching_profile.find_by_display_name( 
NSX_V3_DHCP_PROFILE_NAME) if profiles and len(profiles) == 1: return profiles[0]['id'] LOG.warning("Could not find DHCP profile on backend") def get_spoofguard_profile_id(nsxlib): profiles = nsxlib.switching_profile.find_by_display_name( NSX_V3_PSEC_PROFILE_NAME) if profiles and len(profiles) == 1: return profiles[0]['id'] LOG.warning("Could not find Spoof Guard profile on backend") def add_profile_mismatch(problems, neutron_id, nsx_id, prf_id, title): msg = ('Wrong %(title)s profile %(prf_id)s') % {'title': title, 'prf_id': prf_id} problems.append({'neutron_id': neutron_id, 'nsx_id': nsx_id, 'error': msg, 'error_type': PORT_ERROR_TYPE_PROFILE}) def get_port_nsx_id(session, neutron_id): # get the nsx port id from the DB mapping try: mapping = (session.query(nsx_models.NeutronNsxPortMapping). filter_by(neutron_id=neutron_id). one()) return mapping['nsx_port_id'] except exc.NoResultFound: pass def get_mismatch_logical_ports(context, nsxlib, plugin, get_filters=None): neutron_ports = plugin.get_ports(context, filters=get_filters) # get pre-defined profile ids dhcp_profile_id = get_dhcp_profile_id(nsxlib) dhcp_profile_key = ( core_resources.SwitchingProfileTypes.SWITCH_SECURITY) spoofguard_profile_id = get_spoofguard_profile_id(nsxlib) spoofguard_profile_key = ( core_resources.SwitchingProfileTypes.SPOOF_GUARD) qos_profile_key = core_resources.SwitchingProfileTypes.QOS problems = [] for port in neutron_ports: neutron_id = port['id'] # get the network nsx id from the mapping table nsx_id = get_port_nsx_id(context.session, neutron_id) if not nsx_id: # skip external ports pass else: try: nsx_port = nsxlib.logical_port.get(nsx_id) except nsxlib_exc.ResourceNotFound: problems.append({'neutron_id': neutron_id, 'nsx_id': nsx_id, 'error': 'Missing from backend', 'error_type': PORT_ERROR_TYPE_MISSING}) continue # Port found on backend! # Check that it has all the expected switch profiles. 
# create a dictionary of the current profiles: profiles_dict = {} for prf in nsx_port['switching_profile_ids']: profiles_dict[prf['key']] = prf['value'] # DHCP port: neutron dhcp profile should be attached # to logical ports created for neutron DHCP but not # for native DHCP. if (port.get('device_owner') == const.DEVICE_OWNER_DHCP and not cfg.CONF.nsx_v3.native_dhcp_metadata): prf_id = profiles_dict[dhcp_profile_key] if prf_id != dhcp_profile_id: add_profile_mismatch(problems, neutron_id, nsx_id, prf_id, "DHCP security") # Port with QoS policy: a matching profile should be attached qos_policy_id = qos_utils.get_port_policy_id(context, neutron_id) if qos_policy_id: qos_profile_id = nsx_db.get_switch_profile_by_qos_policy( context.session, qos_policy_id) prf_id = profiles_dict[qos_profile_key] if prf_id != qos_profile_id: add_profile_mismatch(problems, neutron_id, nsx_id, prf_id, "QoS") # Port with security & fixed ips/address pairs: # neutron spoofguard profile should be attached port_sec, has_ip = plugin._determine_port_security_and_has_ip( context, port) addr_pair = port.get(addr_apidef.ADDRESS_PAIRS) if port_sec and (has_ip or addr_pair): prf_id = profiles_dict[spoofguard_profile_key] if prf_id != spoofguard_profile_id: add_profile_mismatch(problems, neutron_id, nsx_id, prf_id, "Spoof Guard") # Check the address bindings if port_sec: nsx_address_bindings = nsx_port.get('address_bindings', []) nsx_ips = [x['ip_address'] for x in nsx_address_bindings] nsx_macs = [x['mac_address'] for x in nsx_address_bindings] neutron_ips = [x['ip_address'] for x in port.get('fixed_ips', [])] neutron_mac = port['mac_address'] different_macs = [mac for mac in nsx_macs if mac != neutron_mac] if (len(nsx_ips) != len(neutron_ips) or set(nsx_ips) != set(neutron_ips)): problems.append({'neutron_id': neutron_id, 'nsx_id': nsx_id, 'port': port, 'error': 'Different IP address bindings', 'error_type': PORT_ERROR_TYPE_BINDINGS}) elif different_macs: problems.append({'neutron_id': neutron_id, 
'nsx_id': nsx_id, 'port': port, 'error': 'Different MAC address bindings', 'error_type': PORT_ERROR_TYPE_BINDINGS}) return problems def inject_headers(): ctx = context_utils.get_current() if ctx: ctx_dict = ctx.to_dict() # Remove unsupported characters from the user-id user_id = ctx_dict.get('user_identity') re.sub('[^A-Za-z0-9]+', '', user_id) return {'X-NSX-EUSER': user_id, 'X-NSX-EREQID': ctx_dict.get('request_id')} return {} ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.210254 vmware-nsx-15.0.1.dev143/vmware_nsx/policies/0000755000175000017500000000000000000000000021177 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/policies/__init__.py0000644000175000017500000000226500000000000023315 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import itertools from vmware_nsx.policies import housekeeper from vmware_nsx.policies import lsn from vmware_nsx.policies import maclearning from vmware_nsx.policies import network_gateway from vmware_nsx.policies import nsxpolicy from vmware_nsx.policies import providersecuritygroup from vmware_nsx.policies import security_group def list_rules(): return itertools.chain( lsn.list_rules(), maclearning.list_rules(), network_gateway.list_rules(), providersecuritygroup.list_rules(), security_group.list_rules(), nsxpolicy.list_rules(), housekeeper.list_rules(), ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/policies/base.py0000644000175000017500000000167500000000000022474 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# TODO(amotoki): Define these in neutron or neutron-lib RULE_ADMIN_OR_OWNER = 'rule:admin_or_owner' RULE_ADMIN_ONLY = 'rule:admin_only' RULE_ANY = 'rule:regular_user' RULE_ADMIN_OR_NET_OWNER = 'rule:admin_or_network_owner' RULE_ADVSVC = 'rule:context_is_advsvc' RULE_ADMIN_OR_NET_OWNER_OR_ADVSVC = '%s or %s' % (RULE_ADMIN_OR_NET_OWNER, RULE_ADVSVC) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/policies/housekeeper.py0000644000175000017500000000200100000000000024061 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from vmware_nsx.policies import base rules = [ policy.DocumentedRuleDefault( 'get_housekeeper', base.RULE_ANY, 'Get Housekeepers', [ { 'method': 'GET', 'path': '/housekeepers', }, { 'method': 'GET', 'path': '/housekeepers/{id}', }, ] ), ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/policies/lsn.py0000644000175000017500000000157600000000000022356 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from vmware_nsx.policies import base rules = [ policy.RuleDefault( 'create_lsn', base.RULE_ADMIN_ONLY, description='Create a LSN'), policy.RuleDefault( 'get_lsn', base.RULE_ADMIN_ONLY, description='Get LSNs'), ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/policies/maclearning.py0000644000175000017500000000243300000000000024033 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy from vmware_nsx.policies import base rules = [ policy.DocumentedRuleDefault( 'create_port:mac_learning_enabled', base.RULE_ADMIN_OR_NET_OWNER_OR_ADVSVC, 'Create a port with ``mac_learning_enabled`` attribute', [ { 'method': 'POST', 'path': '/ports', }, ] ), policy.DocumentedRuleDefault( 'update_port:mac_learning_enabled', base.RULE_ADMIN_OR_NET_OWNER_OR_ADVSVC, 'Update ``mac_learning_enabled`` attribute of a port', [ { 'method': 'PUT', 'path': '/ports/{id}', }, ] ), ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/policies/network_gateway.py0000644000175000017500000000715100000000000024767 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy from vmware_nsx.policies import base rules = [ policy.DocumentedRuleDefault( 'create_network_gateway', base.RULE_ADMIN_OR_OWNER, 'Create a network gateway', [ { 'method': 'POST', 'path': '/network-gateways', }, ] ), policy.DocumentedRuleDefault( 'update_network_gateway', base.RULE_ADMIN_OR_OWNER, 'Update a network gateway', [ { 'method': 'PUT', 'path': '/network-gateways/{id}', }, ] ), policy.DocumentedRuleDefault( 'delete_network_gateway', base.RULE_ADMIN_OR_OWNER, 'Delete a network gateway', [ { 'method': 'DELETE', 'path': '/network-gateways/{id}', }, ] ), policy.DocumentedRuleDefault( 'get_network_gateway', base.RULE_ADMIN_OR_OWNER, 'Get network gateways', [ { 'method': 'GET', 'path': '/network-gateways', }, { 'method': 'GET', 'path': '/network-gateways/{id}', }, ] ), policy.DocumentedRuleDefault( 'connect_network', base.RULE_ADMIN_OR_OWNER, 'Connect a network to a network gateway', [ { 'method': 'PUT', 'path': '/network-gateways/{id}/connect_network', }, ] ), policy.DocumentedRuleDefault( 'disconnect_network', base.RULE_ADMIN_OR_OWNER, 'Disconnect a network from a network gateway', [ { 'method': 'PUT', 'path': '/network-gateways/{id}/disconnect_network', }, ] ), policy.DocumentedRuleDefault( 'create_gateway_device', base.RULE_ADMIN_OR_OWNER, 'Create a gateway device', [ { 'method': 'POST', 'path': '/gateway-devices', }, ] ), policy.DocumentedRuleDefault( 'update_gateway_device', base.RULE_ADMIN_OR_OWNER, 'Update a gateway device', [ { 'method': 'PUT', 'path': '/gateway-devices/{id}', }, ] ), policy.DocumentedRuleDefault( 'delete_gateway_device', base.RULE_ADMIN_OR_OWNER, 'Delete a gateway device', [ { 'method': 'DELETE', 'path': '/gateway-devices/{id}', }, ] ), policy.DocumentedRuleDefault( 'get_gateway_device', base.RULE_ADMIN_OR_OWNER, 'Get gateway devices', [ { 'method': 'GET', 'path': '/gateway-devices', }, { 'method': 'GET', 'path': '/gateway-devices/{id}', }, ] ), ] def list_rules(): return rules 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/policies/nsxpolicy.py0000644000175000017500000000200000000000000023571 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from vmware_nsx.policies import base rules = [ policy.DocumentedRuleDefault( 'get_nsx_policy', base.RULE_ANY, 'Get NSX policies', [ { 'method': 'GET', 'path': '/nsx-policies', }, { 'method': 'GET', 'path': '/nsx-policies/{id}', }, ] ), ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/policies/providersecuritygroup.py0000644000175000017500000000240700000000000026253 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy from vmware_nsx.policies import base rules = [ policy.DocumentedRuleDefault( 'create_port:provider_security_groups', base.RULE_ADMIN_ONLY, 'Create a port with ``provider_security_groups`` attribute', [ { 'method': 'POST', 'path': '/ports', }, ] ), policy.DocumentedRuleDefault( 'update_port:provider_security_groups', base.RULE_ADMIN_ONLY, 'Update ``provider_security_groups`` attribute of a port', [ { 'method': 'PUT', 'path': '/ports/{id}', }, ] ), ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/policies/security_group.py0000644000175000017500000000503300000000000024635 0ustar00coreycorey00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy from vmware_nsx.policies import base rules = [ policy.DocumentedRuleDefault( 'create_security_group:logging', base.RULE_ADMIN_ONLY, 'Create a security group with ``logging`` attribute', [ { 'method': 'POST', 'path': '/security-groups', }, ] ), policy.DocumentedRuleDefault( 'update_security_group:logging', base.RULE_ADMIN_ONLY, 'Update ``logging`` attribute of a security group', [ { 'method': 'PUT', 'path': '/security-groups/{id}', }, ] ), policy.DocumentedRuleDefault( 'get_security_group:logging', base.RULE_ADMIN_ONLY, 'Get ``logging`` attributes of security groups', [ { 'method': 'GET', 'path': '/security-groups', }, { 'method': 'GET', 'path': '/security-groups/{id}', }, ] ), policy.DocumentedRuleDefault( 'create_security_group:provider', base.RULE_ADMIN_ONLY, 'Create a security group with ``provider`` attribute', [ { 'method': 'POST', 'path': '/security-groups', }, ] ), policy.DocumentedRuleDefault( 'create_security_group:policy', base.RULE_ADMIN_ONLY, 'Create a security group with ``policy`` attribute', [ { 'method': 'POST', 'path': '/security-groups', }, ] ), policy.DocumentedRuleDefault( 'update_security_group:policy', base.RULE_ADMIN_ONLY, 'Update ``policy`` attribute of a security group', [ { 'method': 'PUT', 'path': '/security-groups/{id}', }, ] ), ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.210254 vmware-nsx-15.0.1.dev143/vmware_nsx/services/0000755000175000017500000000000000000000000021213 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/__init__.py0000644000175000017500000000000000000000000023312 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.210254 
vmware-nsx-15.0.1.dev143/vmware_nsx/services/dynamic_routing/0000755000175000017500000000000000000000000024406 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/dynamic_routing/__init__.py0000644000175000017500000000000000000000000026505 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/dynamic_routing/bgp_plugin.py0000644000175000017500000004203500000000000027112 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_dynamic_routing.db import bgp_db from neutron_dynamic_routing.extensions import bgp as bgp_ext from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import context as n_context from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from neutron_lib.services import base as service_base from oslo_log import log as logging from vmware_nsx.common import locking from vmware_nsx.common import nsxv_constants from vmware_nsx.db import nsxv_db from vmware_nsx.extensions import edge_service_gateway_bgp_peer as ext_esg from vmware_nsx.extensions import projectpluginmap from vmware_nsx.services.dynamic_routing.nsx_v import driver as nsxv_driver LOG = logging.getLogger(__name__) PLUGIN_NAME = bgp_ext.BGP_EXT_ALIAS + '_nsx_svc_plugin' class NSXBgpPlugin(service_base.ServicePluginBase, bgp_db.BgpDbMixin): """BGP service plugin for NSX-V as well as TVD plugins. Currently only the nsx-v is supported. other plugins will be refused. 
""" supported_extension_aliases = [bgp_ext.BGP_EXT_ALIAS, ext_esg.ALIAS] def __init__(self): super(NSXBgpPlugin, self).__init__() self._core_plugin = directory.get_plugin() # initialize the supported drivers (currently only NSX-v) self.drivers = {} try: self.drivers[projectpluginmap.NsxPlugins.NSX_V] = ( nsxv_driver.NSXvBgpDriver(self)) except Exception: # No driver found LOG.warning("NSXBgpPlugin failed to initialize the NSX-V driver") self.drivers[projectpluginmap.NsxPlugins.NSX_V] = None self._register_callbacks() def get_plugin_name(self): return PLUGIN_NAME def get_plugin_type(self): return bgp_ext.BGP_EXT_ALIAS def get_plugin_description(self): """returns string description of the plugin.""" return ("BGP dynamic routing service for announcement of next-hops " "for project networks, floating IP's, and DVR host routes.") def _register_callbacks(self): registry.subscribe(self.router_interface_callback, resources.ROUTER_INTERFACE, events.AFTER_CREATE) registry.subscribe(self.router_interface_callback, resources.ROUTER_INTERFACE, events.AFTER_DELETE) registry.subscribe(self.router_gateway_callback, resources.ROUTER_GATEWAY, events.AFTER_UPDATE) registry.subscribe(self.router_gateway_callback, resources.ROUTER_GATEWAY, events.AFTER_DELETE) registry.subscribe(self._after_service_edge_create_callback, nsxv_constants.SERVICE_EDGE, events.AFTER_CREATE) registry.subscribe(self._before_service_edge_delete_callback, nsxv_constants.SERVICE_EDGE, events.BEFORE_DELETE) def _get_driver_by_project(self, context, project): # Check if the current project id has a matching driver # Currently only NSX-V is supported if self._core_plugin.is_tvd_plugin(): plugin_type = self._core_plugin.get_plugin_type_from_project( context, project) else: plugin_type = self._core_plugin.plugin_type() if not self.drivers.get(plugin_type): msg = (_("Project %(project)s with plugin %(plugin)s has no " "support for dynamic routing") % { 'project': project, 'plugin': plugin_type}) raise 
n_exc.InvalidInput(error_message=msg) return self.drivers[plugin_type] def _get_driver_by_speaker(self, context, bgp_speaker_id): try: speaker = self.get_bgp_speaker(context, bgp_speaker_id) except Exception: msg = _("BGP speaker %s could not be found") % bgp_speaker_id raise n_exc.BadRequest(resource=bgp_ext.BGP_SPEAKER_RESOURCE_NAME, msg=msg) return self._get_driver_by_project(context, speaker['tenant_id']) def create_bgp_speaker(self, context, bgp_speaker): driver = self._get_driver_by_project( context, bgp_speaker['bgp_speaker']['tenant_id']) driver.create_bgp_speaker(context, bgp_speaker) return super(NSXBgpPlugin, self).create_bgp_speaker(context, bgp_speaker) def update_bgp_speaker(self, context, bgp_speaker_id, bgp_speaker): driver = self._get_driver_by_speaker(context, bgp_speaker_id) with locking.LockManager.get_lock(str(bgp_speaker_id)): driver.update_bgp_speaker(context, bgp_speaker_id, bgp_speaker) # TBD(roeyc): rolling back changes on edges base class call failed. return super(NSXBgpPlugin, self).update_bgp_speaker( context, bgp_speaker_id, bgp_speaker) def delete_bgp_speaker(self, context, bgp_speaker_id): driver = self._get_driver_by_speaker(context, bgp_speaker_id) with locking.LockManager.get_lock(str(bgp_speaker_id)): driver.delete_bgp_speaker(context, bgp_speaker_id) super(NSXBgpPlugin, self).delete_bgp_speaker(context, bgp_speaker_id) def _add_esg_peer_info(self, context, peer): # TODO(asarfaty): only if nsxv driver, or do it in the driver itself binding = nsxv_db.get_nsxv_bgp_peer_edge_binding(context.session, peer['id']) if binding: peer['esg_id'] = binding['edge_id'] def get_bgp_peer(self, context, bgp_peer_id, fields=None): peer = super(NSXBgpPlugin, self).get_bgp_peer(context, bgp_peer_id, fields) if not fields or 'esg_id' in fields: self._add_esg_peer_info(context, peer) return peer def get_bgp_peers_by_bgp_speaker(self, context, bgp_speaker_id, fields=None): ret = super(NSXBgpPlugin, self).get_bgp_peers_by_bgp_speaker( context, 
bgp_speaker_id, fields=fields) if fields is None or 'esg_id' in fields: for peer in ret: self._add_esg_peer_info(context, peer) return ret def _get_driver_by_peer(self, context, bgp_peer_id): try: peer = self.get_bgp_peer(context, bgp_peer_id) except Exception: raise bgp_ext.BgpPeerNotFound(id=bgp_peer_id) return self._get_driver_by_project(context, peer['tenant_id']) def create_bgp_peer(self, context, bgp_peer): driver = self._get_driver_by_project( context, bgp_peer['bgp_peer']['tenant_id']) driver.create_bgp_peer(context, bgp_peer) peer = super(NSXBgpPlugin, self).create_bgp_peer(context, bgp_peer) # TODO(asarfaty): only if nsxv driver, or do it in the driver itself esg_id = bgp_peer['bgp_peer'].get('esg_id') if esg_id: nsxv_db.add_nsxv_bgp_peer_edge_binding(context.session, peer['id'], esg_id) peer['esg_id'] = esg_id return peer def update_bgp_peer(self, context, bgp_peer_id, bgp_peer): driver = self._get_driver_by_peer(context, bgp_peer_id) super(NSXBgpPlugin, self).update_bgp_peer(context, bgp_peer_id, bgp_peer) driver.update_bgp_peer(context, bgp_peer_id, bgp_peer) return self.get_bgp_peer(context, bgp_peer_id) def delete_bgp_peer(self, context, bgp_peer_id): driver = self._get_driver_by_peer(context, bgp_peer_id) bgp_peer_info = {'bgp_peer_id': bgp_peer_id} bgp_speaker_ids = driver._get_bgp_speakers_by_bgp_peer( context, bgp_peer_id) for speaker_id in bgp_speaker_ids: try: self.remove_bgp_peer(context, speaker_id, bgp_peer_info) except bgp_ext.BgpSpeakerPeerNotAssociated: LOG.debug("Couldn't find bgp speaker %s peer binding while " "deleting bgp peer %s", speaker_id, bgp_peer_id) super(NSXBgpPlugin, self).delete_bgp_peer(context, bgp_peer_id) def add_bgp_peer(self, context, bgp_speaker_id, bgp_peer_info): # speaker & peer must belong to the same driver if not bgp_peer_info.get('bgp_peer_id'): msg = _("bgp_peer_id must be specified") raise n_exc.BadRequest(resource='bgp-peer', msg=msg) peer_driver = self._get_driver_by_peer( context, 
bgp_peer_info['bgp_peer_id']) speaker_driver = self._get_driver_by_speaker(context, bgp_speaker_id) if peer_driver != speaker_driver: msg = _("Peer and Speaker must belong to the same plugin") raise n_exc.InvalidInput(error_message=msg) with locking.LockManager.get_lock(str(bgp_speaker_id)): speaker_driver.add_bgp_peer(context, bgp_speaker_id, bgp_peer_info) return super(NSXBgpPlugin, self).add_bgp_peer(context, bgp_speaker_id, bgp_peer_info) def remove_bgp_peer(self, context, bgp_speaker_id, bgp_peer_info): driver = self._get_driver_by_speaker(context, bgp_speaker_id) with locking.LockManager.get_lock(str(bgp_speaker_id)): ret = super(NSXBgpPlugin, self).remove_bgp_peer( context, bgp_speaker_id, bgp_peer_info) driver.remove_bgp_peer(context, bgp_speaker_id, bgp_peer_info) return ret def _validate_network_plugin( self, context, network_info, plugin_type=projectpluginmap.NsxPlugins.NSX_V): """Make sure the network belongs to the NSX0-V plugin""" if not network_info.get('network_id'): msg = _("network_id must be specified") raise n_exc.BadRequest(resource=bgp_ext.BGP_SPEAKER_RESOURCE_NAME, msg=msg) net_id = network_info['network_id'] p = self._core_plugin._get_plugin_from_net_id(context, net_id) if p.plugin_type() != plugin_type: msg = (_('Network should belong to the %s plugin as the bgp ' 'speaker') % plugin_type) raise n_exc.InvalidInput(error_message=msg) def add_gateway_network(self, context, bgp_speaker_id, network_info): driver = self._get_driver_by_speaker(context, bgp_speaker_id) if self._core_plugin.is_tvd_plugin(): # The plugin of the network and speaker must be the same self._validate_network_plugin(context, network_info) with locking.LockManager.get_lock(str(bgp_speaker_id)): driver.add_gateway_network(context, bgp_speaker_id, network_info) return super(NSXBgpPlugin, self).add_gateway_network( context, bgp_speaker_id, network_info) def remove_gateway_network(self, context, bgp_speaker_id, network_info): driver = self._get_driver_by_speaker(context, 
bgp_speaker_id) with locking.LockManager.get_lock(str(bgp_speaker_id)): super(NSXBgpPlugin, self).remove_gateway_network( context, bgp_speaker_id, network_info) driver.remove_gateway_network(context, bgp_speaker_id, network_info) def get_advertised_routes(self, context, bgp_speaker_id): driver = self._get_driver_by_speaker(context, bgp_speaker_id) return driver.get_advertised_routes(context, bgp_speaker_id) def router_interface_callback(self, resource, event, trigger, **kwargs): if not kwargs['network_id']: # No GW network, hence no BGP speaker associated return context = kwargs['context'].elevated() router_id = kwargs['router_id'] subnets = kwargs.get('subnets') network_id = kwargs['network_id'] port = kwargs['port'] speakers = self._bgp_speakers_for_gateway_network(context, network_id) for speaker in speakers: speaker_id = speaker.id with locking.LockManager.get_lock(str(speaker_id)): speaker = self.get_bgp_speaker(context, speaker_id) driver = self._get_driver_by_project( context, speaker['tenant_id']) if network_id not in speaker['networks']: continue if event == events.AFTER_CREATE: driver.advertise_subnet(context, speaker_id, router_id, subnets[0]) if event == events.AFTER_DELETE: subnet_id = port['fixed_ips'][0]['subnet_id'] driver.withdraw_subnet(context, speaker_id, router_id, subnet_id) def router_gateway_callback(self, resource, event, trigger, payload=None): context = payload.context or n_context.get_admin_context() context = context.elevated() router_id = payload.resource_id network_id = payload.metadata['network_id'] speakers = self._bgp_speakers_for_gateway_network(context, network_id) for speaker in speakers: speaker_id = speaker.id driver = self._get_driver_by_project( context, speaker['tenant_id']) with locking.LockManager.get_lock(str(speaker_id)): speaker = self.get_bgp_speaker(context, speaker_id) if network_id not in speaker['networks']: continue if event == events.AFTER_DELETE: gw_ips = payload.metadata['gateway_ips'] 
driver.disable_bgp_on_router(context, speaker, router_id, gw_ips[0]) if event == events.AFTER_UPDATE: updated_port = payload.metadata['updated_port'] router = payload.latest_state driver.process_router_gw_port_update( context, speaker, router, updated_port) def _before_service_edge_delete_callback(self, resource, event, trigger, payload=None): context = payload.context.elevated() router = payload.latest_state ext_net_id = router.gw_port and router.gw_port['network_id'] gw_ip = router.gw_port and router.gw_port['fixed_ips'][0]['ip_address'] edge_id = payload.resource_id speakers = self._bgp_speakers_for_gateway_network(context, ext_net_id) for speaker in speakers: driver = self._get_driver_by_project( context, speaker['tenant_id']) with locking.LockManager.get_lock(speaker.id): speaker = self.get_bgp_speaker(context, speaker.id) if ext_net_id not in speaker['networks']: continue driver.disable_bgp_on_router(context, speaker, router['id'], gw_ip, edge_id) def _after_service_edge_create_callback(self, resource, event, trigger, payload=None): context = payload.context.elevated() router = payload.latest_state ext_net_id = router.gw_port and router.gw_port['network_id'] speakers = self._bgp_speakers_for_gateway_network(context, ext_net_id) for speaker in speakers: driver = self._get_driver_by_project( context, speaker['tenant_id']) with locking.LockManager.get_lock(speaker.id): speaker = self.get_bgp_speaker(context, speaker.id) if ext_net_id not in speaker['networks']: continue driver.enable_bgp_on_router(context, speaker, router['id']) class NSXvBgpPlugin(NSXBgpPlugin): """Defined for backwards compatibility only""" pass ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.210254 vmware-nsx-15.0.1.dev143/vmware_nsx/services/dynamic_routing/nsx_v/0000755000175000017500000000000000000000000025543 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/dynamic_routing/nsx_v/__init__.py0000644000175000017500000000000000000000000027642 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/dynamic_routing/nsx_v/driver.py0000644000175000017500000010065500000000000027417 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import netaddr from neutron_dynamic_routing.extensions import bgp as bgp_ext from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from neutron_lib.api.definitions import address_scope from neutron_lib.api.definitions import external_net as extnet_apidef from neutron_lib import constants as n_const from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import locking from vmware_nsx.common import nsxv_constants from vmware_nsx.db import nsxv_db from vmware_nsx.extensions import edge_service_gateway_bgp_peer as ext_esg_peer from vmware_nsx.extensions import projectpluginmap from vmware_nsx.plugins.nsx_v.vshield.common import exceptions as vcns_exc LOG = logging.getLogger(__name__) def ip_prefix(name, ip_address): return {'ipPrefix': {'name': name, 'ipAddress': ip_address}} def redistribution_rule(advertise_static_routes, prefix_name, action='permit'): rule = { 'prefixName': prefix_name, 'action': action, 'from': { 'ospf': False, 'bgp': False, 'connected': not advertise_static_routes, 'static': advertise_static_routes } } return {'rule': rule} def _get_bgp_neighbour(ip_address, remote_as, password, direction): bgp_filter = {'bgpFilter': [{'direction': direction, 'action': 'permit'}]} nbr = { 'ipAddress': ip_address, 'remoteAS': remote_as, 'bgpFilters': bgp_filter, 'holdDownTimer': cfg.CONF.nsxv.bgp_neighbour_hold_down_timer, 'keepAliveTimer': cfg.CONF.nsxv.bgp_neighbour_keep_alive_timer } if password: nbr['password'] = password return {'bgpNeighbour': nbr} def bgp_neighbour_from_peer(bgp_peer): return _get_bgp_neighbour(bgp_peer['peer_ip'], bgp_peer['remote_as'], bgp_peer['password'], direction='out') def gw_bgp_neighbour(ip_address, remote_as, password): return _get_bgp_neighbour(ip_address, remote_as, password, direction='in') class NSXvBgpDriver(object): """Class driver to address the 
neutron_dynamic_routing API""" def __init__(self, plugin): super(NSXvBgpDriver, self).__init__() self._plugin = plugin self._core_plugin = directory.get_plugin() if self._core_plugin.is_tvd_plugin(): self._core_plugin = self._core_plugin.get_plugin_by_type( projectpluginmap.NsxPlugins.NSX_V) if not self._core_plugin: err_msg = _("NSXv BGP cannot work without the NSX-V core plugin") raise n_exc.InvalidInput(error_message=err_msg) self._nsxv = self._core_plugin.nsx_v self._edge_manager = self._core_plugin.edge_manager def prefix_name(self, subnet_id): return 'subnet-%s' % subnet_id def _get_router_edge_info(self, context, router_id): edge_binding = nsxv_db.get_nsxv_router_binding(context.session, router_id) if not edge_binding: return None, None # Indicates which routes should be advertised - connected or static. advertise_static_routes = False if edge_binding['edge_type'] != nsxv_constants.SERVICE_EDGE: # Distributed router plr_id = self._edge_manager.get_plr_by_tlr_id(context, router_id) edge_binding = nsxv_db.get_nsxv_router_binding(context.session, plr_id) if not edge_binding: # Distributed router isn't bound to plr return None, None # PLR for distributed router, advertise static routes. 
advertise_static_routes = True return edge_binding['edge_id'], advertise_static_routes def get_advertised_routes(self, context, bgp_speaker_id): routes = [] bgp_speaker = self._plugin.get_bgp_speaker(context, bgp_speaker_id) edge_router_dict = ( self._get_dynamic_routing_edge_list(context, bgp_speaker['networks'][0], bgp_speaker_id)) for edge_id, edge_router_config in edge_router_dict.items(): bgp_identifier = edge_router_config['bgp_identifier'] subnets = self._query_tenant_subnets( context, edge_router_config['no_snat_routers']) routes.extend([(subnet['cidr'], bgp_identifier) for subnet in subnets]) routes = self._plugin._make_advertised_routes_list(routes) return self._plugin._make_advertised_routes_dict(routes) def _get_dynamic_routing_edge_list(self, context, gateway_network_id, bgp_speaker_id): # Filter the routers attached this network as gateway interface filters = {'network_id': [gateway_network_id], 'device_owner': [n_const.DEVICE_OWNER_ROUTER_GW]} fields = ['device_id', 'fixed_ips'] gateway_ports = self._core_plugin.get_ports(context, filters=filters, fields=fields) bgp_bindings = nsxv_db.get_nsxv_bgp_speaker_bindings( context.session, bgp_speaker_id) binding_info = {bgp_binding['edge_id']: bgp_binding['bgp_identifier'] for bgp_binding in bgp_bindings} edge_router_dict = {} for port in gateway_ports: router_id = port['device_id'] router = self._core_plugin._get_router(context, router_id) edge_id, advertise_static_routes = ( self._get_router_edge_info(context, router_id)) if not edge_id: # Shared router is not attached on any edge continue if edge_id not in edge_router_dict: bgp_identifier = binding_info.get( edge_id, port['fixed_ips'][0]['ip_address']) edge_router_dict[edge_id] = {'no_snat_routers': [], 'bgp_identifier': bgp_identifier, 'advertise_static_routes': advertise_static_routes} if not router.enable_snat: edge_router_dict[edge_id]['no_snat_routers'].append(router_id) return edge_router_dict def _get_md_proxy_for_router(self, context, router_id): 
binding = nsxv_db.get_nsxv_router_binding(context.session, router_id) md_proxy = None if binding: az_name = binding['availability_zone'] md_proxy = self._core_plugin.get_metadata_proxy_handler( az_name) return md_proxy def _query_tenant_subnets(self, context, router_ids): # Query subnets attached to all of routers attached to same edge subnets = [] for router_id in router_ids: filters = {'device_id': [router_id], 'device_owner': [n_const.DEVICE_OWNER_ROUTER_INTF]} int_ports = self._core_plugin.get_ports(context, filters=filters, fields=['fixed_ips']) # We need to skip metadata subnets md_proxy = self._get_md_proxy_for_router(context, router_id) for p in int_ports: subnet_id = p['fixed_ips'][0]['subnet_id'] if md_proxy and md_proxy.is_md_subnet(subnet_id): continue subnet = self._core_plugin.get_subnet(context, subnet_id) subnets.append({'id': subnet_id, 'cidr': subnet['cidr']}) LOG.debug("Got related subnets %s", subnets) return subnets def _get_bgp_speakers_by_bgp_peer(self, context, bgp_peer_id): fields = ['id', 'peers'] bgp_speakers = self._plugin.get_bgp_speakers(context, fields=fields) bgp_speaker_ids = [bgp_speaker['id'] for bgp_speaker in bgp_speakers if bgp_peer_id in bgp_speaker['peers']] return bgp_speaker_ids def _get_prefixes_and_redistribution_rules(self, subnets, advertise_static_routes): prefixes = [] redis_rules = [] for subnet in subnets: prefix_name = self.prefix_name(subnet['id']) prefix = ip_prefix(prefix_name, subnet['cidr']) prefixes.append(prefix) rule = redistribution_rule(advertise_static_routes, prefix_name) redis_rules.append(rule) return prefixes, redis_rules def create_bgp_speaker(self, context, bgp_speaker): bgp_speaker_data = bgp_speaker['bgp_speaker'] ip_version = bgp_speaker_data.get('ip_version') if ip_version and ip_version == 6: err_msg = _("NSXv BGP does not support for IPv6") raise n_exc.InvalidInput(error_message=err_msg) def update_bgp_speaker(self, context, bgp_speaker_id, bgp_speaker): bgp_obj = bgp_speaker['bgp_speaker'] 
old_speaker_info = self._plugin.get_bgp_speaker(context, bgp_speaker_id) enabled_state = old_speaker_info['advertise_tenant_networks'] new_enabled_state = bgp_obj.get('advertise_tenant_networks', enabled_state) if new_enabled_state == enabled_state: return bgp_bindings = nsxv_db.get_nsxv_bgp_speaker_bindings( context.session, bgp_speaker_id) edge_ids = [bgp_binding['edge_id'] for bgp_binding in bgp_bindings] action = 'Enabling' if new_enabled_state else 'Disabling' LOG.info("%s BGP route redistribution on edges: %s.", action, edge_ids) for edge_id in edge_ids: try: self._nsxv.update_routing_redistribution(edge_id, new_enabled_state) except vcns_exc.VcnsApiException: LOG.warning("Failed to update BGP on edge '%s'.", edge_id) def delete_bgp_speaker(self, context, bgp_speaker_id): bgp_bindings = nsxv_db.get_nsxv_bgp_speaker_bindings( context.session, bgp_speaker_id) self._stop_bgp_on_edges(context, bgp_bindings, bgp_speaker_id) def _validate_bgp_configuration_on_peer_esg(self, bgp_peer): if not bgp_peer.get('esg_id'): return # TBD(roeyc): Validate peer_ip is on subnet bgp_config = self._nsxv.get_routing_bgp_config(bgp_peer['esg_id']) remote_as = bgp_peer['remote_as'] esg_id = bgp_peer['esg_id'] esg_as = bgp_config['bgp'].get('localAS') if not bgp_config['bgp']['enabled']: raise ext_esg_peer.BgpDisabledOnEsgPeer(esg_id=esg_id) if esg_as != int(remote_as): raise ext_esg_peer.EsgRemoteASDoNotMatch(remote_as=remote_as, esg_id=esg_id, esg_as=esg_as) h, resp = self._nsxv.vcns.get_interfaces(esg_id) for iface in resp['vnics']: address_groups = iface['addressGroups']['addressGroups'] matching_iface = [ag for ag in address_groups if ag['primaryAddress'] == bgp_peer['peer_ip']] if matching_iface: break else: raise ext_esg_peer.EsgInternalIfaceDoesNotMatch(esg_id=esg_id) def create_bgp_peer(self, context, bgp_peer): bgp_peer = bgp_peer['bgp_peer'] remote_ip = bgp_peer['peer_ip'] if not netaddr.valid_ipv4(remote_ip): err_msg = _("NSXv BGP does not support for IPv6") raise 
n_exc.InvalidInput(error_message=err_msg) self._validate_bgp_configuration_on_peer_esg(bgp_peer) def update_bgp_peer(self, context, bgp_peer_id, bgp_peer): password = bgp_peer['bgp_peer'].get('password') old_bgp_peer = self._plugin.get_bgp_peer(context, bgp_peer_id) # Only password update is relevant for backend. if old_bgp_peer['password'] == password: return bgp_speaker_ids = self._get_bgp_speakers_by_bgp_peer(context, bgp_peer_id) # Update the password for the old bgp peer and update NSX old_bgp_peer['password'] = password neighbour = bgp_neighbour_from_peer(old_bgp_peer) for bgp_speaker_id in bgp_speaker_ids: with locking.LockManager.get_lock(bgp_speaker_id): peers = self._plugin.get_bgp_peers_by_bgp_speaker( context, bgp_speaker_id) if bgp_peer_id not in [p['id'] for p in peers]: continue bgp_bindings = nsxv_db.get_nsxv_bgp_speaker_bindings( context.session, bgp_speaker_id) for binding in bgp_bindings: try: # Neighbours are identified by their ip address self._nsxv.update_bgp_neighbours(binding['edge_id'], [neighbour], [neighbour]) except vcns_exc.VcnsApiException: LOG.error("Failed to update BGP neighbor '%s' on " "edge '%s'", old_bgp_peer['peer_ip'], binding['edge_id']) def _validate_bgp_peer(self, context, bgp_speaker_id, new_peer_id): new_peer = self._plugin._get_bgp_peer(context, new_peer_id) peers = self._plugin._get_bgp_peers_by_bgp_speaker_binding( context, bgp_speaker_id) self._plugin._validate_peer_ips(bgp_speaker_id, peers, new_peer) def add_bgp_peer(self, context, bgp_speaker_id, bgp_peer_info): bgp_peer_id = self._plugin._get_id_for(bgp_peer_info, 'bgp_peer_id') bgp_peer_obj = self._plugin.get_bgp_peer(context, bgp_peer_id) nbr = bgp_neighbour_from_peer(bgp_peer_obj) bgp_bindings = nsxv_db.get_nsxv_bgp_speaker_bindings(context.session, bgp_speaker_id) self._validate_bgp_peer(context, bgp_speaker_id, bgp_peer_obj['id']) speaker = self._plugin.get_bgp_speaker(context, bgp_speaker_id) # list of tenant edge routers to be removed as bgp-neighbours to 
this # peer if it's associated with specific ESG. neighbours = [] for binding in bgp_bindings: try: self._nsxv.add_bgp_neighbours(binding['edge_id'], [nbr]) except vcns_exc.VcnsApiException: LOG.error("Failed to add BGP neighbour on '%s'", binding['edge_id']) else: gw_nbr = gw_bgp_neighbour(binding['bgp_identifier'], speaker['local_as'], bgp_peer_obj['password']) neighbours.append(gw_nbr) LOG.debug("Succesfully added BGP neighbor '%s' on '%s'", bgp_peer_obj['peer_ip'], binding['edge_id']) if bgp_peer_obj.get('esg_id'): edge_gw = bgp_peer_obj['esg_id'] try: self._nsxv.add_bgp_neighbours(edge_gw, neighbours) except vcns_exc.VcnsApiException: with excutils.save_and_reraise_exception(): LOG.error("Failed to add BGP neighbour on GW Edge '%s'", edge_gw) def remove_bgp_peer(self, context, bgp_speaker_id, bgp_peer_info): bgp_peer_id = bgp_peer_info['bgp_peer_id'] bgp_peer_obj = self._plugin.get_bgp_peer(context, bgp_peer_id) nbr = bgp_neighbour_from_peer(bgp_peer_obj) bgp_bindings = nsxv_db.get_nsxv_bgp_speaker_bindings( context.session, bgp_speaker_id) speaker = self._plugin.get_bgp_speaker(context, bgp_speaker_id) # list of tenant edge routers to be removed as bgp-neighbours to this # peer if it's associated with specific ESG. 
neighbours = [] for binding in bgp_bindings: try: self._nsxv.remove_bgp_neighbours(binding['edge_id'], [nbr]) except vcns_exc.VcnsApiException: LOG.error("Failed to remove BGP neighbour on '%s'", binding['edge_id']) else: gw_nbr = gw_bgp_neighbour(binding['bgp_identifier'], speaker['local_as'], bgp_peer_obj['password']) neighbours.append(gw_nbr) LOG.debug("Succesfully removed BGP neighbor '%s' on '%s'", bgp_peer_obj['peer_ip'], binding['edge_id']) if bgp_peer_obj.get('esg_id'): edge_gw = bgp_peer_obj['esg_id'] try: self._nsxv.remove_bgp_neighbours(edge_gw, neighbours) except vcns_exc.VcnsApiException: LOG.error("Failed to remove BGP neighbour on GW Edge '%s'", edge_gw) def _validate_gateway_network(self, context, speaker_id, network_id): ext_net = self._core_plugin.get_network(context, network_id) if not ext_net.get(extnet_apidef.EXTERNAL): raise nsx_exc.NsxBgpNetworkNotExternal(net_id=network_id) if not ext_net['subnets']: raise nsx_exc.NsxBgpGatewayNetworkHasNoSubnets(net_id=network_id) # REVISIT(roeyc): Currently not allowing more than one bgp speaker per # gateway network. 
speakers_on_network = self._plugin._bgp_speakers_for_gateway_network( context, network_id) if speakers_on_network: raise bgp_ext.BgpSpeakerNetworkBindingError( network_id=network_id, bgp_speaker_id=speakers_on_network[0]['id']) subnet_id = ext_net['subnets'][0] ext_subnet = self._core_plugin.get_subnet(context, subnet_id) if ext_subnet.get('gateway_ip'): raise ext_esg_peer.ExternalSubnetHasGW( network_id=network_id, subnet_id=subnet_id) if not ext_net[address_scope.IPV4_ADDRESS_SCOPE]: raise nsx_exc.NsxBgpSpeakerUnableToAddGatewayNetwork( network_id=network_id, bgp_speaker_id=speaker_id) return True def add_gateway_network(self, context, bgp_speaker_id, network_info): gateway_network_id = network_info['network_id'] if not self._validate_gateway_network(context, bgp_speaker_id, gateway_network_id): return edge_router_dict = self._get_dynamic_routing_edge_list( context, gateway_network_id, bgp_speaker_id) speaker = self._plugin.get_bgp_speaker(context, bgp_speaker_id) bgp_peers = self._plugin.get_bgp_peers_by_bgp_speaker( context, bgp_speaker_id) local_as = speaker['local_as'] peers = [] for edge_id, edge_router_config in edge_router_dict.items(): router_ids = edge_router_config['no_snat_routers'] advertise_static_routes = ( edge_router_config['advertise_static_routes']) subnets = self._query_tenant_subnets(context, router_ids) # router_id here is in IP address format and is required for # the BGP configuration. 
bgp_identifier = edge_router_config['bgp_identifier'] try: self._start_bgp_on_edge(context, edge_id, speaker, bgp_peers, bgp_identifier, subnets, advertise_static_routes) except vcns_exc.VcnsApiException: LOG.error("Failed to configure BGP speaker %s on edge '%s'.", bgp_speaker_id, edge_id) else: peers.append(bgp_identifier) for edge_gw, password in [(peer['esg_id'], peer['password']) for peer in bgp_peers if peer.get('esg_id')]: neighbours = [gw_bgp_neighbour(bgp_id, local_as, password) for bgp_id in peers] try: self._nsxv.add_bgp_neighbours(edge_gw, neighbours) except vcns_exc.VcnsApiException: LOG.error("Failed to add BGP neighbour on GW Edge '%s'", edge_gw) def _start_bgp_on_edge(self, context, edge_id, speaker, bgp_peers, bgp_identifier, subnets, advertise_static_routes): enabled_state = speaker['advertise_tenant_networks'] local_as = speaker['local_as'] prefixes, redis_rules = self._get_prefixes_and_redistribution_rules( subnets, advertise_static_routes) bgp_neighbours = [bgp_neighbour_from_peer(bgp_peer) for bgp_peer in bgp_peers] try: self._nsxv.add_bgp_speaker_config(edge_id, bgp_identifier, local_as, enabled_state, bgp_neighbours, prefixes, redis_rules) except vcns_exc.VcnsApiException: with excutils.save_and_reraise_exception(): LOG.error("Failed to configure BGP speaker '%s' on edge '%s'.", speaker['id'], edge_id) else: nsxv_db.add_nsxv_bgp_speaker_binding(context.session, edge_id, speaker['id'], bgp_identifier) def _stop_bgp_on_edges(self, context, bgp_bindings, speaker_id): peers_to_remove = [] speaker = self._plugin.get_bgp_speaker(context, speaker_id) local_as = speaker['local_as'] for bgp_binding in bgp_bindings: edge_id = bgp_binding['edge_id'] try: self._nsxv.delete_bgp_speaker_config(edge_id) except vcns_exc.VcnsApiException: LOG.error("Failed to delete BGP speaker '%s' config on edge " "'%s'.", speaker_id, edge_id) else: nsxv_db.delete_nsxv_bgp_speaker_binding(context.session, edge_id) peers_to_remove.append(bgp_binding['bgp_identifier']) # We 
should also remove all bgp neighbours on gw-edges which # corresponds with tenant routers that are associated with this bgp # speaker. bgp_peers = self._plugin.get_bgp_peers_by_bgp_speaker(context, speaker_id) gw_edges = [(peer['esg_id'], peer['password']) for peer in bgp_peers if peer.get('esg_id')] for gw_edge, password in gw_edges: neighbours_to_remove = [gw_bgp_neighbour(bgp_identifier, local_as, password) for bgp_identifier in peers_to_remove] try: self._nsxv.remove_bgp_neighbours(gw_edge, neighbours_to_remove) except vcns_exc.VcnsApiException: LOG.error("Failed to remove BGP neighbour on GW edge '%s'.", gw_edge) def remove_gateway_network(self, context, bgp_speaker_id, network_info): bgp_bindings = nsxv_db.get_nsxv_bgp_speaker_bindings( context.session, bgp_speaker_id) self._stop_bgp_on_edges(context, bgp_bindings, bgp_speaker_id) def _update_edge_bgp_identifier(self, context, bgp_binding, speaker, new_bgp_identifier): local_as = speaker['local_as'] bgp_peers = self._plugin.get_bgp_peers_by_bgp_speaker(context, speaker['id']) self._nsxv.update_router_id(bgp_binding['edge_id'], new_bgp_identifier) for gw_edge_id, password in [(peer['esg_id'], peer['password']) for peer in bgp_peers if peer.get('esg_id')]: nbr_to_remove = gw_bgp_neighbour(bgp_binding['bgp_identifier'], local_as, password) nbr_to_add = gw_bgp_neighbour(new_bgp_identifier, local_as, password) self._nsxv.update_bgp_neighbours(gw_edge_id, [nbr_to_add], [nbr_to_remove]) with context.session.begin(subtransactions=True): bgp_binding['bgp_identifier'] = new_bgp_identifier def process_router_gw_port_update(self, context, speaker, router, updated_port): router_id = router['id'] gw_fixed_ip = router.gw_port['fixed_ips'][0]['ip_address'] edge_id, advertise_static_routes = ( self._get_router_edge_info(context, router_id)) if not edge_id: # shared router is not attached on any edge return bgp_binding = nsxv_db.get_nsxv_bgp_speaker_binding( context.session, edge_id) if bgp_binding: new_fixed_ip = 
updated_port['fixed_ips'][0]['ip_address'] fixed_ip_updated = gw_fixed_ip != new_fixed_ip subnets = self._query_tenant_subnets(context, [router_id]) prefixes, redis_rules = ( self._get_prefixes_and_redistribution_rules( subnets, advertise_static_routes)) # Handle possible snat/no-nat update if router.enable_snat: self._nsxv.remove_bgp_redistribution_rules(edge_id, prefixes) else: self._nsxv.add_bgp_redistribution_rules(edge_id, prefixes, redis_rules) if bgp_binding['bgp_identifier'] == gw_fixed_ip: if fixed_ip_updated: self._update_edge_bgp_identifier(context, bgp_binding, speaker, new_fixed_ip) def enable_bgp_on_router(self, context, speaker, router_id): local_as = speaker['local_as'] edge_id, advertise_static_routes = ( self._get_router_edge_info(context, router_id)) if not edge_id: # shared router is not attached on any edge return router = self._core_plugin._get_router(context, router_id) subnets = self._query_tenant_subnets(context, [router_id]) bgp_peers = self._plugin.get_bgp_peers_by_bgp_speaker( context, speaker['id']) bgp_binding = nsxv_db.get_nsxv_bgp_speaker_binding( context.session, edge_id) if bgp_binding and subnets: # Edge already configured with BGP (e.g - shared router edge), # Add the router attached subnets. 
if router.enable_snat: prefixes = [self.prefix_name(subnet['id']) for subnet in subnets] self._nsxv.remove_bgp_redistribution_rules(edge_id, prefixes) else: prefixes, redis_rules = ( self._get_prefixes_and_redistribution_rules( subnets, advertise_static_routes)) self._nsxv.add_bgp_redistribution_rules(edge_id, prefixes, redis_rules) elif not bgp_binding: if router.enable_snat: subnets = [] bgp_identifier = router.gw_port['fixed_ips'][0]['ip_address'] self._start_bgp_on_edge(context, edge_id, speaker, bgp_peers, bgp_identifier, subnets, advertise_static_routes) for gw_edge_id, password in [(peer['esg_id'], peer['password']) for peer in bgp_peers if peer.get('esg_id')]: nbr = gw_bgp_neighbour(bgp_identifier, local_as, password) self._nsxv.add_bgp_neighbours(gw_edge_id, [nbr]) def disable_bgp_on_router(self, context, speaker, router_id, gw_ip, edge_id=None): speaker = self._plugin.get_bgp_speaker(context, speaker['id']) current_edge_id, advertise_static_routes = ( self._get_router_edge_info(context, router_id)) edge_id = edge_id or current_edge_id if not edge_id: return bgp_binding = nsxv_db.get_nsxv_bgp_speaker_binding(context.session, edge_id) if not bgp_binding: return # Need to ensure that we do not use the metadata IP's md_proxy = self._get_md_proxy_for_router(context, router_id) routers_ids = ( self._core_plugin.edge_manager.get_routers_on_same_edge( context, router_id)) routers_ids.remove(router_id) # We need to find out what other routers are hosted on the edges and # whether they have a gw addresses that could replace the current # bgp-identifier (if required). 
filters = {'device_owner': [n_const.DEVICE_OWNER_ROUTER_GW], 'device_id': routers_ids} edge_gw_ports = self._core_plugin.get_ports(context, filters=filters) alt_bgp_identifiers = [ p['fixed_ips'][0]['ip_address'] for p in edge_gw_ports if (not md_proxy or not md_proxy.is_md_subnet( p['fixed_ips'][0]['subnet_id']))] if alt_bgp_identifiers: # Shared router, only remove prefixes and redistribution # rules. subnets = self._query_tenant_subnets(context, [router_id]) prefixes = [self.prefix_name(subnet['id']) for subnet in subnets] self._nsxv.remove_bgp_redistribution_rules(edge_id, prefixes) if bgp_binding['bgp_identifier'] == gw_ip: self._update_edge_bgp_identifier(context, bgp_binding, speaker, alt_bgp_identifiers[0]) else: self._stop_bgp_on_edges(context, [bgp_binding], speaker['id']) def advertise_subnet(self, context, speaker_id, router_id, subnet): router = self._core_plugin._get_router(context, router_id) if router.enable_snat: # Do nothing, by default, only when advertisement is needed we add # a new redistribution rule return edge_id, advertise_static_routes = ( self._get_router_edge_info(context, router_id)) if not edge_id: # shared router is not attached on any edge return prefixes, redis_rules = self._get_prefixes_and_redistribution_rules( [subnet], advertise_static_routes) self._nsxv.add_bgp_redistribution_rules(edge_id, prefixes, redis_rules) def withdraw_subnet(self, context, speaker_id, router_id, subnet_id): router = self._core_plugin._get_router(context, router_id) if router.enable_snat: # Do nothing, by default, only when advertisement is needed we add # a new redistribution rule return edge_id, advertise_static_routes = ( self._get_router_edge_info(context, router_id)) prefix_name = self.prefix_name(subnet_id) self._nsxv.remove_bgp_redistribution_rules(edge_id, [prefix_name]) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.210254 
vmware-nsx-15.0.1.dev143/vmware_nsx/services/flowclassifier/0000755000175000017500000000000000000000000024227 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/flowclassifier/__init__.py0000644000175000017500000000000000000000000026326 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.210254 vmware-nsx-15.0.1.dev143/vmware_nsx/services/flowclassifier/nsx_v/0000755000175000017500000000000000000000000025364 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/flowclassifier/nsx_v/__init__.py0000644000175000017500000000000000000000000027463 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/flowclassifier/nsx_v/driver.py0000644000175000017500000003736500000000000027247 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import xml.etree.ElementTree as et from networking_sfc.extensions import flowclassifier from networking_sfc.services.flowclassifier.common import exceptions as exc from networking_sfc.services.flowclassifier.drivers import base as fc_driver from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import context as n_context from neutron_lib.plugins import directory from oslo_config import cfg from oslo_log import helpers as log_helpers from oslo_log import log as logging from vmware_nsx._i18n import _ from vmware_nsx.common import config # noqa from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import locking from vmware_nsx.common import nsxv_constants from vmware_nsx.plugins.nsx_v.vshield import vcns as nsxv_api from vmware_nsx.plugins.nsx_v.vshield import vcns_driver from vmware_nsx.services.flowclassifier.nsx_v import utils as fc_utils LOG = logging.getLogger(__name__) REDIRECT_FW_SECTION_NAME = 'OS Flow Classifier Rules' class NsxvFlowClassifierDriver(fc_driver.FlowClassifierDriverBase): """FlowClassifier Driver For NSX-V.""" _redirect_section_id = None def initialize(self): self._nsxv = vcns_driver.VcnsDriver(None) self.init_profile_id() self.init_security_group() self.init_security_group_in_profile() # register an event to the end of the init to handle the first upgrade if self._is_new_security_group: registry.subscribe(self.init_complete, resources.PROCESS, events.BEFORE_SPAWN) def init_profile_id(self): """Init the service insertion profile ID Initialize the profile id that should be assigned to the redirect rules from the nsx configuration and verify that it exists on backend. 
""" if not cfg.CONF.nsxv.service_insertion_profile_id: raise cfg.RequiredOptError("service_insertion_profile_id", group=cfg.OptGroup('nsxv')) self._profile_id = cfg.CONF.nsxv.service_insertion_profile_id # Verify that this moref exists if not self._nsxv.vcns.validate_inventory(self._profile_id): error = (_("Configured service profile ID: %s not found") % self._profile_id) raise nsx_exc.NsxPluginException(err_msg=error) def init_security_group(self): """Init the service insertion security group Look for the service insertion security group in the backend. If it was not found - create it This security group will contain all the VMs vnics that should be inspected by the redirect rules """ # check if this group exist, and create it if not. sg_name = fc_utils.SERVICE_INSERTION_SG_NAME sg_id = self._nsxv.vcns.get_security_group_id(sg_name) self._is_new_security_group = False if not sg_id: description = ("OpenStack Service Insertion Security Group, " "managed by Neutron nsx-v plugin.") sg = {"securitygroup": {"name": sg_name, "description": description}} h, sg_id = ( self._nsxv.vcns.create_security_group(sg)) self._is_new_security_group = True self._security_group_id = sg_id def init_security_group_in_profile(self): """Attach the security group to the service profile """ data = self._nsxv.vcns.get_service_insertion_profile(self._profile_id) if data and len(data) > 1: profile = et.fromstring(data[1]) profile_binding = profile.find('serviceProfileBinding') sec_groups = profile_binding.find('securityGroups') for sec in sec_groups.iter('string'): if sec.text == self._security_group_id: # Already there return # add the security group to the binding et.SubElement(sec_groups, 'string').text = self._security_group_id self._nsxv.vcns.update_service_insertion_profile_binding( self._profile_id, et.tostring(profile_binding, encoding="us-ascii")) def init_complete(self, resource, event, trigger, payload=None): if self._is_new_security_group: # add existing VMs to the new security 
group # This code must run after init is done core_plugin = directory.get_plugin() core_plugin.add_vms_to_service_insertion( self._security_group_id) # Add the first flow classifier entry if cfg.CONF.nsxv.service_insertion_redirect_all: self.add_any_any_redirect_rule() def add_any_any_redirect_rule(self): """Add an any->any flow classifier entry Add 1 flow classifier entry that will redirect all the traffic to the security partner The user will be able to delete/change it later """ context = n_context.get_admin_context() fc_plugin = directory.get_plugin(flowclassifier.FLOW_CLASSIFIER_EXT) # first check that there is no other flow classifier entry defined: fcs = fc_plugin.get_flow_classifiers(context) if len(fcs) > 0: return # Create any->any rule fc = {'name': 'redirect_all', 'description': 'Redirect all traffic', 'tenant_id': nsxv_constants.INTERNAL_TENANT_ID, 'l7_parameters': {}, 'ethertype': 'IPv4', 'protocol': None, 'source_port_range_min': None, 'source_port_range_max': None, 'destination_port_range_min': None, 'destination_port_range_max': None, 'source_ip_prefix': None, 'destination_ip_prefix': None, 'logical_source_port': None, 'logical_destination_port': None } fc_plugin.create_flow_classifier(context, {'flow_classifier': fc}) def get_redirect_fw_section_id(self): if not self._redirect_section_id: # try to find it self._redirect_section_id = self._nsxv.vcns.get_section_id( REDIRECT_FW_SECTION_NAME) if not self._redirect_section_id: # create it for the first time section = et.Element('section') section.attrib['name'] = REDIRECT_FW_SECTION_NAME self._nsxv.vcns.create_redirect_section(et.tostring(section)) self._redirect_section_id = self._nsxv.vcns.get_section_id( REDIRECT_FW_SECTION_NAME) return self._redirect_section_id def get_redirect_fw_section_uri(self): return '%s/%s/%s' % (nsxv_api.FIREWALL_PREFIX, nsxv_api.FIREWALL_REDIRECT_SEC_TYPE, self.get_redirect_fw_section_id()) def get_redirect_fw_section_from_backend(self): section_uri = 
self.get_redirect_fw_section_uri() section_resp = self._nsxv.vcns.get_section(section_uri) if section_resp and len(section_resp) > 1: xml_section = section_resp[1] return et.fromstring(xml_section) def update_redirect_section_in_backed(self, section): section_uri = self.get_redirect_fw_section_uri() self._nsxv.vcns.update_section( section_uri, et.tostring(section, encoding="us-ascii"), None) def _rule_ip_type(self, flow_classifier): if flow_classifier.get('ethertype') == 'IPv6': return 'Ipv6Address' return 'Ipv4Address' def _rule_ports(self, type, flow_classifier): min_port = flow_classifier.get(type + '_port_range_min') max_port = flow_classifier.get(type + '_port_range_max') return self._ports_list(min_port, max_port) def _ports_list(self, min_port, max_port): """Return a string representing the port/range""" if min_port == max_port: return str(min_port) return "%s-%s" % (min_port, max_port) def _rule_name(self, flow_classifier): # The name of the rule will include the name & id of the classifier # so we can later find it in order to update/delete it. 
# Both the flow classifier DB & the backend has max name length of 255 # so we may have to trim the name a bit return (flow_classifier.get('name')[:200] + '-' + flow_classifier.get('id')) def _is_the_same_rule(self, rule, flow_classifier_id): return rule.find('name').text.endswith(flow_classifier_id) def init_redirect_fw_rule(self, redirect_rule, flow_classifier): et.SubElement(redirect_rule, 'name').text = self._rule_name( flow_classifier) et.SubElement(redirect_rule, 'action').text = 'redirect' et.SubElement(redirect_rule, 'direction').text = 'inout' si_profile = et.SubElement(redirect_rule, 'siProfile') et.SubElement(si_profile, 'objectId').text = self._profile_id et.SubElement(redirect_rule, 'packetType').text = flow_classifier.get( 'ethertype').lower() # init the source & destination if flow_classifier.get('source_ip_prefix'): sources = et.SubElement(redirect_rule, 'sources') sources.attrib['excluded'] = 'false' source = et.SubElement(sources, 'source') et.SubElement(source, 'type').text = self._rule_ip_type( flow_classifier) et.SubElement(source, 'value').text = flow_classifier.get( 'source_ip_prefix') if flow_classifier.get('destination_ip_prefix'): destinations = et.SubElement(redirect_rule, 'destinations') destinations.attrib['excluded'] = 'false' destination = et.SubElement(destinations, 'destination') et.SubElement(destination, 'type').text = self._rule_ip_type( flow_classifier) et.SubElement(destination, 'value').text = flow_classifier.get( 'destination_ip_prefix') # init the service if (flow_classifier.get('destination_port_range_min') or flow_classifier.get('source_port_range_min')): services = et.SubElement(redirect_rule, 'services') service = et.SubElement(services, 'service') et.SubElement(service, 'isValid').text = 'true' if flow_classifier.get('source_port_range_min'): source_port = et.SubElement(service, 'sourcePort') source_port.text = self._rule_ports('source', flow_classifier) if flow_classifier.get('destination_port_range_min'): dest_port = 
et.SubElement(service, 'destinationPort') dest_port.text = self._rule_ports('destination', flow_classifier) prot = et.SubElement(service, 'protocolName') prot.text = flow_classifier.get('protocol').upper() # Add the classifier description if flow_classifier.get('description'): notes = et.SubElement(redirect_rule, 'notes') notes.text = flow_classifier.get('description') def _loc_fw_section(self): return locking.LockManager.get_lock('redirect-fw-section') @log_helpers.log_method_call def create_flow_classifier(self, context): """Create a redirect rule at the backend """ flow_classifier = context.current with self._loc_fw_section(): section = self.get_redirect_fw_section_from_backend() new_rule = et.SubElement(section, 'rule') self.init_redirect_fw_rule(new_rule, flow_classifier) self.update_redirect_section_in_backed(section) @log_helpers.log_method_call def update_flow_classifier(self, context): """Update the backend redirect rule """ flow_classifier = context.current with self._loc_fw_section(): section = self.get_redirect_fw_section_from_backend() redirect_rule = None for rule in section.iter('rule'): if self._is_the_same_rule(rule, flow_classifier['id']): redirect_rule = rule break if redirect_rule is None: msg = _("Failed to find redirect rule %s " "on backed") % flow_classifier['id'] raise exc.FlowClassifierException(message=msg) else: # The flowclassifier plugin currently supports updating only # name or description name = redirect_rule.find('name') name.text = self._rule_name(flow_classifier) notes = redirect_rule.find('notes') notes.text = flow_classifier.get('description') or '' self.update_redirect_section_in_backed(section) @log_helpers.log_method_call def delete_flow_classifier(self, context): """Delete the backend redirect rule """ flow_classifier_id = context.current['id'] with self._loc_fw_section(): section = self.get_redirect_fw_section_from_backend() redirect_rule = None for rule in section.iter('rule'): if self._is_the_same_rule(rule, 
flow_classifier_id): redirect_rule = rule section.remove(redirect_rule) break if redirect_rule is None: LOG.error("Failed to delete redirect rule %s: " "Could not find rule on backed", flow_classifier_id) # should not fail the deletion else: self.update_redirect_section_in_backed(section) @log_helpers.log_method_call def create_flow_classifier_precommit(self, context): """Validate the flow classifier data before committing the transaction The NSX-v redirect rules does not support: - logical ports - l7 parameters - source ports range / destination port range with more than 15 ports """ flow_classifier = context.current # Logical source port logical_source_port = flow_classifier['logical_source_port'] if logical_source_port is not None: msg = _('The NSXv driver does not support setting ' 'logical source port in FlowClassifier') raise exc.FlowClassifierBadRequest(message=msg) # Logical destination port logical_destination_port = flow_classifier['logical_destination_port'] if logical_destination_port is not None: msg = _('The NSXv driver does not support setting ' 'logical destination port in FlowClassifier') raise exc.FlowClassifierBadRequest(message=msg) # L7 parameters l7_params = flow_classifier['l7_parameters'] if l7_params is not None and len(l7_params.keys()) > 0: msg = _('The NSXv driver does not support setting ' 'L7 parameters in FlowClassifier') raise exc.FlowClassifierBadRequest(message=msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/flowclassifier/nsx_v/utils.py0000644000175000017500000000500500000000000027076 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.plugins import directory from oslo_log import log as logging LOG = logging.getLogger(__name__) SERVICE_INSERTION_SG_NAME = 'Service Insertion Security Group' SERVICE_INSERTION_RESOURCE = 'Service Insertion' # Using the constant defined here to avoid the need to clone networking-sfc # if the driver is not used. FLOW_CLASSIFIER_EXT = "flow_classifier" class NsxvServiceInsertionHandler(object): def __init__(self, core_plugin): super(NsxvServiceInsertionHandler, self).__init__() self._nsxv = core_plugin.nsx_v self._initialized = False def _initialize_handler(self): if not self._initialized: self._enabled = False self._sg_id = None if self.is_service_insertion_enabled(): self._sg_id = self.get_service_inserion_sg_id() if not self._sg_id: # failed to create the security group or the driver # was not configured LOG.error("Failed to enable service insertion. 
" "Security group not found.") self._enabled = False else: self._enabled = True self._initialized = True def is_service_insertion_enabled(self): # Note - this cannot be called during init, since the manager is busy if (directory.get_plugin(FLOW_CLASSIFIER_EXT)): return True return False def get_service_inserion_sg_id(self): # Note - this cannot be called during init, since the nsxv flow # classifier driver creates this group return self._nsxv.vcns.get_security_group_id( SERVICE_INSERTION_SG_NAME) @property def enabled(self): self._initialize_handler() return self._enabled @property def sg_id(self): self._initialize_handler() return self._sg_id ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.210254 vmware-nsx-15.0.1.dev143/vmware_nsx/services/fwaas/0000755000175000017500000000000000000000000022314 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/fwaas/__init__.py0000644000175000017500000000000000000000000024413 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.210254 vmware-nsx-15.0.1.dev143/vmware_nsx/services/fwaas/common/0000755000175000017500000000000000000000000023604 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/fwaas/common/__init__.py0000644000175000017500000000000000000000000025703 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/fwaas/common/api_replay_driver.py0000644000175000017500000001066200000000000027663 0ustar00coreycorey00000000000000# Copyright 2019 VMware, Inc. 
# All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_fwaas.db.firewall.v2 import firewall_db_v2 from neutron_fwaas.services.firewall.service_drivers.agents import agents from neutron_lib import constants as nl_constants class ApiReplayFirewallPluginDb(firewall_db_v2.FirewallPluginDb): """Override FWaaS agent DB actions to use given objects IDs""" def create_firewall_rule(self, context, firewall_rule): fwr = firewall_rule src_port_min, src_port_max = self._get_min_max_ports_from_range( fwr['source_port']) dst_port_min, dst_port_max = self._get_min_max_ports_from_range( fwr['destination_port']) with context.session.begin(subtransactions=True): fwr_db = firewall_db_v2.FirewallRuleV2( # Use given ID for api_replay support id=fwr.get('id'), tenant_id=fwr['tenant_id'], name=fwr['name'], description=fwr['description'], protocol=fwr['protocol'], ip_version=fwr['ip_version'], source_ip_address=fwr['source_ip_address'], destination_ip_address=fwr['destination_ip_address'], source_port_range_min=src_port_min, source_port_range_max=src_port_max, destination_port_range_min=dst_port_min, destination_port_range_max=dst_port_max, action=fwr['action'], enabled=fwr['enabled'], shared=fwr['shared']) context.session.add(fwr_db) return self._make_firewall_rule_dict(fwr_db) def create_firewall_policy(self, context, firewall_policy): """This method is manipulated to allow the creation of additional default firewall policy, and do not automatically ensure one exists """ fwp 
= firewall_policy with context.session.begin(subtransactions=True): # Use given ID for api_replay support fwp_db = firewall_db_v2.FirewallPolicy( id=fwp.get('id'), tenant_id=fwp['tenant_id'], name=fwp['name'], description=fwp['description'], audited=fwp['audited'], shared=fwp['shared']) context.session.add(fwp_db) self._set_rules_for_policy(context, fwp_db, fwp) return self._make_firewall_policy_dict(fwp_db) def create_firewall_group(self, context, firewall_group, default_fwg=False): """This method is manipulated to allow the creation of additional default firewall group, and do not automatically ensure one exists """ fwg = firewall_group tenant_id = fwg['tenant_id'] if firewall_group.get('status') is None: fwg['status'] = nl_constants.CREATED with context.session.begin(subtransactions=True): # Use given ID for api_replay support fwg_db = firewall_db_v2.FirewallGroup( id=fwg.get('id'), tenant_id=tenant_id, name=fwg['name'], description=fwg['description'], status=fwg['status'], ingress_firewall_policy_id=fwg['ingress_firewall_policy_id'], egress_firewall_policy_id=fwg['egress_firewall_policy_id'], admin_state_up=fwg['admin_state_up'], shared=fwg['shared']) context.session.add(fwg_db) self._set_ports_for_firewall_group(context, fwg_db, fwg) return self._make_firewall_group_dict(fwg_db) class ApiReplayFirewallAgentDriver(agents.FirewallAgentDriver): """FWaaS V2 agent driver for api-replay allowing POST with id.""" def __init__(self, *args, **kwargs): super(ApiReplayFirewallAgentDriver, self).__init__(*args, **kwargs) self.firewall_db = ApiReplayFirewallPluginDb() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/fwaas/common/fwaas_callbacks_v2.py0000644000175000017500000002644200000000000027675 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. 
# All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging from neutron.agent.l3 import router_info from neutron.common import config as neutron_config # noqa from neutron_lib import constants as nl_constants from neutron_lib import context as n_context from neutron_lib.plugins import directory LOG = logging.getLogger(__name__) try: from neutron_fwaas.common import fwaas_constants from neutron_fwaas.services.firewall.service_drivers.agents.l3reference \ import firewall_l3_agent_v2 except ImportError: # FWaaS project no found from vmware_nsx.services.fwaas.common import fwaas_mocks \ as firewall_l3_agent_v2 from vmware_nsx.services.fwaas.common import fwaas_mocks \ as fwaas_constants class DummyAgentApi(object): def is_router_in_namespace(self, router_id): return True class NsxFwaasCallbacksV2(firewall_l3_agent_v2.L3WithFWaaS): """Common NSX RPC callbacks for Firewall As A Service - V2.""" def __init__(self, with_rpc): # The super code needs a configuration object with the neutron host # and an agent_mode, which our driver doesn't use. 
neutron_conf = cfg.CONF neutron_conf.agent_mode = 'nsx' self.with_rpc = with_rpc super(NsxFwaasCallbacksV2, self).__init__(conf=neutron_conf) self.agent_api = DummyAgentApi() self.core_plugin = self._get_core_plugin() def start_rpc_listeners(self, host, conf): # Make sure RPC queue will be created only when needed if not self.with_rpc: return return super(NsxFwaasCallbacksV2, self).start_rpc_listeners(host, conf) @property def plugin_type(self): pass def _get_core_plugin(self): """Get the NSX core plugin""" core_plugin = directory.get_plugin() if core_plugin.is_tvd_plugin(): # get the plugin that match this driver core_plugin = core_plugin.get_plugin_by_type( self.plugin_type) return core_plugin # Override functions using the agent_api that is not used by our plugin def _get_firewall_group_ports(self, context, firewall_group, to_delete=False, require_new_plugin=False): """Returns in-namespace ports, either from firewall group dict if newer version of plugin or from project routers otherwise. NOTE: Vernacular move from "tenant" to "project" doesn't yet appear as a key in router or firewall group objects. """ fwg_port_ids = [] if self._has_port_insertion_fields(firewall_group): if to_delete: fwg_port_ids = firewall_group['del-port-ids'] else: fwg_port_ids = firewall_group['add-port-ids'] if (not firewall_group.get('del-port-ids') and not firewall_group.get('add-port-ids') and firewall_group.get('ports')): # No change in ports, but policy changed so all ports are # relevant fwg_port_ids = firewall_group['ports'] # Mark to the driver that this is not port deletion firewall_group['last-port'] = False elif not require_new_plugin: routers = self._get_routers_in_project( context, firewall_group['tenant_id']) for router in routers: if router.router['tenant_id'] == firewall_group['tenant_id']: fwg_port_ids.extend([p['id'] for p in router.internal_ports]) # Return in-namespace port objects. 
ports = self._get_in_ns_ports(fwg_port_ids, ignore_errors=to_delete) # On illegal ports - change FW status to Error if ports is None: self.fwplugin_rpc.set_firewall_group_status( context, firewall_group['id'], nl_constants.ERROR) return ports def _get_in_ns_ports(self, port_ids, ignore_errors=False): """Returns port objects in the local namespace, along with their router_info. """ context = n_context.get_admin_context() in_ns_ports = {} # This will be converted to a list later. for port_id in port_ids: # find the router of this port: port = self.core_plugin.get_port(context, port_id) # verify that this is a router interface port if port['device_owner'] != nl_constants.DEVICE_OWNER_ROUTER_INTF: if not ignore_errors: LOG.error("NSX-V3 FWaaS V2 plugin does not support %s " "ports", port['device_owner']) return # since this is a deletion of an illegal port, add this port # with a dummy router so that the FWaaS plugin will notice the # change and change the FW status. router_info = 'Dummy' else: router_id = port['device_id'] router = self.core_plugin.get_router(context, router_id) router_info = self._router_dict_to_obj(router) if router_info: if router_info in in_ns_ports: in_ns_ports[router_info].append(port_id) else: in_ns_ports[router_info] = [port_id] return list(in_ns_ports.items()) def delete_firewall_group(self, context, firewall_group, host): """Handles RPC from plugin to delete a firewall group. This method is overridden here in order to handle routers in Error state without ports, and make sure those are deleted. 
""" ports_for_fwg = self._get_firewall_group_ports( context, firewall_group, to_delete=True) if not ports_for_fwg: # FW without ports should be deleted without calling the driver self.fwplugin_rpc.firewall_group_deleted( context, firewall_group['id']) return return super(NsxFwaasCallbacksV2, self).delete_firewall_group( context, firewall_group, host) def _get_routers_in_project(self, context, project_id): return self.core_plugin.get_routers( context, filters={'project_id': [project_id]}) def _router_dict_to_obj(self, r): # The callbacks expect a router-info object with an agent config agent_conf = cfg.CONF agent_conf.metadata_access_mark = '0x1' return router_info.RouterInfo( None, r['id'], router=r, agent_conf=agent_conf, interface_driver=None, use_ipv6=False) def get_port_fwg(self, context, port_id): """Return the firewall group of this port if the FWaaS rules should be added to the backend router. """ if not self.fwaas_enabled: return False ctx = context.elevated() fwg_id = self._get_port_firewall_group_id(ctx, port_id) if fwg_id is None: # No FWaas Firewall was assigned to this port return # check the state of this firewall group fwg = self._get_fw_group_from_plugin(ctx, fwg_id) if fwg is not None: if fwg.get('status') in (nl_constants.ERROR, nl_constants.PENDING_DELETE): # Do not add rules of firewalls with errors LOG.warning("Port %(port)s will not get rules from firewall " "group %(fwg)s which is in %(status)s", {'port': port_id, 'fwg': fwg_id, 'status': fwg['status']}) return return fwg def _get_fw_group_from_plugin(self, context, fwg_id): # NOTE(asarfaty): currently there is no api to get a specific firewall fwg_list = self.fwplugin_rpc.get_firewall_groups_for_project(context) for fwg in fwg_list: if fwg['id'] == fwg_id: return fwg def _get_port_firewall_group_id(self, context, port_id): fw_plugin = directory.get_plugin(fwaas_constants.FIREWALL_V2) if fw_plugin: driver_db = fw_plugin.driver.firewall_db return driver_db.get_fwg_attached_to_port(context, 
port_id) def should_apply_firewall_to_router(self, context, router_id): """Return True if there are FWaaS rules that are attached to an interface of the given router. """ if not self.fwaas_enabled: return False ctx = context.elevated() router_interfaces = self.core_plugin._get_router_interfaces( ctx, router_id) for port in router_interfaces: fwg_id = self._get_port_firewall_group_id(ctx, port['id']) if fwg_id: # check the state of this firewall group fwg = self._get_fw_group_from_plugin(ctx, fwg_id) if fwg is not None: if fwg.get('status') not in (nl_constants.ERROR, nl_constants.PENDING_DELETE): # Found a router interface port with rules return True return False def delete_port(self, context, port_id): # Mark the FW group as inactive if this is the last port fwg = self.get_port_fwg(context, port_id) if (fwg and fwg.get('status') == nl_constants.ACTIVE and len(fwg.get('ports', [])) <= 1): self.fwplugin_rpc.set_firewall_group_status( context, fwg['id'], nl_constants.INACTIVE) class NsxCommonv3FwaasCallbacksV2(NsxFwaasCallbacksV2): """NSX-V3+Policy RPC callbacks for Firewall As A Service - V2.""" def should_apply_firewall_to_router(self, context, router_id): """Return True if the FWaaS rules should be added to this router.""" if not super(NsxCommonv3FwaasCallbacksV2, self).should_apply_firewall_to_router(context, router_id): return False # get all the relevant router info ctx_elevated = context.elevated() router_data = self.core_plugin.get_router(ctx_elevated, router_id) if not router_data: LOG.error("Couldn't read router %s data", router_id) return False # Check if the FWaaS driver supports this router if not self.internal_driver.should_apply_firewall_to_router( router_data): return False return True def router_with_fwg(self, context, router_interfaces): for port in router_interfaces: fwg = self.get_port_fwg(context, port['id']) if fwg and fwg.get('status') == nl_constants.ACTIVE: return True return False 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/fwaas/common/fwaas_driver_base.py0000644000175000017500000000723000000000000027626 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_log import helpers as log_helpers from oslo_log import log as logging try: from neutron_fwaas.services.firewall.service_drivers.agents.drivers \ import fwaas_base except ImportError: # FWaaS project no found from vmware_nsx.services.fwaas.common import fwaas_mocks \ as fwaas_base LOG = logging.getLogger(__name__) class EdgeFwaasDriverBaseV2(fwaas_base.FwaasDriverBase): """NSX Base driver for Firewall As A Service - V2.""" def __init__(self, driver_name): super(EdgeFwaasDriverBaseV2, self).__init__() self.driver_name = driver_name @log_helpers.log_method_call def create_firewall_group(self, agent_mode, apply_list, firewall_group): """Create the Firewall with a given policy. 
""" self._validate_firewall_group(firewall_group) self._update_backend_routers(apply_list, firewall_group['id']) @log_helpers.log_method_call def update_firewall_group(self, agent_mode, apply_list, firewall_group): """Remove previous policy and apply the new policy.""" self._validate_firewall_group(firewall_group) self._update_backend_routers(apply_list, firewall_group['id']) @log_helpers.log_method_call def delete_firewall_group(self, agent_mode, apply_list, firewall_group): """Delete firewall. Removes rules created by this instance from the backend firewall And add the default allow rule. """ self._update_backend_routers(apply_list, firewall_group['id']) @log_helpers.log_method_call def apply_default_policy(self, agent_mode, apply_list, firewall_group): """Apply the default policy (deny all). The backend firewall always has this policy (=deny all) as default, so we only need to delete the current rules. """ self._update_backend_routers(apply_list, firewall_group['id']) @abc.abstractmethod def _update_backend_routers(self, apply_list, fwg_id): """Update all the affected router on the backend""" pass def _validate_firewall_group(self, firewall_group): """Validate the rules in the firewall group""" for rule in firewall_group['egress_rule_list']: if (rule.get('source_ip_address') and not rule['source_ip_address'].startswith('0.0.0.0')): # Ignoring interface port as we cannot set it with the ip LOG.info("Rule %(id)s with source ips used in an egress " "policy: interface port will be ignored in the NSX " "rule", {'id': rule['id']}) for rule in firewall_group['ingress_rule_list']: if (rule.get('destination_ip_address') and not rule['destination_ip_address'].startswith('0.0.0.0')): # Ignoring interface port as we cannot set it with the ip LOG.info("Rule %(id)s with destination ips used in an " "ingress policy: interface port will be ignored " "in the NSX rule", {'id': rule['id']}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/fwaas/common/fwaas_mocks.py0000644000175000017500000000200600000000000026451 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This file contains FWaaS mocks, to allow the vmware nsx plugins to work when # FWaaS code does not exist, and FWaaS is not configured in neutron FIREWALL_V2 = 'FIREWALL_V2' class L3WithFWaaS(object): def __init__(self, **kwargs): self.fwaas_enabled = False class FwaasDriverBase(object): pass class FirewallPluginV2(object): pass class FirewallCallbacks(object): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/fwaas/common/utils.py0000644000175000017500000000176500000000000025327 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.plugins import directory try: from neutron_fwaas.common import fwaas_constants except ImportError: # FWaaS project no found from vmware_nsx.services.fwaas.common import fwaas_mocks \ as fwaas_constants def is_fwaas_v2_plugin_enabled(): fwaas_plugin = directory.get_plugin(fwaas_constants.FIREWALL_V2) if fwaas_plugin: return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/fwaas/common/v3_utils.py0000644000175000017500000000413300000000000025727 0ustar00coreycorey00000000000000# Copyright 2019 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from neutron_lib.api.definitions import constants as fwaas_consts from vmware_nsxlib.v3 import nsx_constants LOG = logging.getLogger(__name__) def translate_fw_rule_action(fwaas_action, fwaas_rule_id): """Translate FWaaS action to NSX action""" if fwaas_action == fwaas_consts.FWAAS_ALLOW: return nsx_constants.FW_ACTION_ALLOW if fwaas_action == fwaas_consts.FWAAS_DENY: return nsx_constants.FW_ACTION_DROP if fwaas_action == fwaas_consts.FWAAS_REJECT: # reject is not supported by the NSX edge firewall LOG.warning("Reject action is not supported by the NSX backend " "for edge firewall. 
Using %(action)s instead for " "rule %(id)s", {'action': nsx_constants.FW_ACTION_DROP, 'id': fwaas_rule_id}) return nsx_constants.FW_ACTION_DROP # Unexpected action LOG.error("Unsupported FWAAS action %(action)s for rule %(id)s", { 'action': fwaas_action, 'id': fwaas_rule_id}) def translate_fw_rule_protocol(fwaas_protocol): """Translate FWaaS L4 protocol to NSX protocol""" if fwaas_protocol.lower() == 'tcp': return nsx_constants.TCP if fwaas_protocol.lower() == 'udp': return nsx_constants.UDP if fwaas_protocol.lower() == 'icmp': # This will cover icmpv6 too, when adding the rule. return nsx_constants.ICMPV4 def translate_fw_rule_ports(ports): return [ports.replace(':', '-')] ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.210254 vmware-nsx-15.0.1.dev143/vmware_nsx/services/fwaas/nsx_p/0000755000175000017500000000000000000000000023443 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/fwaas/nsx_p/__init__.py0000644000175000017500000000000000000000000025542 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/fwaas/nsx_p/edge_fwaas_driver_v2.py0000644000175000017500000000272300000000000030070 0ustar00coreycorey00000000000000# Copyright 2019 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.plugins import directory from oslo_log import log as logging from vmware_nsx.services.fwaas.nsx_v3 import edge_fwaas_driver_base \ as base_driver LOG = logging.getLogger(__name__) FWAAS_DRIVER_NAME = 'Fwaas V2 NSX-P driver' class EdgeFwaasPDriverV2(base_driver.CommonEdgeFwaasV3Driver): """NSX-P driver for Firewall As A Service V2.""" def __init__(self): super(EdgeFwaasPDriverV2, self).__init__(FWAAS_DRIVER_NAME) self._core_plugin = None @property def core_plugin(self): """Get the NSX-P core plugin""" if not self._core_plugin: self._core_plugin = directory.get_plugin() # make sure plugin init was completed if not self._core_plugin.init_is_complete: self._core_plugin.init_complete(None, None, {}) return self._core_plugin ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/fwaas/nsx_p/fwaas_callbacks_v2.py0000644000175000017500000004602400000000000027532 0ustar00coreycorey00000000000000# Copyright 2019 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import random import netaddr from oslo_log import log as logging from neutron_lib.exceptions import firewall_v2 as exceptions from vmware_nsx.extensions import projectpluginmap from vmware_nsx.services.fwaas.common import fwaas_callbacks_v2 as \ com_callbacks from vmware_nsx.services.fwaas.common import v3_utils from vmware_nsxlib.v3 import exceptions as nsx_lib_exc from vmware_nsxlib.v3 import nsx_constants from vmware_nsxlib.v3.policy import constants as policy_constants from vmware_nsxlib.v3 import utils as nsxlib_utils LOG = logging.getLogger(__name__) GATEWAY_POLICY_NAME = 'Tier1 %s gateway policy' DEFAULT_RULE_NAME = 'Default LR Layer3 Rule' DEFAULT_RULE_ID = 'default_rule' RULE_NAME_PREFIX = 'Fwaas-' ROUTER_FW_TAG = 'os-router-firewall' class NsxpFwaasCallbacksV2(com_callbacks.NsxCommonv3FwaasCallbacksV2): """NSX-P RPC callbacks for Firewall As A Service V2.""" def __init__(self, with_rpc): super(NsxpFwaasCallbacksV2, self).__init__(with_rpc) self.internal_driver = None if self.fwaas_enabled: self.internal_driver = self.fwaas_driver @property def plugin_type(self): return projectpluginmap.NsxPlugins.NSX_P @property def nsxpolicy(self): return self.core_plugin.nsxpolicy def _get_default_backend_rule(self, router_id): """Return the default allow-all rule entry This rule entry will be added to the end of the rules list """ return self.nsxpolicy.gateway_policy.build_entry( DEFAULT_RULE_NAME, policy_constants.DEFAULT_DOMAIN, router_id, self._get_random_rule_id(DEFAULT_RULE_ID), description=DEFAULT_RULE_NAME, sequence_number=None, action=nsx_constants.FW_ACTION_ALLOW, scope=[self.nsxpolicy.tier1.get_path(router_id)], source_groups=None, dest_groups=None, direction=nsx_constants.IN_OUT) def _translate_service(self, project_id, router_id, rule): """Return the NSX Policy service id matching the FW rule service. L4 protocol service will be created per router-id & rule-id and the service id will reflect both, as will as the L4 protocol. 
This will allow the cleanup of the service by tags when the router is detached. """ ip_version = rule.get('ip_version', 4) if rule.get('protocol'): tags = self.nsxpolicy.build_v3_tags_payload( rule, resource_type='os-neutron-fwrule-id', project_name=project_id) tags = nsxlib_utils.add_v3_tag(tags, ROUTER_FW_TAG, router_id) l4_protocol = v3_utils.translate_fw_rule_protocol( rule.get('protocol')) srv_name = 'FW_rule_%s_%s_service' % (rule['id'], rule['protocol']) description = '%s service for FW rule %s of Tier1 %s' % ( rule['protocol'], rule['id'], router_id) if l4_protocol in [nsx_constants.TCP, nsx_constants.UDP]: if rule.get('destination_port') is None: destination_ports = [] else: destination_ports = v3_utils.translate_fw_rule_ports( rule['destination_port']) if rule.get('source_port') is None: source_ports = [] else: source_ports = v3_utils.translate_fw_rule_ports( rule['source_port']) srv_id = self.nsxpolicy.service.create_or_overwrite( srv_name, description=description, protocol=l4_protocol, dest_ports=destination_ports, source_ports=source_ports, tags=tags) elif l4_protocol == nsx_constants.ICMPV4: #TODO(asarfaty): Can use predefined service for ICMP srv_id = self.nsxpolicy.icmp_service.create_or_overwrite( srv_name, version=ip_version, tags=tags) return srv_id def _get_random_rule_id(self, rule_id): """Return a rule ID with random suffix to be used on the NSX Random sequence needs to be added to rule IDs, so that PUT command will replace all existing rules. Keeping the same rule id will require updating the rule revision as well. 
""" #TODO(asarfaty): add support for self created id in build_entry and # remove this method return '%s-%s' % (rule_id, str(random.randint(1, 10000000))) def _get_rule_ips_group_id(self, rule_id, direction): return '%s-%s' % (direction, rule_id) def _is_empty_cidr(self, cidr, fwaas_rule_id): net = netaddr.IPNetwork(cidr) if ((net.version == 4 and cidr.startswith('0.0.0.0')) or (net.version == 6 and str(net.ip) == "::")): LOG.warning("Unsupported FWaaS cidr %(cidr)s for rule %(id)s", {'cidr': cidr, 'id': fwaas_rule_id}) return True def _validate_cidr(self, cidr, fwaas_rule_id): error_msg = (_("Illegal FWaaS cidr %(cidr)s for rule %(id)s") % {'cidr': cidr, 'id': fwaas_rule_id}) # Validate that this is a legal & supported ipv4 / ipv6 cidr net = netaddr.IPNetwork(cidr) if net.version == 4: if net.prefixlen == 0: LOG.error(error_msg) raise self.driver_exception(driver=self.driver_name) elif net.version == 6: if net.prefixlen == 0: LOG.error(error_msg) raise self.driver_exception(driver=self.driver_name) else: LOG.error(error_msg) raise self.driver_exception(driver=self.driver_name) def _get_rule_cidr_group(self, project_id, router_id, rule, is_source, is_ingress): field = 'source_ip_address' if is_source else 'destination_ip_address' direction_text = 'source' if is_source else 'destination' if (rule.get(field) and not self._is_empty_cidr(rule[field], rule['id'])): # Create a group for ips group_ips = rule[field] group_id = self._get_rule_ips_group_id(rule['id'], direction_text) self._validate_cidr(group_ips, rule['id']) expr = self.nsxpolicy.group.build_ip_address_expression( [group_ips]) tags = self.nsxpolicy.build_v3_tags_payload( rule, resource_type='os-neutron-fwrule-id', project_name=project_id) tags = nsxlib_utils.add_v3_tag(tags, ROUTER_FW_TAG, router_id) self.nsxpolicy.group.create_or_overwrite_with_conditions( "FW_rule_%s_%s" % (rule['id'], direction_text), policy_constants.DEFAULT_DOMAIN, group_id=group_id, description='%s: %s' % (direction_text, group_ips), 
conditions=[expr], tags=tags) return group_id def _create_network_group(self, router_id, neutron_net_id): scope_and_tag = "%s|%s" % ('os-neutron-net-id', neutron_net_id) tags = [] tags = nsxlib_utils.add_v3_tag(tags, ROUTER_FW_TAG, router_id) expr = self.nsxpolicy.group.build_condition( cond_val=scope_and_tag, cond_key=policy_constants.CONDITION_KEY_TAG, cond_member_type=nsx_constants.TARGET_TYPE_LOGICAL_SWITCH) group_id = '%s-%s' % (router_id, neutron_net_id) self.nsxpolicy.group.create_or_overwrite_with_conditions( "Segment_%s" % neutron_net_id, policy_constants.DEFAULT_DOMAIN, group_id=group_id, description='Group for segment %s' % neutron_net_id, conditions=[expr], tags=tags) return group_id def _translate_rules(self, project_id, router_id, segment_group, fwaas_rules, is_ingress, logged=False): """Translate a list of FWaaS rules to NSX rule structure""" translated_rules = [] for rule in fwaas_rules: if not rule['enabled']: # skip disabled rules continue # Make sure the rule has a name, and it starts with the prefix # (backend max name length is 255) if rule.get('name'): rule_name = RULE_NAME_PREFIX + rule['name'] else: rule_name = RULE_NAME_PREFIX + rule['id'] rule_name = rule_name[:255] # Set rule ID with a random suffix rule_id = self._get_random_rule_id(rule['id']) action = v3_utils.translate_fw_rule_action( rule['action'], rule['id']) if not action: raise exceptions.FirewallInternalDriverError( driver=self.internal_driver.driver_name) src_group = self._get_rule_cidr_group( project_id, router_id, rule, is_source=True, is_ingress=is_ingress) if not is_ingress and not src_group: src_group = segment_group dest_group = self._get_rule_cidr_group( project_id, router_id, rule, is_source=False, is_ingress=is_ingress) if is_ingress and not dest_group: dest_group = segment_group srv_id = self._translate_service(project_id, router_id, rule) direction = nsx_constants.IN if is_ingress else nsx_constants.OUT ip_protocol = (nsx_constants.IPV4 if rule.get('ip_version', 4) 
== 4 else nsx_constants.IPV6) rule_entry = self.nsxpolicy.gateway_policy.build_entry( rule_name, policy_constants.DEFAULT_DOMAIN, router_id, rule_id, description=rule.get('description'), action=action, source_groups=[src_group] if src_group else None, dest_groups=[dest_group] if dest_group else None, service_ids=[srv_id] if srv_id else None, ip_protocol=ip_protocol, logged=logged, scope=[self.nsxpolicy.tier1.get_path(router_id)], direction=direction) translated_rules.append(rule_entry) return translated_rules def _get_port_translated_rules(self, project_id, router_id, neutron_net_id, firewall_group, plugin_rules): """Return the list of translated FWaaS rules per port Add the egress/ingress rules of this port + default drop rules in each direction for this port. """ net_group_id = self._create_network_group( router_id, neutron_net_id) port_rules = [] # Add the firewall group ingress/egress rules only if the fw is up if firewall_group['admin_state_up']: port_rules.extend(self._translate_rules( project_id, router_id, net_group_id, firewall_group['ingress_rule_list'], is_ingress=True)) port_rules.extend(self._translate_rules( project_id, router_id, net_group_id, firewall_group['egress_rule_list'], is_ingress=False)) # Add the per-port plugin rules if plugin_rules and isinstance(plugin_rules, list): port_rules.extend(plugin_rules) # Add ingress/egress block rules for this port port_rules.extend([ self.nsxpolicy.gateway_policy.build_entry( "Block port ingress", policy_constants.DEFAULT_DOMAIN, router_id, self._get_random_rule_id( DEFAULT_RULE_ID + neutron_net_id + 'ingress'), action=nsx_constants.FW_ACTION_DROP, dest_groups=[net_group_id], scope=[self.nsxpolicy.tier1.get_path(router_id)], direction=nsx_constants.IN), self.nsxpolicy.gateway_policy.build_entry( "Block port egress", policy_constants.DEFAULT_DOMAIN, router_id, self._get_random_rule_id( DEFAULT_RULE_ID + neutron_net_id + 'egress'), action=nsx_constants.FW_ACTION_DROP, 
scope=[self.nsxpolicy.tier1.get_path(router_id)], source_groups=[net_group_id], direction=nsx_constants.OUT)]) return port_rules def _set_rules_order(self, fw_rules): # TODO(asarfaty): Consider adding vmware-nsxlib api for this # add sequence numbers to keep rules in order seq_num = 0 for rule in fw_rules: rule.attrs['sequence_number'] = seq_num seq_num += 1 def update_router_firewall(self, context, router_id, router, router_interfaces, called_from_fw=False): """Rewrite all the FWaaS v2 rules in the router edge firewall This method should be called on FWaaS updates, and on router interfaces changes. The purpose of called_from_fw is to differ between fw calls and other router calls, and if it is True - add the service router accordingly. """ plugin = self.core_plugin project_id = router['project_id'] fw_rules = [] router_with_fw = False # Add firewall rules per port attached to a firewall group for port in router_interfaces: # Check if this port has a firewall fwg = self.get_port_fwg(context, port['id']) if fwg: router_with_fw = True # Add plugin additional allow rules plugin_rules = self.core_plugin.get_extra_fw_rules( context, router_id, port['id']) # Add the FWaaS rules for this port:ingress/egress firewall # rules + default ingress/egress drop rule for this port fw_rules.extend(self._get_port_translated_rules( project_id, router_id, port['network_id'], fwg, plugin_rules)) # Add a default allow-all rule to all other traffic & ports fw_rules.append(self._get_default_backend_rule(router_id)) self._set_rules_order(fw_rules) # Update the backend router firewall sr_exists_on_backend = plugin.verify_sr_at_backend(router_id) if called_from_fw: # FW action required if router_with_fw: # Firewall needed and no NSX service router: create it. 
if not sr_exists_on_backend: plugin.create_service_router( context, router_id, update_firewall=False) sr_exists_on_backend = True else: # First, check if other services exist and use the sr router_with_services = plugin.service_router_has_services( context, router_id, router=router) if not router_with_services and sr_exists_on_backend: # No other services that require service router: delete it # This also deleted the gateway policy. self.core_plugin.delete_service_router(router_id) sr_exists_on_backend = False if sr_exists_on_backend: if router_with_fw: self.create_or_update_router_gateway_policy(context, router_id, router, fw_rules) else: # Do all the cleanup once the router has no more FW rules # create or update the edge firewall # TODO(asarfaty): Consider keeping the FW with default allow # rule instead of deletion as it may be created again soon self.delete_router_gateway_policy(router_id) self.cleanup_router_fw_resources(router_id) def create_or_update_router_gateway_policy(self, context, router_id, router, fw_rules): """Create/Overwrite gateway policy for a router with firewall rules""" # Check if the gateway policy already exists try: self.nsxpolicy.gateway_policy.get(policy_constants.DEFAULT_DOMAIN, map_id=router_id, silent=True) except nsx_lib_exc.ResourceNotFound: LOG.info("Going to create gateway policy for router %s", router_id) else: # only update the rules of this policy self.nsxpolicy.gateway_policy.update_entries( policy_constants.DEFAULT_DOMAIN, router_id, fw_rules, category=policy_constants.CATEGORY_LOCAL_GW) return tags = self.nsxpolicy.build_v3_tags_payload( router, resource_type='os-neutron-router-id', project_name=context.tenant_name) policy_name = GATEWAY_POLICY_NAME % router_id self.nsxpolicy.gateway_policy.create_with_entries( policy_name, policy_constants.DEFAULT_DOMAIN, map_id=router_id, description=policy_name, tags=tags, entries=fw_rules, category=policy_constants.CATEGORY_LOCAL_GW) def delete_router_gateway_policy(self, router_id): 
"""Delete the gateway policy associated with a router, it it exists. Should be called when the router is deleted / FW removed from it """ try: self.nsxpolicy.gateway_policy.get(policy_constants.DEFAULT_DOMAIN, map_id=router_id, silent=True) except nsx_lib_exc.ResourceNotFound: return self.nsxpolicy.gateway_policy.delete(policy_constants.DEFAULT_DOMAIN, map_id=router_id) # Also delete all groups & services self.cleanup_router_fw_resources(router_id) def cleanup_router_fw_resources(self, router_id): tags_to_search = [{'scope': ROUTER_FW_TAG, 'tag': router_id}] # Delete per rule & per network groups groups = self.nsxpolicy.search_by_tags( tags_to_search, self.nsxpolicy.group.entry_def.resource_type())['results'] for group in groups: self.nsxpolicy.group.delete(policy_constants.DEFAULT_DOMAIN, group['id']) services = self.nsxpolicy.search_by_tags( tags_to_search, self.nsxpolicy.service.parent_entry_def.resource_type())['results'] for srv in services: self.nsxpolicy.service.delete(srv['id']) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.210254 vmware-nsx-15.0.1.dev143/vmware_nsx/services/fwaas/nsx_tv/0000755000175000017500000000000000000000000023635 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/fwaas/nsx_tv/__init__.py0000644000175000017500000000000000000000000025734 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/fwaas/nsx_tv/edge_fwaas_driver_v2.py0000644000175000017500000001067200000000000030264 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import helpers as log_helpers from oslo_log import log as logging from neutron_lib.exceptions import firewall_v2 as exceptions from vmware_nsx.extensions import projectpluginmap from vmware_nsx.plugins.nsx import utils as tvd_utils from vmware_nsx.services.fwaas.nsx_v import edge_fwaas_driver_v2 as v_driver from vmware_nsx.services.fwaas.nsx_v3 import edge_fwaas_driver_v2 as t_driver LOG = logging.getLogger(__name__) FWAAS_DRIVER_NAME = 'FwaaS V2 NSX-TV driver' try: from neutron_fwaas.services.firewall.service_drivers.agents.drivers \ import fwaas_base_v2 except ImportError: # FWaaS project no found from vmware_nsx.services.fwaas.common import fwaas_mocks \ as fwaas_base_v2 class EdgeFwaasTVDriverV2(fwaas_base_v2.FwaasDriverBase): """NSX-TV driver for Firewall As A Service - V2. 
""" def __init__(self): super(EdgeFwaasTVDriverV2, self).__init__() self.driver_name = FWAAS_DRIVER_NAME # supported drivers: self.drivers = {} try: self.drivers[projectpluginmap.NsxPlugins.NSX_T] = ( t_driver.EdgeFwaasV3DriverV2()) except Exception: LOG.warning("EdgeFwaasTVDriverV2 failed to initialize the NSX-T " "driver") self.drivers[projectpluginmap.NsxPlugins.NSX_T] = None try: self.drivers[projectpluginmap.NsxPlugins.NSX_V] = ( v_driver.EdgeFwaasVDriverV2()) except Exception: LOG.warning("EdgeFwaasTVDriverV2 failed to initialize the NSX-V " "driver") self.drivers[projectpluginmap.NsxPlugins.NSX_V] = None def get_T_driver(self): return self.drivers[projectpluginmap.NsxPlugins.NSX_T] def get_V_driver(self): return self.drivers[projectpluginmap.NsxPlugins.NSX_V] def _get_driver_for_project(self, project): plugin_type = tvd_utils.get_tvd_plugin_type_for_project(project) if not self.drivers.get(plugin_type): LOG.error("Project %(project)s with plugin %(plugin)s has no " "support for FWaaS V2", {'project': project, 'plugin': plugin_type}) raise exceptions.FirewallInternalDriverError( driver=self.driver_name) return self.drivers[plugin_type] @log_helpers.log_method_call def create_firewall_group(self, agent_mode, apply_list, firewall_group): d = self._get_driver_for_project(firewall_group['tenant_id']) return d.create_firewall_group(agent_mode, apply_list, firewall_group) @log_helpers.log_method_call def update_firewall_group(self, agent_mode, apply_list, firewall_group): d = self._get_driver_for_project(firewall_group['tenant_id']) return d.update_firewall_group(agent_mode, apply_list, firewall_group) @log_helpers.log_method_call def delete_firewall_group(self, agent_mode, apply_list, firewall_group): d = self._get_driver_for_project(firewall_group['tenant_id']) return d.delete_firewall_group(agent_mode, apply_list, firewall_group) @log_helpers.log_method_call def apply_default_policy(self, agent_mode, apply_list, firewall_group): d = 
self._get_driver_for_project(firewall_group['tenant_id']) return d.apply_default_policy(agent_mode, apply_list, firewall_group) def translate_addresses_to_target(self, cidrs, plugin_type, fwaas_rule_id=None): # This api is called directly from the core plugin if not self.drivers.get(plugin_type): LOG.error("%s has no support for plugin %s", self.driver_name, plugin_type) else: return self.drivers[plugin_type].translate_addresses_to_target( cidrs, fwaas_rule_id=fwaas_rule_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/fwaas/nsx_tv/plugin_v2.py0000644000175000017500000000233200000000000026114 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from vmware_nsx.plugins.nsx import utils as tvd_utils try: from neutron_fwaas.services.firewall import fwaas_plugin_v2 except ImportError: # FWaaS project no found from vmware_nsx.services.fwaas.common import fwaas_mocks \ as fwaas_plugin_v2 @tvd_utils.filter_plugins class FwaasTVPluginV2(fwaas_plugin_v2.FirewallPluginV2): """NSX-TV plugin for Firewall As A Service - V2. 
This plugin adds separation between T/V instances """ methods_to_separate = ['get_firewall_groups', 'get_firewall_policies', 'get_firewall_rules'] ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.210254 vmware-nsx-15.0.1.dev143/vmware_nsx/services/fwaas/nsx_v/0000755000175000017500000000000000000000000023451 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/fwaas/nsx_v/__init__.py0000644000175000017500000000000000000000000025550 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/fwaas/nsx_v/edge_fwaas_driver_v2.py0000644000175000017500000001370200000000000030075 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib import context as n_context from oslo_log import log as logging from neutron_lib.exceptions import firewall_v2 as exceptions from neutron_lib.plugins import directory from vmware_nsx.common import locking from vmware_nsx.extensions import projectpluginmap from vmware_nsx.plugins.nsx_v.vshield import edge_utils from vmware_nsx.services.fwaas.common import fwaas_driver_base LOG = logging.getLogger(__name__) FWAAS_DRIVER_NAME = 'Fwaas V2 NSX-V driver' class EdgeFwaasVDriverV2(fwaas_driver_base.EdgeFwaasDriverBaseV2): """NSX-V driver for Firewall As A Service - V2.""" def __init__(self): super(EdgeFwaasVDriverV2, self).__init__(FWAAS_DRIVER_NAME) self._core_plugin = None @property def core_plugin(self): """Get the NSX-V core plugin""" if not self._core_plugin: self._core_plugin = directory.get_plugin() if self._core_plugin.is_tvd_plugin(): self._core_plugin = self._core_plugin.get_plugin_by_type( projectpluginmap.NsxPlugins.NSX_V) if not self._core_plugin: # The NSX-V plugin was not initialized return # make sure plugin init was completed if not self._core_plugin.init_is_complete: self._core_plugin.init_complete(None, None, {}) return self._core_plugin def should_apply_firewall_to_router(self, router_data, raise_exception=True): """Return True if the firewall rules allowed to be added the router Return False in those cases: - router without an external gateway (rule may be added later when there is a gateway) Raise an exception if the router is unsupported (and raise_exception is True): - shared router (not supported) - md proxy router (not supported) """ if (not router_data.get('distributed') and router_data.get('router_type') == 'shared'): LOG.error("Cannot apply firewall to shared router %s", router_data['id']) if raise_exception: raise exceptions.FirewallInternalDriverError( driver=self.driver_name) return False if router_data.get('name', '').startswith('metadata_proxy_router'): LOG.error("Cannot apply firewall to the metadata proxy router %s", 
router_data['id']) if raise_exception: raise exceptions.FirewallInternalDriverError( driver=self.driver_name) return False if not router_data.get('external_gateway_info'): LOG.info("Cannot apply firewall to router %s with no gateway", router_data['id']) return False return True def _update_backend_routers(self, apply_list, fwg_id): """Update all the affected routers on the backend""" LOG.info("Updating routers firewall for firewall group %s", fwg_id) context = n_context.get_admin_context() routers = set() routers_mapping = {} # the apply_list is a list of tuples: routerInfo, port-id for router_info, port_id in apply_list: # Skip dummy entries that were added only to avoid errors if isinstance(router_info, str): continue # Skip unsupported routers if not self.should_apply_firewall_to_router(router_info.router): continue lookup_id = None router_id = router_info.router_id if router_info.router.get('distributed'): # Distributed router (need to update the plr edge) lookup_id = self.core_plugin.edge_manager.get_plr_by_tlr_id( context, router_id) else: # Exclusive router lookup_id = router_id if lookup_id: # look for the edge id in the DB edge_id = edge_utils.get_router_edge_id(context, lookup_id) if edge_id: routers_mapping[router_id] = {'edge_id': edge_id, 'lookup_id': lookup_id} routers.add(router_id) # update each router once using the core plugin for router_id in routers: router_db = self.core_plugin._get_router(context, router_id) edge_id = routers_mapping[router_id]['edge_id'] LOG.info("Updating FWaaS rules for router %s on edge %s", router_id, edge_id) router_lookup_id = routers_mapping[router_id]['lookup_id'] try: with locking.LockManager.get_lock(str(edge_id)): self.core_plugin.update_router_firewall( context, router_lookup_id, router_db) except Exception as e: # catch known library exceptions and raise Fwaas generic # exception LOG.error("Failed to update firewall rules on edge " "%(edge_id)s for router %(rtr)s: %(e)s", {'e': e, 'rtr': router_id, 'edge_id': 
edge_id}) raise exceptions.FirewallInternalDriverError( driver=self.driver_name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/fwaas/nsx_v/fwaas_callbacks_v2.py0000644000175000017500000001710200000000000027533 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from vmware_nsx.db import nsxv_db from vmware_nsx.extensions import projectpluginmap from vmware_nsx.plugins.nsx_v.vshield import edge_firewall_driver from vmware_nsx.services.fwaas.common import fwaas_callbacks_v2 as \ com_callbacks from vmware_nsx.services.fwaas.nsx_tv import edge_fwaas_driver_v2 as tv_driver LOG = logging.getLogger(__name__) RULE_NAME_PREFIX = 'Fwaas-' class NsxvFwaasCallbacksV2(com_callbacks.NsxFwaasCallbacksV2): """NSX-V RPC callbacks for Firewall As A Service - V2.""" def __init__(self, with_rpc): super(NsxvFwaasCallbacksV2, self).__init__(with_rpc) # update the fwaas driver in case of TV plugin self.internal_driver = None if self.fwaas_enabled: if self.fwaas_driver.driver_name == tv_driver.FWAAS_DRIVER_NAME: self.internal_driver = self.fwaas_driver.get_V_driver() else: self.internal_driver = self.fwaas_driver @property def plugin_type(self): return projectpluginmap.NsxPlugins.NSX_V def should_apply_firewall_to_router(self, context, router, router_id): """Return True if the FWaaS 
rules should be added to this router.""" # in case of a distributed-router: # router['id'] is the id of the neutron router (=tlr) # and router_id is the plr/tlr (the one that is being updated) # First check if there are rules attached to this router if not super(NsxvFwaasCallbacksV2, self).should_apply_firewall_to_router( context, router['id']): return False # get all the relevant router info # ("router" does not have all the fields) ctx_elevated = context.elevated() router_data = self.core_plugin.get_router(ctx_elevated, router['id']) if not router_data: LOG.error("Couldn't read router %s data", router['id']) return False if router_data.get('distributed'): if router_id == router['id']: # Do not add firewall rules on the tlr router. return False # Check if the FWaaS driver supports this router if not self.internal_driver.should_apply_firewall_to_router( router_data, raise_exception=False): return False return True def get_fwaas_rules_for_router(self, context, router_id, router_db, edge_id): """Return the list of (translated) FWaaS rules for this router.""" ctx_elevated = context.elevated() router_interfaces = self.core_plugin._get_router_interfaces( ctx_elevated, router_id) fw_rules = [] # Add firewall rules per port attached to a firewall group for port in router_interfaces: fwg = self.get_port_fwg(ctx_elevated, port['id']) if fwg: router_dict = {} self.core_plugin._extend_nsx_router_dict( router_dict, router_db) if router_dict['distributed']: # The vnic_id is ignored for distributed routers, so # each rule will be applied to all the interfaces. 
vnic_id = None else: # get the interface vnic edge_vnic_bind = nsxv_db.get_edge_vnic_binding( context.session, edge_id, port['network_id']) vnic_id = 'vnic-index-%s' % edge_vnic_bind.vnic_index # Add the FWaaS rules for this port fw_rules.extend( self.get_port_translated_rules(vnic_id, fwg)) return fw_rules def get_port_translated_rules(self, vnic_id, firewall_group): """Return the list of translated rules per port Ingress/Egress firewall rules + default ingress/egress drop """ port_rules = [] logged = False # Add the firewall group ingress/egress rules only if the fw is up if firewall_group['admin_state_up']: port_rules.extend(self.translate_rules( firewall_group['ingress_rule_list'], replace_dest=vnic_id, logged=logged, is_ingress=True)) port_rules.extend(self.translate_rules( firewall_group['egress_rule_list'], replace_src=vnic_id, logged=logged, is_ingress=False)) # Add ingress/egress block rules for this port default_ingress = {'name': "Block port ingress", 'action': edge_firewall_driver.FWAAS_DENY, 'logged': logged} default_egress = {'name': "Block port egress", 'action': edge_firewall_driver.FWAAS_DENY, 'logged': logged} if vnic_id: default_ingress['destination_vnic_groups'] = [vnic_id] default_egress['source_vnic_groups'] = [vnic_id] port_rules.extend([default_ingress, default_egress]) return port_rules def translate_rules(self, fwaas_rules, replace_dest=None, replace_src=None, logged=False, is_ingress=True): translated_rules = [] for rule in fwaas_rules: if not rule['enabled']: # skip disabled rules continue # Make sure the rule has a name, and it starts with the prefix # (backend max name length is 30) if rule.get('name'): rule['name'] = RULE_NAME_PREFIX + rule['name'] else: rule['name'] = RULE_NAME_PREFIX + rule['id'] rule['name'] = rule['name'][:30] if rule.get('id'): # update rules ID to prevent DB duplications in # NsxvEdgeFirewallRuleBinding if is_ingress: rule['id'] = ('ingress-%s' % rule['id'])[:36] else: rule['id'] = ('egress-%s' % 
rule['id'])[:36] # source & destination should be lists if (rule.get('destination_ip_address') and not rule['destination_ip_address'].startswith('0.0.0.0')): rule['destination_ip_address'] = [ rule['destination_ip_address']] else: if replace_dest: rule['destination_vnic_groups'] = [replace_dest] if 'destination_ip_address' in rule: del rule['destination_ip_address'] if (rule.get('source_ip_address') and not rule['source_ip_address'].startswith('0.0.0.0')): rule['source_ip_address'] = [rule['source_ip_address']] else: if replace_src: rule['source_vnic_groups'] = [replace_src] if 'source_ip_address' in rule: del rule['source_ip_address'] if logged: rule['logged'] = True translated_rules.append(rule) return translated_rules ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.210254 vmware-nsx-15.0.1.dev143/vmware_nsx/services/fwaas/nsx_v3/0000755000175000017500000000000000000000000023534 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/fwaas/nsx_v3/__init__.py0000644000175000017500000000000000000000000025633 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/fwaas/nsx_v3/edge_fwaas_driver_base.py0000644000175000017500000000560000000000000030541 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import context as n_context from neutron_lib.exceptions import firewall_v2 as exceptions from oslo_log import log as logging from vmware_nsx.services.fwaas.common import fwaas_driver_base LOG = logging.getLogger(__name__) class CommonEdgeFwaasV3Driver(fwaas_driver_base.EdgeFwaasDriverBaseV2): """Base class for NSX-V3/Policy driver for Firewall As A Service V2.""" def __init__(self, driver_name): super(CommonEdgeFwaasV3Driver, self).__init__(driver_name) self.driver_exception = exceptions.FirewallInternalDriverError self._core_plugin = None @property def core_plugin(self): """Get the core plugin - should be implemented by each driver""" pass def _update_backend_routers(self, apply_list, fwg_id): """Update all the affected router on the backend""" LOG.info("Updating routers firewall for firewall group %s", fwg_id) context = n_context.get_admin_context() routers = set() # the apply_list is a list of tuples: routerInfo, port-id for router_info, port_id in apply_list: # Skip dummy entries that were added only to avoid errors if isinstance(router_info, str): continue # Skip unsupported routers if not self.should_apply_firewall_to_router(router_info.router): continue routers.add(router_info.router_id) # update each router once for router_id in routers: try: self.core_plugin.update_router_firewall(context, router_id, from_fw=True) except Exception as e: # The core plugin failed to update the firewall LOG.error("Failed to update NSX edge firewall for router %s: " "%s", router_id, e) raise self.driver_exception(driver=self.driver_name) def 
should_apply_firewall_to_router(self, router_data): """Return True if the firewall rules should be added the router""" if not router_data.get('external_gateway_info'): LOG.info("Cannot apply firewall to router %s with no gateway", router_data['id']) return False return True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/fwaas/nsx_v3/edge_fwaas_driver_v2.py0000644000175000017500000002223600000000000030162 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import netaddr from neutron_lib.plugins import directory from oslo_log import log as logging from vmware_nsx.extensions import projectpluginmap from vmware_nsx.services.fwaas.common import v3_utils from vmware_nsx.services.fwaas.nsx_v3 import edge_fwaas_driver_base \ as base_driver from vmware_nsxlib.v3 import nsx_constants as consts LOG = logging.getLogger(__name__) FWAAS_DRIVER_NAME = 'Fwaas V2 NSX-V3 driver' RULE_NAME_PREFIX = 'Fwaas-' DEFAULT_RULE_NAME = 'Default LR Layer3 Rule' class EdgeFwaasV3DriverV2(base_driver.CommonEdgeFwaasV3Driver): """NSX-V3 driver for Firewall As A Service - V2.""" def __init__(self): super(EdgeFwaasV3DriverV2, self).__init__(FWAAS_DRIVER_NAME) @property def core_plugin(self): """Get the NSX-V3 core plugin""" if not self._core_plugin: self._core_plugin = directory.get_plugin() if self._core_plugin.is_tvd_plugin(): self._core_plugin = self._core_plugin.get_plugin_by_type( projectpluginmap.NsxPlugins.NSX_T) if not self._core_plugin: # The nsx-t plugin was not initialized return # make sure plugin init was completed if not self._core_plugin.init_is_complete: self._core_plugin.init_complete(None, None, {}) return self._core_plugin @property def nsxlib(self): return self.core_plugin.nsxlib @property def nsx_firewall(self): return self.nsxlib.firewall_section @property def nsx_router(self): return self.nsxlib.logical_router def _translate_cidr(self, cidr, fwaas_rule_id): # Validate that this is a legal & supported ipv4 / ipv6 cidr error_msg = (_("Unsupported FWAAS cidr %(cidr)s for rule %(id)s") % { 'cidr': cidr, 'id': fwaas_rule_id}) net = netaddr.IPNetwork(cidr) if net.version == 4: if cidr.startswith('0.0.0.0'): # Treat as ANY and just log warning LOG.warning(error_msg) return if net.prefixlen == 0: LOG.error(error_msg) raise self.driver_exception(driver=self.driver_name) elif net.version == 6: if str(net.ip) == "::" or net.prefixlen == 0: LOG.error(error_msg) raise self.driver_exception(driver=self.driver_name) else: 
LOG.error(error_msg) raise self.driver_exception(driver=self.driver_name) return self.nsx_firewall.get_ip_cidr_reference( cidr, consts.IPV6 if net.version == 6 else consts.IPV4) def translate_addresses_to_target(self, cidrs, plugin_type, fwaas_rule_id=None): translated_cidrs = [] for ip in cidrs: res = self._translate_cidr(ip, fwaas_rule_id) if res: translated_cidrs.append(res) return translated_cidrs def _translate_services(self, fwaas_rule): l4_protocol = v3_utils.translate_fw_rule_protocol( fwaas_rule['protocol']) if l4_protocol in [consts.TCP, consts.UDP]: source_ports = [] destination_ports = [] if fwaas_rule.get('source_port'): source_ports = v3_utils.translate_fw_rule_ports( fwaas_rule['source_port']) if fwaas_rule.get('destination_port'): destination_ports = v3_utils.translate_fw_rule_ports( fwaas_rule['destination_port']) return [self.nsx_firewall.get_nsservice( consts.L4_PORT_SET_NSSERVICE, l4_protocol=l4_protocol, source_ports=source_ports, destination_ports=destination_ports)] elif l4_protocol == consts.ICMPV4: # Add both icmp v4 & v6 services return [ self.nsx_firewall.get_nsservice( consts.ICMP_TYPE_NSSERVICE, protocol=consts.ICMPV4), self.nsx_firewall.get_nsservice( consts.ICMP_TYPE_NSSERVICE, protocol=consts.ICMPV6), ] def _translate_rules(self, fwaas_rules, replace_src=None, replace_dest=None, logged=False): translated_rules = [] for rule in fwaas_rules: nsx_rule = {} if not rule['enabled']: # skip disabled rules continue # Make sure the rule has a name, and it starts with the prefix # (backend max name length is 255) if rule.get('name'): name = RULE_NAME_PREFIX + rule['name'] else: name = RULE_NAME_PREFIX + rule['id'] nsx_rule['display_name'] = name[:255] if rule.get('description'): nsx_rule['notes'] = rule['description'] nsx_rule['action'] = v3_utils.translate_fw_rule_action( rule['action'], rule['id']) if not nsx_rule['action']: raise self.driver_exception(driver=self.driver_name) if (rule.get('destination_ip_address') and not 
rule['destination_ip_address'].startswith('0.0.0.0')): nsx_rule['destinations'] = self.translate_addresses_to_target( [rule['destination_ip_address']], rule['id']) elif replace_dest: # set this value as the destination logical switch # (only if no dest IP) nsx_rule['destinations'] = [{'target_type': 'LogicalSwitch', 'target_id': replace_dest}] if (rule.get('source_ip_address') and not rule['source_ip_address'].startswith('0.0.0.0')): nsx_rule['sources'] = self.translate_addresses_to_target( [rule['source_ip_address']], rule['id']) elif replace_src: # set this value as the source logical switch, # (only if no source IP) nsx_rule['sources'] = [{'target_type': 'LogicalSwitch', 'target_id': replace_src}] if rule.get('protocol'): nsx_rule['services'] = self._translate_services(rule) if logged: nsx_rule['logged'] = logged # Set rule direction if replace_src: nsx_rule['direction'] = 'OUT' elif replace_dest: nsx_rule['direction'] = 'IN' translated_rules.append(nsx_rule) return translated_rules def get_default_backend_rule(self, section_id, allow_all=True): # Add default allow all rule old_default_rule = self.nsx_firewall.get_default_rule( section_id) return { 'display_name': DEFAULT_RULE_NAME, 'action': (consts.FW_ACTION_ALLOW if allow_all else consts.FW_ACTION_DROP), 'is_default': True, 'id': old_default_rule['id'] if old_default_rule else 0} def get_port_translated_rules(self, nsx_ls_id, firewall_group, plugin_rules): """Return the list of translated rules per port""" port_rules = [] # TODO(asarfaty): get this value from the firewall group extensions logged = False # Add the firewall group ingress/egress rules only if the fw is up if firewall_group['admin_state_up']: port_rules.extend(self._translate_rules( firewall_group['ingress_rule_list'], replace_dest=nsx_ls_id, logged=logged)) port_rules.extend(self._translate_rules( firewall_group['egress_rule_list'], replace_src=nsx_ls_id, logged=logged)) # Add the per-port plugin rules if plugin_rules and 
isinstance(plugin_rules, list): port_rules.extend(plugin_rules) # Add ingress/egress block rules for this port port_rules.extend([ {'display_name': "Block port ingress", 'action': consts.FW_ACTION_DROP, 'destinations': [{'target_type': 'LogicalSwitch', 'target_id': nsx_ls_id}], 'direction': 'IN'}, {'display_name': "Block port egress", 'action': consts.FW_ACTION_DROP, 'sources': [{'target_type': 'LogicalSwitch', 'target_id': nsx_ls_id}], 'direction': 'OUT'}]) return port_rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/fwaas/nsx_v3/fwaas_callbacks_v2.py0000644000175000017500000001064500000000000027623 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from vmware_nsx.db import db as nsx_db from vmware_nsx.extensions import projectpluginmap from vmware_nsx.services.fwaas.common import fwaas_callbacks_v2 as \ com_callbacks from vmware_nsx.services.fwaas.nsx_tv import edge_fwaas_driver_v2 as tv_driver LOG = logging.getLogger(__name__) class Nsxv3FwaasCallbacksV2(com_callbacks.NsxCommonv3FwaasCallbacksV2): """NSX-V3 RPC callbacks for Firewall As A Service - V2.""" def __init__(self, with_rpc): super(Nsxv3FwaasCallbacksV2, self).__init__(with_rpc) # update the fwaas driver in case of TV plugin self.internal_driver = None if self.fwaas_enabled: if self.fwaas_driver.driver_name == tv_driver.FWAAS_DRIVER_NAME: self.internal_driver = self.fwaas_driver.get_T_driver() else: self.internal_driver = self.fwaas_driver @property def plugin_type(self): return projectpluginmap.NsxPlugins.NSX_T def get_port_rules(self, nsx_ls_id, fwg, plugin_rules): return self.internal_driver.get_port_translated_rules( nsx_ls_id, fwg, plugin_rules) def update_router_firewall(self, context, nsxlib, router_id, router_interfaces, nsx_router_id, section_id, from_fw=False): """Rewrite all the FWaaS v2 rules in the router edge firewall This method should be called on FWaaS updates, and on router interfaces changes. The purpose of from_fw is to differ between fw calls and other router calls, and if it is True - add the service router accordingly. 
""" fw_rules = [] with_fw = False # Add firewall rules per port attached to a firewall group for port in router_interfaces: nsx_ls_id, _nsx_port_id = nsx_db.get_nsx_switch_and_port_id( context.session, port['id']) # Check if this port has a firewall fwg = self.get_port_fwg(context, port['id']) if fwg: with_fw = True # Add plugin additional allow rules plugin_rules = self.core_plugin.get_extra_fw_rules( context, router_id, port['id']) # add the FWaaS rules for this port # ingress/egress firewall rules + default ingress/egress drop # rule for this port fw_rules.extend(self.get_port_rules(nsx_ls_id, fwg, plugin_rules)) # add a default allow-all rule to all other traffic & ports fw_rules.append(self.internal_driver.get_default_backend_rule( section_id, allow_all=True)) # update the backend router firewall exists_on_backend = self.core_plugin.verify_sr_at_backend(context, router_id) if from_fw: # fw action required if with_fw: # firewall exists in Neutron and not on backend - create if not exists_on_backend: self.core_plugin.create_service_router( context, router_id, update_firewall=False) exists_on_backend = True else: # First, check if other services exist and use the sr sr_exists = self.core_plugin.service_router_has_services( context, router_id) if not sr_exists and exists_on_backend: # No other services that require service router - delete self.core_plugin.delete_service_router(context, router_id) exists_on_backend = False if exists_on_backend: nsxlib.firewall_section.update(section_id, rules=fw_rules) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.210254 vmware-nsx-15.0.1.dev143/vmware_nsx/services/ipam/0000755000175000017500000000000000000000000022141 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/ipam/__init__.py0000644000175000017500000000000000000000000024240 
0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.210254 vmware-nsx-15.0.1.dev143/vmware_nsx/services/ipam/common/0000755000175000017500000000000000000000000023431 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/ipam/common/__init__.py0000644000175000017500000000000000000000000025530 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/ipam/common/driver.py0000644000175000017500000002462300000000000025305 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc import six from oslo_log import log as logging from neutron.ipam import driver as ipam_base from neutron.ipam.drivers.neutrondb_ipam import driver as neutron_driver from neutron.ipam import exceptions as ipam_exc from neutron.ipam import requests as ipam_req from neutron.ipam import subnet_alloc from neutron_lib.plugins import directory from vmware_nsx.db import db as nsx_db from vmware_nsx.extensions import projectpluginmap LOG = logging.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class NsxIpamBase(object): @classmethod def get_core_plugin(cls): return directory.get_plugin() @property def _nsxlib(self): p = self.get_core_plugin() if p.is_tvd_plugin(): # get the NSX-T sub-plugin p = p.get_plugin_by_type( projectpluginmap.NsxPlugins.NSX_T) elif p.plugin_type() != projectpluginmap.NsxPlugins.NSX_T: # Non NSX-T plugin return return p.nsxlib @property def _vcns(self): p = self.get_core_plugin() if p.is_tvd_plugin(): # get the NSX-V sub-plugin p = p.get_plugin_by_type( projectpluginmap.NsxPlugins.NSX_V) elif p.plugin_type() != projectpluginmap.NsxPlugins.NSX_V: # Non NSX-V plugin return return p.nsx_v.vcns @classmethod def _fetch_subnet(cls, context, id): p = cls.get_core_plugin() return p._get_subnet(context, id) @classmethod def _fetch_network(cls, context, id): p = cls.get_core_plugin() return p.get_network(context, id) class NsxSubnetRequestFactory(ipam_req.SubnetRequestFactory, NsxIpamBase): """Builds request using subnet info, including the network id""" @classmethod def get_request(cls, context, subnet, subnetpool): req = super(NsxSubnetRequestFactory, cls).get_request( context, subnet, subnetpool) # Add the network id into the request if 'network_id' in subnet: req.network_id = subnet['network_id'] return req class NsxAbstractIpamDriver(subnet_alloc.SubnetAllocator, NsxIpamBase): """Abstract IPAM Driver For NSX.""" def __init__(self, subnetpool, context): super(NsxAbstractIpamDriver, self).__init__(subnetpool, context) # in case of unsupported 
networks (or pre-upgrade networks) # the neutron internal driver will be used self.default_ipam = neutron_driver.NeutronDbPool(subnetpool, context) # Mark which updates to the pool are supported # (The NSX-v backend does not support changing the ip pool cidr # or gateway) self.support_update_gateway = False self.support_update_pools = False def _is_supported_net(self, subnet_request): """By default - all networks are supported""" return True def get_subnet_request_factory(self): # override the OOB factory to add the network ID return NsxSubnetRequestFactory @abc.abstractproperty def _subnet_class(self): """Return the class of the subnet that should be used.""" pass def get_subnet(self, subnet_id): """Retrieve an IPAM subnet.""" nsx_pool_id = nsx_db.get_nsx_ipam_pool_for_subnet( self._context.session, subnet_id) if not nsx_pool_id: # Unsupported (or pre-upgrade) network return self.default_ipam.get_subnet(subnet_id) return self._subnet_class.load(subnet_id, nsx_pool_id, self._context) @abc.abstractmethod def allocate_backend_pool(self, subnet_request): """Create a pool on the NSX backend and return its ID""" pass def allocate_subnet(self, subnet_request): """Create an IPAMSubnet object for the provided request.""" if not self._is_supported_net(subnet_request=subnet_request): # fallback to the neutron internal driver implementation return self.default_ipam.allocate_subnet(subnet_request) if self._subnetpool: subnet = super(NsxAbstractIpamDriver, self).allocate_subnet( subnet_request) subnet_request = subnet.get_details() # SubnetRequest must be an instance of SpecificSubnet if not isinstance(subnet_request, ipam_req.SpecificSubnetRequest): raise ipam_exc.InvalidSubnetRequestType( subnet_type=type(subnet_request)) # Add the pool to the NSX backend nsx_pool_id = self.allocate_backend_pool(subnet_request) # Add the pool to the DB nsx_db.add_nsx_ipam_subnet_pool(self._context.session, subnet_request.subnet_id, nsx_pool_id) # return the subnet object return 
self._subnet_class.load(subnet_request.subnet_id, nsx_pool_id, self._context, tenant_id=subnet_request.tenant_id) @abc.abstractmethod def update_backend_pool(self, nsx_pool_id, subnet_request): pass def _raise_update_not_supported(self): msg = _('Changing the subnet range or gateway is not supported') raise ipam_exc.IpamValueInvalid(message=msg) def update_subnet(self, subnet_request): """Update subnet info in the IPAM driver. Do the update only if the specific change is supported by the backend """ nsx_pool_id = nsx_db.get_nsx_ipam_pool_for_subnet( self._context.session, subnet_request.subnet_id) if not nsx_pool_id: # Unsupported (or pre-upgrade) network return self.default_ipam.update_subnet( subnet_request) # get the current pool data curr_subnet = self._subnet_class.load( subnet_request.subnet_id, nsx_pool_id, self._context, tenant_id=subnet_request.tenant_id).get_details() # check if the gateway changed gateway_changed = False if (str(subnet_request.gateway_ip) != str(curr_subnet.gateway_ip)): if not self.support_update_gateway: self._raise_update_not_supported() gateway_changed = True # check that the prefix / cidr / pools changed pools_changed = False if subnet_request.prefixlen != curr_subnet.prefixlen: if not self.support_update_pools: self._raise_update_not_supported() pools_changed = True if subnet_request.subnet_cidr[0] != curr_subnet.subnet_cidr[0]: if not self.support_update_pools: self._raise_update_not_supported() pools_changed = True if (len(subnet_request.allocation_pools) != len(curr_subnet.allocation_pools)): if not self.support_update_pools: self._raise_update_not_supported() pools_changed = True if (len(subnet_request.allocation_pools) != len(curr_subnet.allocation_pools)): if not self.support_update_pools: self._raise_update_not_supported() pools_changed = True else: for pool_ind in range(len(subnet_request.allocation_pools)): pool_req = subnet_request.allocation_pools[pool_ind] curr_pool = curr_subnet.allocation_pools[pool_ind] if 
(pool_req.first != curr_pool.first or pool_req.last != curr_pool.last): if not self.support_update_pools: self._raise_update_not_supported() pools_changed = True # update the relevant attributes at the backend pool if gateway_changed or pools_changed: self.update_backend_pool(nsx_pool_id, subnet_request) @abc.abstractmethod def delete_backend_pool(self, nsx_pool_id): pass def remove_subnet(self, subnet_id): """Delete an IPAM subnet pool from backend & DB.""" nsx_pool_id = nsx_db.get_nsx_ipam_pool_for_subnet( self._context.session, subnet_id) if not nsx_pool_id: # Unsupported (or pre-upgrade) network self.default_ipam.remove_subnet(subnet_id) return # Delete from backend self.delete_backend_pool(nsx_pool_id) # delete pool from DB nsx_db.del_nsx_ipam_subnet_pool(self._context.session, subnet_id, nsx_pool_id) class NsxIpamSubnetManager(object): def __init__(self, neutron_subnet_id): self._neutron_subnet_id = neutron_subnet_id @property def neutron_id(self): return self._neutron_subnet_id class NsxAbstractIpamSubnet(ipam_base.Subnet, NsxIpamBase): """Manage IP addresses for the NSX IPAM driver.""" def __init__(self, subnet_id, nsx_pool_id, ctx, tenant_id): self._subnet_id = subnet_id self._nsx_pool_id = nsx_pool_id self._context = ctx self._tenant_id = tenant_id #TODO(asarfaty): this subnet_manager is currently required by the #pluggable-ipam-driver self.subnet_manager = NsxIpamSubnetManager(self._subnet_id) @classmethod def load(cls, neutron_subnet_id, nsx_pool_id, ctx, tenant_id=None): """Load an IPAM subnet object given its neutron ID.""" return cls(neutron_subnet_id, nsx_pool_id, ctx, tenant_id) def allocate(self, address_request): """Allocate an IP from the pool""" return self.backend_allocate(address_request) @abc.abstractmethod def backend_allocate(self, address_request): pass def deallocate(self, address): """Return an IP to the pool""" self.backend_deallocate(address) @abc.abstractmethod def backend_deallocate(self, address): pass def 
update_allocation_pools(self, pools, cidr): # Not supported pass ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2142541 vmware-nsx-15.0.1.dev143/vmware_nsx/services/ipam/nsx_tvd/0000755000175000017500000000000000000000000023626 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/ipam/nsx_tvd/__init__.py0000644000175000017500000000000000000000000025725 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/ipam/nsx_tvd/driver.py0000644000175000017500000000700100000000000025471 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from neutron.ipam import exceptions as ipam_exc from neutron.ipam import subnet_alloc from vmware_nsx.extensions import projectpluginmap from vmware_nsx.plugins.nsx import utils as tvd_utils from vmware_nsx.services.ipam.common import driver as common_driver from vmware_nsx.services.ipam.nsx_v import driver as v_driver from vmware_nsx.services.ipam.nsx_v3 import driver as t_driver LOG = logging.getLogger(__name__) class NsxTvdIpamDriver(subnet_alloc.SubnetAllocator, common_driver.NsxIpamBase): """IPAM Driver For NSX-TVD plugin.""" def __init__(self, subnetpool, context): super(NsxTvdIpamDriver, self).__init__(subnetpool, context) # initialize the different drivers self.drivers = {} try: self.drivers[projectpluginmap.NsxPlugins.NSX_T] = ( t_driver.Nsxv3IpamDriver(subnetpool, context)) except Exception as e: LOG.warning("NsxTvdIpamDriver failed to initialize the NSX-T " "driver %s", e) self.drivers[projectpluginmap.NsxPlugins.NSX_T] = None try: self.drivers[projectpluginmap.NsxPlugins.NSX_V] = ( v_driver.NsxvIpamDriver(subnetpool, context)) except Exception as e: LOG.warning("NsxTvdIpamDriver failed to initialize the NSX-V " "driver %s", e) self.drivers[projectpluginmap.NsxPlugins.NSX_V] = None def get_T_driver(self): return self.drivers[projectpluginmap.NsxPlugins.NSX_T] def get_V_driver(self): return self.drivers[projectpluginmap.NsxPlugins.NSX_V] def _get_driver_for_project(self, project): plugin_type = tvd_utils.get_tvd_plugin_type_for_project(project) if not self.drivers.get(plugin_type): LOG.error("Project %(project)s with plugin %(plugin)s has no " "support for IPAM", {'project': project, 'plugin': plugin_type}) raise ipam_exc.IpamValueInvalid( message="IPAM driver not found") return self.drivers[plugin_type] def allocate_subnet(self, subnet_request): d = self._get_driver_for_project(subnet_request.tenant_id) return d.allocate_subnet(subnet_request) def update_subnet(self, subnet_request): d = 
self._get_driver_for_project(subnet_request.tenant_id) return d.update_subnet(subnet_request) def remove_subnet(self, subnet_id): d = self._get_driver_for_project(self._context.tenant_id) return d.remove_subnet(subnet_id) def get_subnet(self, subnet_id): d = self._get_driver_for_project(self._context.tenant_id) return d.get_subnet(subnet_id) def get_subnet_request_factory(self): d = self._get_driver_for_project(self._context.tenant_id) return d.get_subnet_request_factory() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2142541 vmware-nsx-15.0.1.dev143/vmware_nsx/services/ipam/nsx_v/0000755000175000017500000000000000000000000023276 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/ipam/nsx_v/__init__.py0000644000175000017500000000000000000000000025375 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/ipam/nsx_v/driver.py0000644000175000017500000001764200000000000025155 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import xml.etree.ElementTree as et import netaddr from neutron.ipam import exceptions as ipam_exc from neutron.ipam import requests as ipam_req from neutron_lib.api.definitions import external_net as extnet_apidef from neutron_lib.api.definitions import multiprovidernet as mpnet_apidef from neutron_lib.api.definitions import provider_net as pnet from neutron_lib.api import validators from oslo_log import log as logging from vmware_nsx._i18n import _ from vmware_nsx.plugins.nsx_v.vshield.common import constants from vmware_nsx.plugins.nsx_v.vshield.common import exceptions as vc_exc from vmware_nsx.services.ipam.common import driver as common LOG = logging.getLogger(__name__) class NsxvIpamDriver(common.NsxAbstractIpamDriver, common.NsxIpamBase): """IPAM Driver For NSX-V external & provider networks.""" def _is_ext_or_provider_net(self, subnet_request): """Return True if the network of the request is external or provider network """ network_id = subnet_request.network_id if network_id: network = self._fetch_network(self._context, network_id) if network.get(extnet_apidef.EXTERNAL): # external network return True if (validators.is_attr_set(network.get(mpnet_apidef.SEGMENTS)) or validators.is_attr_set(network.get(pnet.NETWORK_TYPE))): # provider network return True return False def _is_ipv6_subnet(self, subnet_request): """Return True if the network of the request is an ipv6 network""" if isinstance(subnet_request, ipam_req.SpecificSubnetRequest): return subnet_request.subnet_cidr.version == 6 else: if subnet_request.allocation_pools: for pool in subnet_request.allocation_pools: if pool.version == 6: return True return False def _is_supported_net(self, subnet_request): """This driver supports only ipv4 external/provider networks""" return (self._is_ext_or_provider_net(subnet_request) and not self._is_ipv6_subnet(subnet_request)) @property def _subnet_class(self): return NsxvIpamSubnet def allocate_backend_pool(self, subnet_request): """Create a pool on the NSX backend 
and return its ID""" if subnet_request.allocation_pools: ranges = [ {'ipRangeDto': {'startAddress': netaddr.IPAddress(pool.first), 'endAddress': netaddr.IPAddress(pool.last)}} for pool in subnet_request.allocation_pools] else: ranges = [] request = {'ipamAddressPool': # max name length on backend is 255, so there is no problem here {'name': 'subnet_' + subnet_request.subnet_id, 'prefixLength': subnet_request.prefixlen, 'gateway': subnet_request.gateway_ip, 'ipRanges': ranges}} try: response = self._vcns.create_ipam_ip_pool(request) nsx_pool_id = response[1] except vc_exc.VcnsApiException as e: msg = _('Failed to create subnet IPAM: %s') % e raise ipam_exc.IpamValueInvalid(message=msg) return nsx_pool_id def delete_backend_pool(self, nsx_pool_id): try: self._vcns.delete_ipam_ip_pool(nsx_pool_id) except vc_exc.VcnsApiException as e: LOG.error("Failed to delete IPAM from backend: %s", e) # Continue anyway, since this subnet was already removed def update_backend_pool(self, subnet_request): # The NSX-v backend does not support changing the ip pool cidr # or gateway. # If this function is called - there is no need to update the backend pass class NsxvIpamSubnet(common.NsxAbstractIpamSubnet, common.NsxIpamBase): """Manage IP addresses for the NSX-V IPAM driver.""" def _get_vcns_error_code(self, e): """Get the error code out of VcnsApiException""" try: desc = et.fromstring(e.response) return int(desc.find('errorCode').text) except Exception: LOG.error('IPAM pool: Error code not present. 
%s', e.response) def backend_allocate(self, address_request): try: # allocate a specific IP if isinstance(address_request, ipam_req.SpecificAddressRequest): # This handles both specific and automatic address requests ip_address = str(address_request.address) self._vcns.allocate_ipam_ip_from_pool(self._nsx_pool_id, ip_addr=ip_address) else: # Allocate any free IP response = self._vcns.allocate_ipam_ip_from_pool( self._nsx_pool_id)[1] # get the ip from the response root = et.fromstring(response) ip_address = root.find('ipAddress').text except vc_exc.VcnsApiException as e: # handle backend failures error_code = self._get_vcns_error_code(e) if error_code == constants.NSX_ERROR_IPAM_ALLOCATE_IP_USED: # This IP is already in use raise ipam_exc.IpAddressAlreadyAllocated( ip=ip_address, subnet_id=self._subnet_id) if error_code == constants.NSX_ERROR_IPAM_ALLOCATE_ALL_USED: # No more IP addresses available on the pool raise ipam_exc.IpAddressGenerationFailure( subnet_id=self._subnet_id) else: raise ipam_exc.IPAllocationFailed() return ip_address def backend_deallocate(self, address): try: self._vcns.release_ipam_ip_to_pool(self._nsx_pool_id, address) except vc_exc.VcnsApiException as e: LOG.error("NSX IPAM failed to free ip %(ip)s of subnet %(id)s:" " %(e)s", {'e': e.response, 'ip': address, 'id': self._subnet_id}) raise ipam_exc.IpAddressAllocationNotFound( subnet_id=self._subnet_id, ip_address=address) def _get_pool_cidr(self, pool): # rebuild the cidr from the pool range & prefix using the first # range in the pool, because they all should belong to the same cidr cidr = '%s/%s' % (pool['ipRanges'][0]['startAddress'], pool['prefixLength']) # convert to a proper cidr cidr = netaddr.IPNetwork(cidr).cidr return str(cidr) def get_details(self): """Return subnet data as a SpecificSubnetRequest""" # get the pool from the backend pool_details = self._vcns.get_ipam_ip_pool(self._nsx_pool_id)[1] gateway_ip = pool_details['gateway'] # rebuild the cidr from the range & prefix cidr = 
self._get_pool_cidr(pool_details) pools = [] for ip_range in pool_details['ipRanges']: pools.append(netaddr.IPRange(ip_range['startAddress'], ip_range['endAddress'])) return ipam_req.SpecificSubnetRequest( self._tenant_id, self._subnet_id, cidr, gateway_ip=gateway_ip, allocation_pools=pools) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2142541 vmware-nsx-15.0.1.dev143/vmware_nsx/services/ipam/nsx_v3/0000755000175000017500000000000000000000000023361 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/ipam/nsx_v3/__init__.py0000644000175000017500000000000000000000000025460 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/ipam/nsx_v3/driver.py0000644000175000017500000002507300000000000025235 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import netaddr from oslo_log import log as logging from neutron.ipam import exceptions as ipam_exc from neutron.ipam import requests as ipam_req from vmware_nsx._i18n import _ from vmware_nsx.services.ipam.common import driver as common from vmware_nsxlib.v3 import exceptions as nsx_lib_exc from vmware_nsxlib.v3 import nsx_constants as error LOG = logging.getLogger(__name__) class Nsxv3IpamDriver(common.NsxAbstractIpamDriver): """IPAM Driver For NSX-V3 networks.""" def __init__(self, subnetpool, context): super(Nsxv3IpamDriver, self).__init__(subnetpool, context) self.nsxlib_ipam = self._nsxlib.ip_pool # Mark which updates to the pool are supported self.support_update_gateway = True self.support_update_pools = True @property def _subnet_class(self): return Nsxv3IpamSubnet def _get_cidr_from_request(self, subnet_request): return "%s/%s" % (subnet_request.subnet_cidr[0], subnet_request.prefixlen) def _get_ranges_from_request(self, subnet_request): if subnet_request.allocation_pools: ranges = [ {'start': str(pool[0]), 'end': str(pool[-1])} for pool in subnet_request.allocation_pools] else: ranges = [] return ranges def _is_supported_net(self, subnet_request): """This driver doesn't support multicast cidrs""" if not hasattr(subnet_request, "subnet_cidr"): return True net = netaddr.IPNetwork(subnet_request.subnet_cidr[0]) return not net.is_multicast() def allocate_backend_pool(self, subnet_request): """Create a pool on the NSX backend and return its ID""" # name/description length on backend is long, so there is no problem name = 'subnet_' + subnet_request.subnet_id description = 'OS IP pool for subnet ' + subnet_request.subnet_id try: response = self.nsxlib_ipam.create( self._get_cidr_from_request(subnet_request), allocation_ranges=self._get_ranges_from_request( subnet_request), display_name=name, description=description, gateway_ip=subnet_request.gateway_ip) nsx_pool_id = response['id'] except Exception as e: #TODO(asarfaty): handle specific errors msg = _('Failed to 
create subnet IPAM: %s') % e raise ipam_exc.IpamValueInvalid(message=msg) return nsx_pool_id def delete_backend_pool(self, nsx_pool_id): # Because of the delete_subnet flow in the neutron plugin, # some ports still hold IPs from this pool. # Those ports be deleted shortly after this function. # We need to release those IPs before deleting the backed pool, # or else it will fail. pool_allocations = self.nsxlib_ipam.get_allocations(nsx_pool_id) if pool_allocations and pool_allocations.get('result_count'): for allocation in pool_allocations.get('results', []): ip_addr = allocation.get('allocation_id') try: self.nsxlib_ipam.release(nsx_pool_id, ip_addr) except Exception as e: LOG.warning("Failed to release ip %(ip)s from pool " "%(pool)s: %(e)s", {'ip': ip_addr, 'pool': nsx_pool_id, 'e': e}) try: self.nsxlib_ipam.delete(nsx_pool_id) except Exception as e: LOG.error("Failed to delete IPAM from backend: %s", e) # Continue anyway, since this subnet was already removed def update_backend_pool(self, nsx_pool_id, subnet_request): update_args = { 'cidr': self._get_cidr_from_request(subnet_request), 'allocation_ranges': self._get_ranges_from_request(subnet_request), 'gateway_ip': subnet_request.gateway_ip} try: self.nsxlib_ipam.update( nsx_pool_id, **update_args) except nsx_lib_exc.ManagerError as e: LOG.error("NSX IPAM failed to update pool %(id)s: " " %(e)s; code %(code)s", {'e': e, 'id': nsx_pool_id, 'code': e.error_code}) if (e.error_code == error.ERR_CODE_IPAM_RANGE_MODIFY or e.error_code == error.ERR_CODE_IPAM_RANGE_DELETE or e.error_code == error.ERR_CODE_IPAM_RANGE_SHRUNK): # The change is not allowed: already allocated IPs out of # the new range raise ipam_exc.InvalidSubnetRequest( reason=_("Already allocated IPs outside of the updated " "pools")) except Exception as e: # unexpected error msg = _('Failed to update subnet IPAM: %s') % e raise ipam_exc.IpamValueInvalid(message=msg) class Nsxv3IpamSubnet(common.NsxAbstractIpamSubnet): """Manage IP addresses for the NSX 
V3 IPAM driver.""" def __init__(self, subnet_id, nsx_pool_id, ctx, tenant_id): super(Nsxv3IpamSubnet, self).__init__( subnet_id, nsx_pool_id, ctx, tenant_id) self.nsxlib_ipam = self._nsxlib.ip_pool def backend_allocate(self, address_request): try: # allocate a specific IP if isinstance(address_request, ipam_req.SpecificAddressRequest): # This handles both specific and automatic address requests ip_address = str(address_request.address) # If this is the subnet gateway IP - no need to allocate it subnet = self.get_details() if str(subnet.gateway_ip) == ip_address: LOG.info("Skip allocation of gateway-ip for pool %s", self._nsx_pool_id) return ip_address else: # Allocate any free IP ip_address = None response = self.nsxlib_ipam.allocate(self._nsx_pool_id, ip_addr=ip_address) ip_address = response['allocation_id'] except nsx_lib_exc.ManagerError as e: LOG.error("NSX IPAM failed to allocate ip %(ip)s of subnet " "%(id)s: %(e)s; code %(code)s", {'e': e, 'ip': ip_address, 'id': self._subnet_id, 'code': e.error_code}) if e.error_code == error.ERR_CODE_IPAM_POOL_EXHAUSTED: # No more IP addresses available on the pool raise ipam_exc.IpAddressGenerationFailure( subnet_id=self._subnet_id) if e.error_code == error.ERR_CODE_IPAM_SPECIFIC_IP: # The NSX backend does not support allocation of specific IPs # prior to version 2.0. 
msg = (_("NSX-V3 IPAM driver does not support allocation of a " "specific ip %s for port") % ip_address) raise NotImplementedError(msg) if e.error_code == error.ERR_CODE_IPAM_IP_ALLOCATED: # This IP is already in use raise ipam_exc.IpAddressAlreadyAllocated( ip=ip_address, subnet_id=self._subnet_id) if e.error_code == error.ERR_CODE_OBJECT_NOT_FOUND: msg = (_("NSX-V3 IPAM failed to allocate: pool %s was not " "found") % self._nsx_pool_id) raise ipam_exc.IpamValueInvalid(message=msg) else: # another backend error raise ipam_exc.IPAllocationFailed() except Exception as e: LOG.error("NSX IPAM failed to allocate ip %(ip)s of subnet " "%(id)s: %(e)s", {'e': e, 'ip': ip_address, 'id': self._subnet_id}) # handle unexpected failures raise ipam_exc.IPAllocationFailed() return ip_address def backend_deallocate(self, ip_address): # If this is the subnet gateway IP - no need to allocate it subnet = self.get_details() if str(subnet.gateway_ip) == ip_address: LOG.info("Skip deallocation of gateway-ip for pool %s", self._nsx_pool_id) return try: self.nsxlib_ipam.release(self._nsx_pool_id, ip_address) except nsx_lib_exc.ManagerError as e: # fail silently LOG.error("NSX IPAM failed to free ip %(ip)s of subnet " "%(id)s: %(e)s; code %(code)s", {'e': e, 'ip': ip_address, 'id': self._subnet_id, 'code': e.error_code}) def get_details(self): """Return subnet data as a SpecificSubnetRequest""" # get the pool from the backend try: pool_details = self.nsxlib_ipam.get(self._nsx_pool_id) except Exception as e: msg = _('Failed to get details for nsx pool: %(id)s: ' '%(e)s') % {'id': self._nsx_pool_id, 'e': e} raise ipam_exc.IpamValueInvalid(message=msg) first_range = pool_details.get('subnets', [None])[0] if not first_range: msg = _('Failed to get details for nsx pool: %(id)s') % { 'id': self._nsx_pool_id} raise ipam_exc.IpamValueInvalid(message=msg) cidr = first_range.get('cidr') gateway_ip = first_range.get('gateway_ip') pools = [] for subnet in pool_details.get('subnets', []): for ip_range 
in subnet.get('allocation_ranges', []): pools.append(netaddr.IPRange(ip_range.get('start'), ip_range.get('end'))) return ipam_req.SpecificSubnetRequest( self._tenant_id, self._subnet_id, cidr, gateway_ip=gateway_ip, allocation_pools=pools) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2142541 vmware-nsx-15.0.1.dev143/vmware_nsx/services/l2gateway/0000755000175000017500000000000000000000000023112 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/l2gateway/__init__.py0000644000175000017500000000000000000000000025211 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2142541 vmware-nsx-15.0.1.dev143/vmware_nsx/services/l2gateway/nsx_tvd/0000755000175000017500000000000000000000000024577 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/l2gateway/nsx_tvd/__init__.py0000644000175000017500000000000000000000000026676 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/l2gateway/nsx_tvd/driver.py0000644000175000017500000001510100000000000026442 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from networking_l2gw.db.l2gateway import l2gateway_db from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from oslo_log import log as logging from vmware_nsx.db import db as nsx_db from vmware_nsx.extensions import projectpluginmap from vmware_nsx.services.l2gateway.nsx_v import driver as v_driver from vmware_nsx.services.l2gateway.nsx_v3 import driver as t_driver LOG = logging.getLogger(__name__) class NsxTvdL2GatewayDriver(l2gateway_db.L2GatewayMixin): """Class to handle API calls for L2 gateway and NSX-TVD plugin wrapper.""" def __init__(self, plugin): super(NsxTvdL2GatewayDriver, self).__init__() self._plugin = plugin # supported drivers: self.drivers = {} try: self.drivers[projectpluginmap.NsxPlugins.NSX_T] = ( t_driver.NsxV3Driver(plugin)) except Exception: LOG.warning("NsxTvdL2GatewayDriver failed to initialize the NSX-T " "driver") self.drivers[projectpluginmap.NsxPlugins.NSX_T] = None try: self.drivers[projectpluginmap.NsxPlugins.NSX_V] = ( v_driver.NsxvL2GatewayDriver(plugin)) except Exception: LOG.warning("NsxTvdL2GatewayDriver failed to initialize the NSX-V " "driver") self.drivers[projectpluginmap.NsxPlugins.NSX_V] = None def _get_driver_for_project(self, context, project): """Get the l2gw driver by the plugin of the project""" mapping = nsx_db.get_project_plugin_mapping( context.session, project) if mapping: plugin_type = mapping['plugin'] else: msg = _("Couldn't find the plugin project %s is using") % project raise n_exc.InvalidInput(error_message=msg) if plugin_type not in self.drivers: msg = (_("Project %(project)s 
with plugin %(plugin)s has no " "support for L2GW") % {'project': project, 'plugin': plugin_type}) raise n_exc.InvalidInput(error_message=msg) # make sure the core plugin is supported core_plugin = directory.get_plugin() if not core_plugin.get_plugin_by_type(plugin_type): msg = (_("Plugin %(plugin)s for project %(project)s is not " "supported by the core plugin") % {'project': project, 'plugin': plugin_type}) raise n_exc.InvalidInput(error_message=msg) return self.drivers[plugin_type] def create_l2_gateway(self, context, l2_gateway): d = self._get_driver_for_project( context, l2_gateway['l2_gateway']['tenant_id']) return d.create_l2_gateway(context, l2_gateway) def create_l2_gateway_precommit(self, context, l2_gateway): # Not implemented by any of the plugins pass def create_l2_gateway_postcommit(self, context, l2_gateway): # Not implemented by any of the plugins pass def update_l2_gateway(self, context, l2_gateway): # Not implemented by any of the plugins pass def update_l2_gateway_precommit(self, context, l2_gateway): # Not implemented by any of the plugins pass def update_l2_gateway_postcommit(self, context, l2_gateway): # Not implemented by any of the plugins pass def create_l2_gateway_connection(self, context, l2_gateway_connection): d = self._get_driver_for_project( context, l2_gateway_connection['l2_gateway_connection']['tenant_id']) return d.create_l2_gateway_connection(context, l2_gateway_connection) def create_l2_gateway_connection_precommit(self, contex, gw_connection): # Not implemented by any of the plugins pass def create_l2_gateway_connection_postcommit(self, context, gw_connection): d = self._get_driver_for_project(context, gw_connection['tenant_id']) return d.create_l2_gateway_connection_postcommit( context, gw_connection) def _get_gw_connection_driver(self, context, l2gw_connection_id): l2gw_conn = self._plugin._get_l2_gateway_connection( context, l2gw_connection_id) return self._get_driver_for_project(context, l2gw_conn.tenant_id) def 
delete_l2_gateway_connection(self, context, l2_gateway_connection_id): d = self._get_gw_connection_driver(context, l2_gateway_connection_id) return d.delete_l2_gateway_connection( context, l2_gateway_connection_id) def delete_l2_gateway_connection_precommit(self, context, l2_gateway_connection): # Not implemented by any of the plugins pass def delete_l2_gateway_connection_postcommit(self, context, l2_gateway_connection_id): # Not implemented by any of the plugins #Note(asarfaty): in postcommit the l2_gateway_connection was already # deleted so we cannot decide on the plugin by the project of the # connection. pass def delete_l2_gateway(self, context, l2_gateway_id): l2gw = self._plugin._get_l2_gateway(context, l2_gateway_id) d = self._get_driver_for_project( context, l2gw['tenant_id']) return d.delete_l2_gateway(context, l2_gateway_id) def delete_l2_gateway_precommit(self, context, l2_gateway): # Not implemented by any of the plugins pass def delete_l2_gateway_postcommit(self, context, l2_gateway): # Not implemented by any of the plugins #Note(asarfaty): in postcommit the l2_gateway was already deleted # so we cannot decide on the plugin by the project of the gw. pass def add_port_mac(self, context, port_dict): """Process a created Neutron port.""" pass def delete_port_mac(self, context, port): """Process a deleted Neutron port.""" pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/l2gateway/nsx_tvd/plugin.py0000644000175000017500000000176000000000000026453 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from networking_l2gw.services.l2gateway import plugin from vmware_nsx.plugins.nsx import utils as tvd_utils @tvd_utils.filter_plugins class L2GatewayPlugin(plugin.L2GatewayPlugin): """NSX-TV plugin for L2GW. This plugin adds separation between T/V instances """ methods_to_separate = ['get_l2_gateways', 'get_l2_gateway_connections'] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2142541 vmware-nsx-15.0.1.dev143/vmware_nsx/services/l2gateway/nsx_v/0000755000175000017500000000000000000000000024247 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/l2gateway/nsx_v/__init__.py0000644000175000017500000000000000000000000026346 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/l2gateway/nsx_v/driver.py0000644000175000017500000002306100000000000026116 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from networking_l2gw.db.l2gateway import l2gateway_db from networking_l2gw.db.l2gateway import l2gateway_models as models from networking_l2gw.services.l2gateway.common import constants as l2gw_const from networking_l2gw.services.l2gateway import exceptions as l2gw_exc from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from oslo_log import log as logging from oslo_utils import uuidutils from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import nsxv_constants from vmware_nsx.db import db as nsx_db from vmware_nsx.db import nsxv_db from vmware_nsx.extensions import projectpluginmap from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az from vmware_nsx.plugins.nsx_v.vshield.common import exceptions LOG = logging.getLogger(__name__) class NsxvL2GatewayDriver(l2gateway_db.L2GatewayMixin): """Class to handle API calls for L2 gateway and NSXv backend.""" def __init__(self, plugin): super(NsxvL2GatewayDriver, self).__init__() self._plugin = plugin self.__core_plugin = None @property def _core_plugin(self): if not self.__core_plugin: self.__core_plugin = directory.get_plugin() if self.__core_plugin.is_tvd_plugin(): self.__core_plugin = self.__core_plugin.get_plugin_by_type( projectpluginmap.NsxPlugins.NSX_V) return self.__core_plugin @property def _nsxv(self): return self._core_plugin.nsx_v @property def _edge_manager(self): return self._core_plugin.edge_manager def _validate_device_list(self, devices): # In NSX-v, one L2 gateway is mapped to one DLR. 
# So we expect only one device to be configured as part of # a L2 gateway resource. if len(devices) != 1: msg = _("Only a single device is supported for one L2 gateway") raise n_exc.InvalidInput(error_message=msg) def _get_l2gateway_interface(self, context, interface_name): """Get all l2gateway_interfaces_by interface_name.""" session = context.session with session.begin(): return session.query(models.L2GatewayInterface).filter_by( interface_name=interface_name).all() def _validate_interface_list(self, context, interfaces): # In NSXv, interface is mapped to a vDS VLAN port group. # Since HA is not supported, only one interface is expected if len(interfaces) != 1: msg = _("Only a single interface is supported for one L2 gateway") raise n_exc.InvalidInput(error_message=msg) if not self._nsxv.vcns.validate_network(interfaces[0]['name']): msg = _("Configured interface not found") raise n_exc.InvalidInput(error_message=msg) interface = self._get_l2gateway_interface(context, interfaces[0]['name']) if interface: msg = _("%s is already used.") % interfaces[0]['name'] raise n_exc.InvalidInput(error_message=msg) def create_l2_gateway_precommit(self, context, l2_gateway): pass def create_l2_gateway_postcommit(self, context, l2_gateway): pass def create_l2_gateway(self, context, l2_gateway): """Create a logical L2 gateway.""" self._admin_check(context, 'CREATE') gw = l2_gateway[self.gateway_resource] devices = gw['devices'] self._validate_device_list(devices) interfaces = devices[0]['interfaces'] self._validate_interface_list(context, interfaces) # Create a dedicated DLR try: edge_id = self._create_l2_gateway_edge(context) except nsx_exc.NsxL2GWDeviceNotFound: LOG.exception("Failed to create backend device " "for L2 gateway") raise devices[0]['device_name'] = edge_id l2_gateway[self.gateway_resource]['devices'] = devices return def update_l2_gateway_precommit(self, context, l2_gateway): pass def update_l2_gateway_postcommit(self, context, l2_gateway): pass def 
_create_l2_gateway_edge(self, context): # Create a dedicated DLR lrouter = {'name': nsxv_constants.L2_GATEWAY_EDGE, 'id': uuidutils.generate_uuid()} # Create the router on the default availability zone availability_zone = (nsx_az.NsxVAvailabilityZones(). get_default_availability_zone()) self._edge_manager.create_lrouter(context, lrouter, lswitch=None, dist=True, availability_zone=availability_zone) edge_binding = nsxv_db.get_nsxv_router_binding(context.session, lrouter['id']) if not edge_binding: raise nsx_exc.NsxL2GWDeviceNotFound() # Enable edge HA on the DLR if availability_zone.edge_ha: edge_id = edge_binding['edge_id'] self._edge_manager.nsxv_manager.update_edge_ha(edge_id) return edge_binding['edge_id'] def _get_device(self, context, l2gw_id): devices = self._get_l2_gateway_devices(context, l2gw_id) return devices[0] def create_l2_gateway_connection_precommit(self, contex, gw_connection): pass def create_l2_gateway_connection_postcommit(self, context, gw_connection): network_id = gw_connection.get('network_id') virtual_wire = nsx_db.get_nsx_switch_ids(context.session, network_id) # In NSX-v, there will be only one device configured per L2 gateway. # The name of the device shall carry the backend DLR. 
l2gw_id = gw_connection.get(l2gw_const.L2GATEWAY_ID) device = self._get_device(context, l2gw_id) device_name = device.get('device_name') device_id = device.get('id') interface = self._get_l2_gw_interfaces(context, device_id) interface_name = interface[0].get("interface_name") # bridge name length cannot exceed 40 characters bridge_name = "brg-" + uuidutils.generate_uuid() bridge_dict = {"bridges": {"bridge": {"name": bridge_name, "virtualWire": virtual_wire[0], "dvportGroup": interface_name}}} try: self._nsxv.create_bridge(device_name, bridge_dict) except exceptions.VcnsApiException: LOG.exception("Failed to update NSX, " "rolling back changes on neutron.") raise l2gw_exc.L2GatewayServiceDriverError( method='create_l2_gateway_connection_postcommit') return def create_l2_gateway_connection(self, context, l2_gateway_connection): """Create a L2 gateway connection.""" gw_connection = l2_gateway_connection.get(l2gw_const. CONNECTION_RESOURCE_NAME) l2gw_id = gw_connection.get(l2gw_const.L2GATEWAY_ID) gw_db = self._get_l2_gateway(context, l2gw_id) if gw_db.network_connections: raise nsx_exc.NsxL2GWInUse(gateway_id=l2gw_id) return def delete_l2_gateway_connection_precommit(self, context, l2_gateway_connection): pass def delete_l2_gateway_connection_postcommit(self, context, l2_gateway_connection): pass def delete_l2_gateway_connection(self, context, l2_gateway_connection): """Delete a L2 gateway connection.""" self._admin_check(context, 'DELETE') gw_connection = self.get_l2_gateway_connection(context, l2_gateway_connection) if not gw_connection: raise l2gw_exc.L2GatewayConnectionNotFound( l2_gateway_connection) l2gw_id = gw_connection.get(l2gw_const.L2GATEWAY_ID) device = self._get_device(context, l2gw_id) device_name = device.get('device_name') self._nsxv.delete_bridge(device_name) def delete_l2_gateway(self, context, l2_gateway): """Delete a L2 gateway.""" self._admin_check(context, 'DELETE') device = self._get_device(context, l2_gateway) edge_id = 
device.get('device_name') rtr_binding = nsxv_db.get_nsxv_router_binding_by_edge( context.session, edge_id) if rtr_binding: self._edge_manager.delete_lrouter(context, rtr_binding['router_id']) def delete_l2_gateway_precommit(self, context, l2_gateway): pass def delete_l2_gateway_postcommit(self, context, l2_gateway): pass def add_port_mac(self, context, port_dict): """Process a created Neutron port.""" pass def delete_port_mac(self, context, port): """Process a deleted Neutron port.""" pass ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2142541 vmware-nsx-15.0.1.dev143/vmware_nsx/services/l2gateway/nsx_v3/0000755000175000017500000000000000000000000024332 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/l2gateway/nsx_v3/__init__.py0000644000175000017500000000000000000000000026431 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/l2gateway/nsx_v3/driver.py0000644000175000017500000006011000000000000026175 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from networking_l2gw.db.l2gateway import l2gateway_db from networking_l2gw.services.l2gateway.common import constants as l2gw_const from networking_l2gw.services.l2gateway import exceptions as l2gw_exc from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import uuidutils from neutron_lib.api.definitions import provider_net as providernet from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants from neutron_lib import context from neutron_lib.db import api as db_api from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from neutron_lib.plugins import utils as plugin_utils from vmware_nsx._i18n import _ from vmware_nsx.common import utils as nsx_utils from vmware_nsx.db import db as nsx_db from vmware_nsx.extensions import projectpluginmap from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3 import nsx_constants from vmware_nsxlib.v3 import utils as nsxlib_utils LOG = logging.getLogger(__name__) class _NotUniqueL2GW(Exception): """Raised if validation of default L2 GW uniqueness fails.""" class NsxV3Driver(l2gateway_db.L2GatewayMixin): """Class to handle API calls for L2 gateway and NSXv3 backend.""" gateway_resource = l2gw_const.GATEWAY_RESOURCE_NAME def __init__(self, plugin): super(NsxV3Driver, self).__init__() self._plugin = plugin LOG.debug("Starting service plugin for NSX L2Gateway") self.subscribe_callback_notifications() LOG.debug("Initialization complete for NSXv3 driver for " "L2 gateway service plugin.") self.__core_plugin = None @property def _core_plugin(self): if not self.__core_plugin: self.__core_plugin = directory.get_plugin() if self.__core_plugin.is_tvd_plugin(): self.__core_plugin = self.__core_plugin.get_plugin_by_type( projectpluginmap.NsxPlugins.NSX_T) return self.__core_plugin def 
subscribe_callback_notifications(self): registry.subscribe(self._prevent_l2gw_port_delete, resources.PORT, events.BEFORE_DELETE) registry.subscribe(self._ensure_default_l2_gateway, resources.PROCESS, events.BEFORE_SPAWN) def _find_default_l2_gateway(self, admin_ctx, def_device_id): for l2gateway in self._get_l2_gateways(admin_ctx): if l2gateway['devices'][0]['device_name'] == def_device_id: return l2gateway @nsxlib_utils.retry_random_upon_exception(_NotUniqueL2GW, max_attempts=10) def _create_default_l2_gateway(self, admin_ctx, l2gw_dict, def_device_id): LOG.debug("Creating default layer-2 gateway with: %s", l2gw_dict) def_l2gw = super(NsxV3Driver, self).create_l2_gateway(admin_ctx, l2gw_dict) # Verify that no other instance of neutron-server had the same # brilliant idea... l2gateways = self._get_l2_gateways(admin_ctx) for l2gateway in l2gateways: # Since we ensure L2 gateway is created with only 1 device, we use # the first device in the list. if l2gateway['devices'][0]['device_name'] == def_device_id: if l2gateway['id'] == def_l2gw['id']: # Nothing to worry about, that's our gateway continue LOG.info("Default L2 gateway is already created with " "id %s, deleting L2 gateway with id %s", l2gateway['id'], def_l2gw['id']) # Commit suicide! self.validate_l2_gateway_for_delete( admin_ctx, def_l2gw) # We can be sure the gateway is not in use... 
super(NsxV3Driver, self).delete_l2_gateway( admin_ctx, def_l2gw['id']) # The operation should be retried to avoid the situation where # every instance deletes the gateway it created raise _NotUniqueL2GW return def_l2gw def _get_bridge_vlan_tz_id(self, bep_data): nsxlib = self._core_plugin.nsxlib # Edge cluster Id is mandatory, do not fear KeyError edge_cluster_id = bep_data['edge_cluster_id'] member_indexes = bep_data.get('edge_cluster_member_indexes', []) # NSX should not allow bridge endpoint profiles attached to # non-existing edge clusters edge_cluster = nsxlib.edge_cluster.get(edge_cluster_id) member_map = dict((member['member_index'], member['transport_node_id']) for member in edge_cluster['members']) # By default consider all transport nodes in the cluster for # retrieving the VLAN transprtzone tn_ids = member_map.values() if member_indexes: try: tn_ids = [member_map[idx] for idx in member_indexes] except KeyError: LOG.warning("Invalid member indexes specified in bridge " "endpoint profile: %(bep_id)s: %(indexes)s", {'bep_id': bep_data['id'], 'indexes': member_indexes}) # Retrieve VLAN transport zones vlan_transport_zones = nsxlib.search_all_resource_by_attributes( nsxlib.transport_zone.resource_type, transport_type='VLAN') vlan_tz_map = dict((vlan_tz['id'], []) for vlan_tz in vlan_transport_zones) for tn_id in tn_ids: tn_data = nsxlib.transport_node.get(tn_id) for tz_endpoint in tn_data.get('transport_zone_endpoints', []): tz_id = tz_endpoint['transport_zone_id'] if tz_id in vlan_tz_map: vlan_tz_map[tz_id].append(tn_id) # Find the VLAN transport zone that is used by all transport nodes results = [] for (tz_id, nodes) in vlan_tz_map.items(): if set(nodes) != set(tn_ids): continue results.append(tz_id) return results def _ensure_default_l2_gateway(self, resource, event, trigger, payload=None): """ Create a default logical L2 gateway. Create a logical L2 gateway in the neutron database for the default bridge endpoint profile, if specified in the configuration. 
Ensure only one gateway is configured in presence of multiple neutron servers. """ if cfg.CONF.nsx_v3.default_bridge_cluster: LOG.warning("Attention! The default_bridge_cluster option is " "still set to %s. This option won't have any effect " "as L2 gateways based on bridge clusters are not " "implemented anymore", cfg.CONF.nsx_v3.default_bridge_cluster) def_bep = cfg.CONF.nsx_v3.default_bridge_endpoint_profile # Return if no default_endpoint_profile set in config if not def_bep: LOG.info("NSX Default bridge endpoint profile not set. " "Default L2 gateway will not be processed.") return admin_ctx = context.get_admin_context() nsx_bep_client = self._core_plugin.nsxlib.bridge_endpoint_profile bep_id = nsx_bep_client.get_id_by_name_or_id(def_bep) def_l2gw = self._find_default_l2_gateway(admin_ctx, bep_id) # If there is already an existing gateway, use that one if def_l2gw: LOG.info("A default L2 gateway for bridge endpoint profile " "%(bep_id)s already exists. Reusing L2 gateway " "%(def_l2gw_id)s)", {'bep_id': bep_id, 'def_l2gw_id': def_l2gw['id']}) return def_l2gw bep_data = nsx_bep_client.get(bep_id) vlan_tzs = self._get_bridge_vlan_tz_id(bep_data) if not vlan_tzs: LOG.info("No NSX VLAN transport zone could be used for bridge " "endpoint profile: %s. Default L2 gateway will not " "be processed", bep_id) return # TODO(salv-orlando): Implement support for multiple VLAN TZ vlan_tz = vlan_tzs[0] if len(vlan_tzs) > 1: LOG.info("The NSX L2 gateway driver currenly supports a single " "VLAN transport zone for bridging, but %(num_tz)d " "were specified. 
Transport zone %(tz)s will be used " "for L2 gateways", {'num_tz': len(vlan_tzs), 'tz': vlan_tz}) device = {'device_name': bep_id, 'interfaces': [{'name': vlan_tz}]} # TODO(asarfaty): Add a default v3 tenant-id to allow TVD filtering l2gw_dict = {self.gateway_resource: { 'name': 'default-nsxedge-l2gw', 'devices': [device]}} self._create_default_l2_gateway(admin_ctx, l2gw_dict, bep_id) return def_l2gw def _prevent_l2gw_port_delete(self, resource, event, trigger, payload=None): context = payload.context port_id = payload.resource_id port_check = payload.metadata['port_check'] if port_check: self.prevent_l2gw_port_deletion(context, port_id) def _validate_device_list(self, devices, check_backend=True): # In NSXv3, one L2 gateway is mapped to one bridge endpoint profle. # So we expect only one device to be configured as part of # a L2 gateway resource. The name of the device must be the bridge # endpoint profile UUID. if len(devices) != 1: msg = _("Only a single device is supported by the NSX L2" "gateway driver") raise n_exc.InvalidInput(error_message=msg) dev_name = devices[0]['device_name'] if not uuidutils.is_uuid_like(dev_name): msg = _("Device name must be configured with a UUID") raise n_exc.InvalidInput(error_message=msg) # Ensure the L2GW device is a valid bridge endpoint profile in NSX if check_backend: try: self._core_plugin.nsxlib.bridge_endpoint_profile.get( dev_name) except nsxlib_exc.ResourceNotFound: msg = _("Could not find Bridge Endpoint Profile for L2 " "gateway device %s on NSX backend") % dev_name LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) # One L2 gateway must have only one interface defined. 
interfaces = devices[0].get(l2gw_const.IFACE_NAME_ATTR) if len(interfaces) > 1: msg = _("Maximum of one interface is supported by the NSX L2 " "gateway driver") raise n_exc.InvalidInput(error_message=msg) def create_l2_gateway(self, context, l2_gateway): """Create a logical L2 gateway.""" gw = l2_gateway[self.gateway_resource] devices = gw['devices'] self._validate_device_list(devices) def create_l2_gateway_precommit(self, context, l2_gateway): pass def create_l2_gateway_postcommit(self, context, l2_gateway): pass def update_l2_gateway_precommit(self, context, l2_gateway): pass def update_l2_gateway_postcommit(self, context, l2_gateway): pass def delete_l2_gateway(self, context, l2_gateway_id): pass def delete_l2_gateway_precommit(self, context, l2_gateway_id): pass def delete_l2_gateway_postcommit(self, context, l2_gateway_id): pass def _validate_network(self, context, network_id): network = self._core_plugin.get_network(context, network_id) network_type = network.get(providernet.NETWORK_TYPE) # If network is a provider network, verify whether it is of type GENEVE if network_type and network_type != nsx_utils.NsxV3NetworkTypes.GENEVE: msg = (_("Unsupported network type %s for L2 gateway connection") % network_type) raise n_exc.InvalidInput(error_message=msg) def _validate_segment_id(self, seg_id): if not seg_id: raise l2gw_exc.L2GatewaySegmentationRequired return plugin_utils.is_valid_vlan_tag(seg_id) def create_l2_gateway_connection(self, context, l2_gateway_connection): gw_connection = l2_gateway_connection.get(self.connection_resource) network_id = gw_connection.get(l2gw_const.NETWORK_ID) self._validate_network(context, network_id) def _get_bep(self, context, l2gw_id): # In NSXv3, there will be only one device configured per L2 gateway. # The name of the device shall carry the bridge endpoint profile id. 
devices = self._get_l2_gateway_devices(context, l2gw_id) return devices[0].get('device_name') def _get_conn_parameters(self, context, gw_connection): """Return interface and segmenantion id for a connection. """ if not gw_connection: return l2gw_id = gw_connection.get(l2gw_const.L2GATEWAY_ID) seg_id = gw_connection.get(l2gw_const.SEG_ID) devices = self._get_l2_gateway_devices(context, l2gw_id) # TODO(salv-orlando): support more than a single interface interface = self._get_l2_gw_interfaces(context, devices[0]['id'])[0] if not seg_id: # Seg-id was not passed as part of connection-create. Retrieve # seg-id from L2 gateway's interface. seg_id = interface.get('segmentation_id') return interface['interface_name'], seg_id def create_l2_gateway_connection_precommit(self, context, gw_connection): """Validate the L2 gateway connection Do not allow another connection with the same bride cluster and seg_id """ admin_ctx = context.elevated() nsxlib = self._core_plugin.nsxlib l2gw_id = gw_connection.get(l2gw_const.L2GATEWAY_ID) devices = self._get_l2_gateway_devices(context, l2gw_id) bep_id = devices[0].get('device_name') # Check for bridge endpoint profile existence # if bridge endpoint profile is not found, this is likely an old # connection, fail with error. try: nsxlib.bridge_endpoint_profile.get_id_by_name_or_id(bep_id) except nsxlib_exc.ManagerError as e: msg = (_("Error while retrieving bridge endpoint profile " "%(bep_id)s from NSX backend. Check that the profile " "exits and there are not multiple profiles with " "the given name. 
Exception: %(exc)s") % {'bep_id': bep_id, 'exc': e}) raise n_exc.InvalidInput(error_message=msg) interface_name, seg_id = self._get_conn_parameters( admin_ctx, gw_connection) try: # Use search API for listing bridge endpoints on NSX for provided # VLAN id, transport zone id, and Bridge endpoint profile endpoints = nsxlib.search_all_resource_by_attributes( nsxlib.bridge_endpoint.resource_type, bridge_endpoint_profile_id=bep_id, vlan_transport_zone_id=interface_name, vlan=seg_id) endpoint_map = dict((endpoint['id'], endpoint['bridge_endpoint_profile_id']) for endpoint in endpoints) except nsxlib_exc.ManagerError as e: msg = (_("Error while retrieving endpoints for bridge endpoint " "profile %(bep_id)s s from NSX backend. " "Exception: %(exc)s") % {'bep_id': bep_id, 'exc': e}) raise n_exc.InvalidInput(error_message=msg) # get all bridge endpoint ports with db_api.CONTEXT_WRITER.using(admin_ctx): port_filters = {'device_owner': [nsx_constants.BRIDGE_ENDPOINT]} ports = self._core_plugin.get_ports( admin_ctx, filters=port_filters) for port in ports: device_id = port.get('device_id') if endpoint_map.get(device_id) == bep_id: # This device is using the same vlan id and bridge endpoint # profile as the one requested. Not ok. 
msg = (_("Cannot create multiple connections with the " "same segmentation id %(seg_id)s for bridge " "endpoint profile %(bep_id)s") % {'seg_id': seg_id, 'bep_id': bep_id}) raise n_exc.InvalidInput(error_message=msg) def create_l2_gateway_connection_postcommit(self, context, gw_connection): """Create a L2 gateway connection on the backend""" nsxlib = self._core_plugin.nsxlib l2gw_id = gw_connection.get(l2gw_const.L2GATEWAY_ID) network_id = gw_connection.get(l2gw_const.NETWORK_ID) device_name = self._get_bep(context, l2gw_id) interface_name, seg_id = self._get_conn_parameters( context, gw_connection) self._validate_segment_id(seg_id) tenant_id = gw_connection['tenant_id'] if context.is_admin and not tenant_id: tenant_id = context.tenant_id gw_connection['tenant_id'] = tenant_id try: tags = nsxlib.build_v3_tags_payload( gw_connection, resource_type='os-neutron-l2gw-id', project_name=context.tenant_name) bridge_endpoint = nsxlib.bridge_endpoint.create( device_name=device_name, vlan_transport_zone_id=interface_name, vlan_id=seg_id, tags=tags) except nsxlib_exc.ManagerError as e: LOG.exception("Unable to create bridge endpoint. " "Exception is %s", e) raise l2gw_exc.L2GatewayServiceDriverError( method='create_l2_gateway_connection_postcommit') port_dict = {'port': { 'name': 'l2gw-conn-%s-%s' % ( l2gw_id, seg_id), 'tenant_id': tenant_id, 'network_id': network_id, 'mac_address': constants.ATTR_NOT_SPECIFIED, 'admin_state_up': True, 'fixed_ips': [], 'device_id': bridge_endpoint['id'], 'device_owner': nsx_constants.BRIDGE_ENDPOINT}} try: #TODO(abhiraut): Consider adding UT for port check once UTs are # refactored port = self._core_plugin.create_port(context, port_dict, l2gw_port_check=True) # Deallocate IP address from the port. 
for fixed_ip in port.get('fixed_ips', []): self._core_plugin._delete_ip_allocation(context, network_id, fixed_ip['subnet_id'], fixed_ip['ip_address']) LOG.debug("IP addresses deallocated on port %s", port['id']) except (nsxlib_exc.ManagerError, n_exc.NeutronException) as e: with excutils.save_and_reraise_exception(): LOG.exception("Unable to create L2 gateway port, " "rolling back changes on backend: %s", e) self._core_plugin.nsxlib.bridge_endpoint.delete( bridge_endpoint['id']) super(NsxV3Driver, self).delete_l2_gateway_connection( context, gw_connection['id']) try: with db_api.CONTEXT_WRITER.using(context): # Update neutron's database with the mappings. nsx_db.add_l2gw_connection_mapping( session=context.session, connection_id=gw_connection['id'], bridge_endpoint_id=bridge_endpoint['id'], port_id=port['id']) # If no segmentation_id was passed as a part of the # connection object, update it with the one inherited # from the device spec if seg_id and not gw_connection.get(l2gw_const.SEG_ID): conn_db = self._plugin._get_l2_gateway_connection( context, gw_connection['id']) conn_db['segmentation_id'] = seg_id # Ensure the object is updated as well so the # create response returns the segmentation_id gw_connection[l2gw_const.SEG_ID] = seg_id except db_exc.DBError: with excutils.save_and_reraise_exception(): LOG.exception("Unable to add L2 gateway connection " "mappings for %(conn_id)s on network " "%(net_id)s. 
rolling back changes.", {'conn_id': gw_connection['id'], 'net_id': network_id}) self._core_plugin.nsxlib.bridge_endpoint.delete( bridge_endpoint['id']) super(NsxV3Driver, self).delete_l2_gateway_connection( context, gw_connection['id']) return gw_connection def delete_l2_gateway_connection_postcommit(self, context, gw_connection): pass def delete_l2_gateway_connection_precommit(self, context, gw_connection): pass def delete_l2_gateway_connection(self, context, gw_connection): """Delete a L2 gateway connection.""" conn_mapping = nsx_db.get_l2gw_connection_mapping( session=context.session, connection_id=gw_connection) if not conn_mapping: LOG.error("Unable to delete gateway connection %(id)s: mapping " "not found", {'id': gw_connection}) # Do not block the deletion return bridge_endpoint_id = conn_mapping.get('bridge_endpoint_id') # Delete the logical port from the bridge endpoint. self._core_plugin.delete_port(context=context, port_id=conn_mapping.get('port_id'), l2gw_port_check=False) try: self._core_plugin.nsxlib.bridge_endpoint.delete(bridge_endpoint_id) except nsxlib_exc.ManagerError as e: LOG.exception("Unable to delete bridge endpoint %(id)s on the " "backend due to exc: %(exc)s", {'id': bridge_endpoint_id, 'exc': e}) raise l2gw_exc.L2GatewayServiceDriverError( method='delete_l2_gateway_connection') def prevent_l2gw_port_deletion(self, context, port_id): """Prevent core plugin from deleting L2 gateway port.""" try: port = self._core_plugin.get_port(context, port_id) except n_exc.PortNotFound: return if port['device_owner'] == nsx_constants.BRIDGE_ENDPOINT: reason = _("has device owner %s") % port['device_owner'] raise n_exc.ServicePortInUse(port_id=port_id, reason=reason) def add_port_mac(self, context, port_dict): """Process a created Neutron port.""" pass def delete_port_mac(self, context, port): """Process a deleted Neutron port.""" pass ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2142541 
vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/0000755000175000017500000000000000000000000022275 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/__init__.py0000644000175000017500000000000000000000000024374 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/base_mgr.py0000644000175000017500000000533200000000000024431 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.plugins import constants as plugin_const from neutron_lib.plugins import directory from oslo_log import log as logging from vmware_nsx.extensions import projectpluginmap LOG = logging.getLogger(__name__) class LoadbalancerBaseManager(object): _lbv2_driver = None _core_plugin = None _flavor_plugin = None def __init__(self): super(LoadbalancerBaseManager, self).__init__() def _get_plugin(self, plugin_type): return directory.get_plugin(plugin_type) @property def lbv2_driver(self): if not self._lbv2_driver: plugin = self._get_plugin( plugin_const.LOADBALANCERV2) self._lbv2_driver = ( plugin.drivers['vmwareedge']) return self._lbv2_driver @property def core_plugin(self): if not self._core_plugin: self._core_plugin = ( self._get_plugin(plugin_const.CORE)) if self._core_plugin.is_tvd_plugin(): # get the plugin that match this driver self._core_plugin = self._core_plugin.get_plugin_by_type( self._plugin_id) return self._core_plugin @property def flavor_plugin(self): if not self._flavor_plugin: self._flavor_plugin = ( self._get_plugin(plugin_const.FLAVORS)) return self._flavor_plugin class EdgeLoadbalancerBaseManager(LoadbalancerBaseManager): def __init__(self, vcns_driver): super(EdgeLoadbalancerBaseManager, self).__init__() self._plugin_id = projectpluginmap.NsxPlugins.NSX_V self.vcns_driver = vcns_driver @property def vcns(self): return self.vcns_driver.vcns class Nsxv3LoadbalancerBaseManager(LoadbalancerBaseManager): def __init__(self): super(Nsxv3LoadbalancerBaseManager, self).__init__() self._plugin_id = projectpluginmap.NsxPlugins.NSX_T class NsxpLoadbalancerBaseManager(LoadbalancerBaseManager): def __init__(self): super(NsxpLoadbalancerBaseManager, self).__init__() self._plugin_id = projectpluginmap.NsxPlugins.NSX_P ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/lb_common.py0000644000175000017500000000460600000000000024622 
0ustar00coreycorey00000000000000# Copyright 2019 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import exceptions as n_exc from oslo_log import log as logging from vmware_nsx._i18n import _ from vmware_nsx.services.lbaas import lb_const LOG = logging.getLogger(__name__) def validate_session_persistence(pool, listener, completor): sp = pool.get('session_persistence') LOG.debug("validate_session_persistence called with session_persistence " "%s", sp) if not listener or not sp: # safety first! 
return # L4 listeners only allow source IP persistence # (HTTPS is also considers L4 listener) if ((listener['protocol'] == lb_const.LB_PROTOCOL_TCP or listener['protocol'] == lb_const.LB_PROTOCOL_HTTPS) and sp['type'] != lb_const.LB_SESSION_PERSISTENCE_SOURCE_IP): completor(success=False) msg = (_("Invalid session persistence type %(sp_type)s for " "pool on listener %(lst_id)s with %(proto)s protocol") % {'sp_type': sp['type'], 'lst_id': listener['id'], 'proto': listener['protocol']}) raise n_exc.BadRequest(resource='lbaas-pool', msg=msg) def session_persistence_type_changed(pool, old_pool): cookie_pers_types = (lb_const.LB_SESSION_PERSISTENCE_HTTP_COOKIE, lb_const.LB_SESSION_PERSISTENCE_APP_COOKIE) sp = pool.get('session_persistence') if not sp: return False if old_pool: oldsp = old_pool.get('session_persistence') if not oldsp: return False if ((sp['type'] == lb_const.LB_SESSION_PERSISTENCE_SOURCE_IP and oldsp['type'] in cookie_pers_types) or (sp['type'] in cookie_pers_types and oldsp['type'] == lb_const.LB_SESSION_PERSISTENCE_SOURCE_IP)): return True return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/lb_const.py0000644000175000017500000001121700000000000024454 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
LB_METHOD_ROUND_ROBIN = 'ROUND_ROBIN' LB_METHOD_LEAST_CONNECTIONS = 'LEAST_CONNECTIONS' LB_METHOD_SOURCE_IP = 'SOURCE_IP' BALANCE_MAP = { LB_METHOD_ROUND_ROBIN: 'round-robin', LB_METHOD_LEAST_CONNECTIONS: 'leastconn', LB_METHOD_SOURCE_IP: 'ip-hash'} LB_PROTOCOL_TCP = 'TCP' LB_PROTOCOL_HTTP = 'HTTP' LB_PROTOCOL_HTTPS = 'HTTPS' LB_PROTOCOL_TERMINATED_HTTPS = 'TERMINATED_HTTPS' PROTOCOL_MAP = { LB_PROTOCOL_TCP: 'tcp', LB_PROTOCOL_HTTP: 'http', LB_PROTOCOL_HTTPS: 'https', LB_PROTOCOL_TERMINATED_HTTPS: 'https'} LB_HEALTH_MONITOR_PING = 'PING' LB_HEALTH_MONITOR_TCP = 'TCP' LB_HEALTH_MONITOR_HTTP = 'HTTP' LB_HEALTH_MONITOR_HTTPS = 'HTTPS' HEALTH_MONITOR_MAP = { LB_HEALTH_MONITOR_PING: 'icmp', LB_HEALTH_MONITOR_TCP: 'tcp', LB_HEALTH_MONITOR_HTTP: 'http', LB_HEALTH_MONITOR_HTTPS: 'https'} LB_SESSION_PERSISTENCE_SOURCE_IP = 'SOURCE_IP' LB_SESSION_PERSISTENCE_HTTP_COOKIE = 'HTTP_COOKIE' LB_SESSION_PERSISTENCE_APP_COOKIE = 'APP_COOKIE' SESSION_PERSISTENCE_METHOD_MAP = { LB_SESSION_PERSISTENCE_SOURCE_IP: 'sourceip', LB_SESSION_PERSISTENCE_APP_COOKIE: 'cookie', LB_SESSION_PERSISTENCE_HTTP_COOKIE: 'cookie'} SESSION_PERSISTENCE_COOKIE_MAP = { LB_SESSION_PERSISTENCE_APP_COOKIE: 'app', LB_SESSION_PERSISTENCE_HTTP_COOKIE: 'insert'} SESSION_PERSISTENCE_DEFAULT_COOKIE_NAME = 'default_cookie_name' L7_POLICY_ACTION_REJECT = 'REJECT' L7_POLICY_ACTION_REDIRECT_TO_POOL = 'REDIRECT_TO_POOL' L7_POLICY_ACTION_REDIRECT_TO_URL = 'REDIRECT_TO_URL' L7_RULE_TYPE_HOST_NAME = 'HOST_NAME' L7_RULE_TYPE_PATH = 'PATH' L7_RULE_TYPE_FILE_TYPE = 'FILE_TYPE' L7_RULE_TYPE_HEADER = 'HEADER' L7_RULE_TYPE_COOKIE = 'COOKIE' L7_RULE_COMPARE_TYPE_REGEX = 'REGEX' L7_RULE_COMPARE_TYPE_STARTS_WITH = 'STARTS_WITH' L7_RULE_COMPARE_TYPE_ENDS_WITH = 'ENDS_WITH' L7_RULE_COMPARE_TYPE_CONTAINS = 'CONTAINS' L7_RULE_COMPARE_TYPE_EQUAL_TO = 'EQUAL_TO' # Resource type for resources created on NSX backend LB_LB_TYPE = 'os-lbaas-lb-id' LB_LB_NAME = 'os-lbaas-lb-name' LB_LISTENER_TYPE = 'os-lbaas-listener-id' LB_HM_TYPE = 
'os-lbaas-hm-id' LB_POOL_TYPE = 'os-lbaas-pool-id' LB_L7POLICY_TYPE = 'os-lbaas-l7policy-id' LB_HTTP_PROFILE = 'LbHttpProfile' LB_TCP_PROFILE = 'LbFastTcpProfile' LB_UDP_PROFILE = 'LbFastUdpProfile' NSXV3_MONITOR_MAP = {LB_HEALTH_MONITOR_PING: 'LbIcmpMonitor', LB_HEALTH_MONITOR_TCP: 'LbTcpMonitor', LB_HEALTH_MONITOR_HTTP: 'LbHttpMonitor', LB_HEALTH_MONITOR_HTTPS: 'LbHttpsMonitor'} LB_POOL_ALGORITHM_MAP = { LB_METHOD_ROUND_ROBIN: 'WEIGHTED_ROUND_ROBIN', LB_METHOD_LEAST_CONNECTIONS: 'LEAST_CONNECTION', LB_METHOD_SOURCE_IP: 'IP_HASH', } LB_STATS_MAP = {'active_connections': 'current_sessions', 'bytes_in': 'bytes_in', 'bytes_out': 'bytes_out', 'total_connections': 'total_sessions'} LB_EMPTY_STATS = {'active_connections': 0, 'bytes_in': 0, 'bytes_out': 0, 'total_connections': 0} LR_ROUTER_TYPE = 'os-neutron-router-id' LR_PORT_TYPE = 'os-neutron-rport-id' LB_CERT_RESOURCE_TYPE = ['certificate_signed', 'certificate_self_signed'] DEFAULT_LB_SIZE = 'SMALL' LB_FLAVOR_SIZES = ['SMALL', 'MEDIUM', 'LARGE', 'small', 'medium', 'large'] LB_RULE_MATCH_TYPE = { L7_RULE_COMPARE_TYPE_CONTAINS: 'CONTAINS', L7_RULE_COMPARE_TYPE_ENDS_WITH: 'ENDS_WITH', L7_RULE_COMPARE_TYPE_EQUAL_TO: 'EQUALS', L7_RULE_COMPARE_TYPE_REGEX: 'REGEX', L7_RULE_COMPARE_TYPE_STARTS_WITH: 'STARTS_WITH'} LB_SELECT_POOL_ACTION = 'LbSelectPoolAction' LB_HTTP_REDIRECT_ACTION = 'LbHttpRedirectAction' LB_REJECT_ACTION = 'LbHttpRejectAction' LB_HTTP_REDIRECT_STATUS = '302' LB_HTTP_REJECT_STATUS = '403' LB_RULE_HTTP_REQUEST_REWRITE = 'HTTP_REQUEST_REWRITE' LB_RULE_HTTP_FORWARDING = 'HTTP_FORWARDING' LB_RULE_HTTP_RESPONSE_REWRITE = 'HTTP_RESPONSE_REWRITE' LOADBALANCERS = 'loadbalancers' LISTENERS = 'listeners' POOLS = 'pools' MEMBERS = 'members' ONLINE = 'ONLINE' OFFLINE = 'OFFLINE' DEGRADED = 'DEGRADED' ENABLED = 'ENABLED' DISABLED = 'DISABLED' VMWARE_LB_VIP_OWNER = 'vmware-lb-vip' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2142541 
vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/nsx_p/0000755000175000017500000000000000000000000023424 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/nsx_p/__init__.py0000644000175000017500000000000000000000000025523 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2142541 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/nsx_p/implementation/0000755000175000017500000000000000000000000026451 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/nsx_p/implementation/__init__.py0000644000175000017500000000000000000000000030550 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/nsx_p/implementation/healthmonitor_mgr.py0000644000175000017500000001320400000000000032545 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib import exceptions as n_exc from oslo_log import log as logging from oslo_utils import excutils from vmware_nsx._i18n import _ from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas import lb_const from vmware_nsx.services.lbaas.nsx_p.implementation import lb_utils from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3 import utils LOG = logging.getLogger(__name__) class EdgeHealthMonitorManagerFromDict(base_mgr.NsxpLoadbalancerBaseManager): def _build_monitor_args(self, hm): body = { 'interval': hm['delay'], 'fall_count': hm['max_retries'], 'timeout': hm['timeout'], 'name': utils.get_name_and_uuid(hm['name'] or 'monitor', hm['id']) } if hm['type'] in [lb_const.LB_HEALTH_MONITOR_HTTP, lb_const.LB_HEALTH_MONITOR_HTTPS]: if hm['http_method']: body['request_method'] = hm['http_method'] if hm['url_path']: body['request_url'] = hm['url_path'] if hm['expected_codes']: codes = hm['expected_codes'].split(",") body['response_status_codes'] = [ int(code) for code in codes] return body def create(self, context, hm, completor): pool_id = hm['pool']['id'] pool_client = self.core_plugin.nsxpolicy.load_balancer.lb_pool monitor_client = lb_utils.get_monitor_policy_client( self.core_plugin.nsxpolicy.load_balancer, hm) tags = lb_utils.get_tags(self.core_plugin, hm['id'], lb_const.LB_HM_TYPE, hm['tenant_id'], context.project_name) monitor_body = self._build_monitor_args(hm) lb_monitor = None try: lb_monitor = monitor_client.create_or_overwrite( lb_monitor_profile_id=hm['id'], tags=tags, **monitor_body) except nsxlib_exc.ManagerError: with excutils.save_and_reraise_exception(): completor(success=False) if pool_id and lb_monitor: try: hm_path = monitor_client.get_path(hm['id']) pool_client.add_monitor_to_pool(pool_id, [hm_path]) except nsxlib_exc.ManagerError: completor(success=False) msg = _('Failed to attach monitor %(monitor)s to pool ' '%(pool)s') % {'monitor': hm['id'], 'pool': pool_id} raise 
n_exc.BadRequest(resource='lbaas-hm', msg=msg) else: completor(success=False) msg = _('Failed to attach monitor %(monitor)s to pool ' '%(pool)s: NSX pool was not found on the DB') % { 'monitor': hm['id'], 'pool': pool_id} raise n_exc.BadRequest(resource='lbaas-hm', msg=msg) completor(success=True) def update(self, context, old_hm, new_hm, completor): monitor_client = lb_utils.get_monitor_policy_client( self.core_plugin.nsxpolicy.load_balancer, new_hm) try: monitor_body = self._build_monitor_args(new_hm) monitor_client.update(new_hm['id'], **monitor_body) except nsxlib_exc.ManagerError as exc: completor(success=False) msg = _('Failed to update monitor %(monitor)s with exception' ' %s(exc)s') % {'monitor': new_hm['id'], 'exc': exc} raise n_exc.BadRequest(resource='lbaas-hm', msg=msg) completor(success=True) def delete(self, context, hm, completor): pool_id = hm['pool']['id'] pool_client = self.core_plugin.nsxpolicy.load_balancer.lb_pool monitor_client = lb_utils.get_monitor_policy_client( self.core_plugin.nsxpolicy.load_balancer, hm) try: hm_path = monitor_client.get_path(hm['id']) pool_client.remove_monitor_from_pool(pool_id, hm_path) except nsxlib_exc.ResourceNotFound: pass except nsxlib_exc.ManagerError as exc: completor(success=False) msg = _('Failed to remove monitor %(monitor)s from pool %(pool)s ' 'with exception from nsx %(exc)s') % { 'monitor': hm['id'], 'pool': pool_id, 'exc': exc} raise n_exc.BadRequest(resource='lbaas-hm', msg=msg) try: monitor_client.delete(hm['id']) except nsxlib_exc.ResourceNotFound: pass except nsxlib_exc.ManagerError as exc: completor(success=False) msg = _('Failed to delete monitor %(monitor)s from backend with ' 'exception %(exc)s') % {'monitor': hm['id'], 'exc': exc} raise n_exc.BadRequest(resource='lbaas-hm', msg=msg) completor(success=True) def delete_cascade(self, context, hm, completor): self.delete(context, hm, completor) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/nsx_p/implementation/l7policy_mgr.py0000644000175000017500000000741700000000000031443 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import exceptions as n_exc from oslo_log import log as logging from oslo_utils import excutils from vmware_nsx._i18n import _ from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas.nsx_p.implementation import lb_utils from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3 import utils LOG = logging.getLogger(__name__) class EdgeL7PolicyManagerFromDict(base_mgr.NsxpLoadbalancerBaseManager): def create(self, context, policy, completor): vs_client = self.core_plugin.nsxpolicy.load_balancer.virtual_server policy_name = utils.get_name_and_uuid(policy['name'] or 'policy', policy['id']) rule_body = lb_utils.convert_l7policy_to_lb_rule( self.core_plugin.nsxpolicy, policy) try: position = policy.get('position', 0) - 1 vs_client.add_lb_rule(policy['listener_id'], name=policy_name, position=position, **rule_body) except nsxlib_exc.ManagerError: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to add rule %(rule)% to virtual server ' '%(vs)s at NSX backend', {'rule': policy['id'], 'vs': policy['listener_id']}) completor(success=True) def update(self, context, old_policy, new_policy, completor): vs_client = 
self.core_plugin.nsxpolicy.load_balancer.virtual_server policy_name = utils.get_name_and_uuid(old_policy['name'] or 'policy', old_policy['id']) rule_body = lb_utils.convert_l7policy_to_lb_rule( self.core_plugin.nsxpolicy, new_policy) try: vs_client.update_lb_rule( new_policy['listener_id'], policy_name, position=new_policy.get('position', 0) - 1, **rule_body) except Exception as e: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to update L7policy %(policy)s: ' '%(err)s', {'policy': old_policy['id'], 'err': e}) completor(success=True) def delete(self, context, policy, completor): vs_client = self.core_plugin.nsxpolicy.load_balancer.virtual_server policy_name = utils.get_name_and_uuid(policy['name'] or 'policy', policy['id']) try: vs_client.remove_lb_rule(policy['listener_id'], policy_name) except nsxlib_exc.ResourceNotFound: pass except nsxlib_exc.ManagerError: completor(success=False) msg = (_('Failed to delete L7 policy: %(policy)s') % {'policy': policy['id']}) raise n_exc.BadRequest(resource='lbaas-l7policy', msg=msg) completor(success=True) def delete_cascade(self, context, policy, completor): self.delete(context, policy, completor) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/nsx_p/implementation/l7rule_mgr.py0000644000175000017500000000473600000000000031114 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from oslo_utils import excutils from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas.nsx_p.implementation import lb_utils from vmware_nsxlib.v3 import utils LOG = logging.getLogger(__name__) class EdgeL7RuleManagerFromDict(base_mgr.NsxpLoadbalancerBaseManager): def _update_l7rule_change(self, rule, completor, delete=False): vs_client = self.core_plugin.nsxpolicy.load_balancer.virtual_server policy = rule['policy'] policy_name = utils.get_name_and_uuid(policy['name'] or 'policy', policy['id']) if delete: lb_utils.remove_rule_from_policy(rule) else: lb_utils.update_rule_in_policy(rule) rule_body = lb_utils.convert_l7policy_to_lb_rule( self.core_plugin.nsxpolicy, rule['policy']) try: vs_client.update_lb_rule(policy['listener_id'], policy_name, position=policy.get('position', 0) - 1, **rule_body) except Exception as e: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to update L7policy %(policy)s: ' '%(err)s', {'policy': policy['id'], 'err': e}) completor(success=True) def create(self, context, rule, completor): self._update_l7rule_change(rule, completor) def update(self, context, old_rule, new_rule, completor): self._update_l7rule_change(new_rule, completor) def delete(self, context, rule, completor): self._update_l7rule_change(rule, completor, delete=True) def delete_cascade(self, context, rule, completor): # No action should be taken on rules delete cascade pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/nsx_p/implementation/lb_const.py0000644000175000017500000000137100000000000030630 0ustar00coreycorey00000000000000# Copyright 2019 VMware, Inc. 
# All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. LB_SELECT_POOL_ACTION = 'LBSelectPoolAction' LB_HTTP_REDIRECT_ACTION = 'LBHttpRedirectAction' LB_REJECT_ACTION = 'LBHttpRejectAction' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/nsx_p/implementation/lb_utils.py0000644000175000017500000002534000000000000030644 0ustar00coreycorey00000000000000# Copyright 2019 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import functools from neutron_lib import exceptions as n_exc from oslo_log import log as logging from vmware_nsx._i18n import _ from vmware_nsx.services.lbaas import lb_const from vmware_nsx.services.lbaas.nsx_p.implementation import lb_const as p_const from vmware_nsx.services.lbaas.nsx_v3.implementation import lb_utils from vmware_nsxlib.v3 import load_balancer as nsxlib_lb from vmware_nsxlib.v3.policy import constants as p_constants from vmware_nsxlib.v3.policy import utils as p_utils from vmware_nsxlib.v3 import utils LOG = logging.getLogger(__name__) ADV_RULE_NAME = 'LB external VIP advertisement' def get_rule_match_conditions(policy): match_conditions = [] # values in rule have already been validated in LBaaS API, # we won't need to valid anymore in driver, and just get # the LB rule mapping from the dict. for rule in policy['rules']: match_type = lb_const.LB_RULE_MATCH_TYPE[rule['compare_type']] if rule['type'] == lb_const.L7_RULE_TYPE_COOKIE: header_value = rule['key'] + '=' + rule['value'] match_conditions.append( {'type': 'LBHttpRequestHeaderCondition', 'match_type': match_type, 'header_name': 'Cookie', 'header_value': header_value}) elif rule['type'] == lb_const.L7_RULE_TYPE_FILE_TYPE: match_conditions.append( {'type': 'LBHttpRequestUriCondition', 'match_type': match_type, 'uri': '*.' 
+ rule['value']}) elif rule['type'] == lb_const.L7_RULE_TYPE_HEADER: match_conditions.append( {'type': 'LBHttpRequestHeaderCondition', 'match_type': match_type, 'header_name': rule['key'], 'header_value': rule['value']}) elif rule['type'] == lb_const.L7_RULE_TYPE_HOST_NAME: match_conditions.append( {'type': 'LBHttpRequestHeaderCondition', 'match_type': match_type, 'header_name': 'Host', 'header_value': rule['value']}) elif rule['type'] == lb_const.L7_RULE_TYPE_PATH: match_conditions.append( {'type': 'LBHttpRequestUriCondition', 'match_type': match_type, 'uri': rule['value']}) else: msg = (_('l7rule type %(type)s is not supported in LBaaS') % {'type': rule['type']}) raise n_exc.BadRequest(resource='lbaas-l7rule', msg=msg) return match_conditions def get_rule_actions(nsxpolicy, l7policy): if l7policy['action'] == lb_const.L7_POLICY_ACTION_REDIRECT_TO_POOL: if l7policy['redirect_pool_id']: lb_pool_id = l7policy['redirect_pool_id'] lb_pool_path = nsxpolicy.load_balancer.lb_pool.get_path(lb_pool_id) actions = [{'type': p_const.LB_SELECT_POOL_ACTION, 'pool_id': lb_pool_path}] else: msg = _('Failed to get LB pool binding from nsx db') raise n_exc.BadRequest(resource='lbaas-l7rule-create', msg=msg) elif l7policy['action'] == lb_const.L7_POLICY_ACTION_REDIRECT_TO_URL: actions = [{'type': p_const.LB_HTTP_REDIRECT_ACTION, 'redirect_status': lb_const.LB_HTTP_REDIRECT_STATUS, 'redirect_url': l7policy['redirect_url']}] elif l7policy['action'] == lb_const.L7_POLICY_ACTION_REJECT: actions = [{'type': p_const.LB_REJECT_ACTION, 'reply_status': lb_const.LB_HTTP_REJECT_STATUS}] else: msg = (_('Invalid l7policy action: %(action)s') % {'action': l7policy['action']}) raise n_exc.BadRequest(resource='lbaas-l7rule-create', msg=msg) return actions def convert_l7policy_to_lb_rule(nsxpolicy, policy): return { 'match_conditions': get_rule_match_conditions(policy), 'actions': get_rule_actions(nsxpolicy, policy), 'phase': lb_const.LB_RULE_HTTP_FORWARDING, 'match_strategy': 'ALL' } def 
remove_rule_from_policy(rule): l7rules = rule['policy']['rules'] rule['policy']['rules'] = [r for r in l7rules if r['id'] != rule['id']] def update_rule_in_policy(rule): remove_rule_from_policy(rule) rule['policy']['rules'].append(rule) def update_router_lb_vip_advertisement(context, core_plugin, router_id): router = core_plugin.get_router(context.elevated(), router_id) # Add a rule to advertise external vips on the router external_subnets = core_plugin._find_router_gw_subnets( context.elevated(), router) external_cidrs = [s['cidr'] for s in external_subnets] if external_cidrs: core_plugin.nsxpolicy.tier1.add_advertisement_rule( router_id, ADV_RULE_NAME, p_constants.ADV_RULE_PERMIT, p_constants.ADV_RULE_OPERATOR_GE, [p_constants.ADV_RULE_TIER1_LB_VIP], external_cidrs) def get_monitor_policy_client(lb_client, hm): if hm['type'] == lb_const.LB_HEALTH_MONITOR_TCP: return lb_client.lb_monitor_profile_tcp elif hm['type'] == lb_const.LB_HEALTH_MONITOR_HTTP: return lb_client.lb_monitor_profile_http elif hm['type'] == lb_const.LB_HEALTH_MONITOR_HTTPS: return lb_client.lb_monitor_profile_https elif hm['type'] == lb_const.LB_HEALTH_MONITOR_PING: return lb_client.lb_monitor_profile_icmp else: msg = (_('Cannot create health monitor %(monitor)s with ' 'type %(type)s') % {'monitor': hm['id'], 'type': hm['type']}) raise n_exc.InvalidInput(error_message=msg) def get_tags(plugin, resource_id, resource_type, project_id, project_name): return lb_utils.get_tags(plugin, resource_id, resource_type, project_id, project_name) def build_persistence_profile_tags(pool_tags, listener): tags = pool_tags[:] # With octavia loadbalancer name might not be among data passed # down to the driver lb_data = listener.get('loadbalancer') if lb_data: tags.append({ 'scope': lb_const.LB_LB_NAME, 'tag': lb_data['name'][:utils.MAX_TAG_LEN]}) tags.append({ 'scope': lb_const.LB_LB_TYPE, 'tag': listener['loadbalancer_id']}) tags.append({ 'scope': lb_const.LB_LISTENER_TYPE, 'tag': listener['id']}) return tags 
def delete_persistence_profile(nsxpolicy, lb_persistence_profile_path): lb_client = nsxpolicy.load_balancer pp_client = lb_client.lb_persistence_profile persistence_profile_id = p_utils.path_to_id(lb_persistence_profile_path) if persistence_profile_id: pp_client.delete(persistence_profile_id) def setup_session_persistence(nsxpolicy, pool, pool_tags, switch_type, listener, vs_data): sp = pool.get('session_persistence') pers_type = None cookie_name = None cookie_mode = None lb_client = nsxpolicy.load_balancer pp_client = None if not sp: LOG.debug("No session persistence info for pool %s", pool['id']) elif sp['type'] == lb_const.LB_SESSION_PERSISTENCE_HTTP_COOKIE: pp_client = lb_client.lb_cookie_persistence_profile pers_type = nsxlib_lb.PersistenceProfileTypes.COOKIE pers_id_suffix = 'cookie' cookie_name = sp.get('cookie_name') if not cookie_name: cookie_name = lb_const.SESSION_PERSISTENCE_DEFAULT_COOKIE_NAME cookie_mode = "INSERT" elif sp['type'] == lb_const.LB_SESSION_PERSISTENCE_APP_COOKIE: pp_client = lb_client.lb_cookie_persistence_profile pers_type = nsxlib_lb.PersistenceProfileTypes.COOKIE pers_id_suffix = 'cookie' # In this case cookie name is mandatory cookie_name = sp['cookie_name'] cookie_mode = "REWRITE" else: pp_client = lb_client.lb_source_ip_persistence_profile pers_type = nsxlib_lb.PersistenceProfileTypes.SOURCE_IP pers_id_suffix = 'sourceip' if pers_type: # There is a profile to create or update pp_kwargs = { 'name': "persistence_%s" % utils.get_name_and_uuid( pool['name'] or 'pool', pool['id'], maxlen=235), 'tags': lb_utils.build_persistence_profile_tags( pool_tags, listener) } if cookie_name: pp_kwargs['cookie_name'] = cookie_name pp_kwargs['cookie_mode'] = cookie_mode profile_path = vs_data.get('lb_persistence_profile_path', '') persistence_profile_id = p_utils.path_to_id(profile_path) if persistence_profile_id and not switch_type: # NOTE: removal of the persistence profile must be executed # after the virtual server has been updated if pers_type: 
# Update existing profile LOG.debug("Updating persistence profile %(profile_id)s for " "listener %(listener_id)s with pool %(pool_id)s", {'profile_id': persistence_profile_id, 'listener_id': listener['id'], 'pool_id': pool['id']}) pp_client.update(persistence_profile_id, **pp_kwargs) return persistence_profile_id, None else: # Prepare removal of persistence profile return (None, functools.partial(delete_persistence_profile, nsxpolicy, profile_path)) elif pers_type: # Create persistence profile pp_id = "%s_%s" % (pool['id'], pers_id_suffix) pp_kwargs['persistence_profile_id'] = pp_id pp_client.create_or_overwrite(**pp_kwargs) LOG.debug("Created persistence profile %(profile_id)s for " "listener %(listener_id)s with pool %(pool_id)s", {'profile_id': pp_id, 'listener_id': listener['id'], 'pool_id': pool['id']}) if switch_type: # There is aso a persistence profile to remove! return (pp_id, functools.partial(delete_persistence_profile, nsxpolicy, profile_path)) return pp_id, None return None, None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/nsx_p/implementation/listener_mgr.py0000644000175000017500000003733100000000000031524 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy from neutron_lib import exceptions as n_exc from oslo_log import log as logging from oslo_utils import excutils from vmware_nsx._i18n import _ from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas import lb_common from vmware_nsx.services.lbaas import lb_const from vmware_nsx.services.lbaas.nsx_p.implementation import lb_utils from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3.policy import core_resources from vmware_nsxlib.v3.policy import lb_defs from vmware_nsxlib.v3.policy import utils as p_utils from vmware_nsxlib.v3 import utils LOG = logging.getLogger(__name__) class EdgeListenerManagerFromDict(base_mgr.NsxpLoadbalancerBaseManager): def _get_listener_tags(self, context, listener): tags = lb_utils.get_tags(self.core_plugin, listener['id'], lb_const.LB_LISTENER_TYPE, listener.get('tenant_id'), context.project_name) tags.append({ 'scope': lb_const.LB_LB_NAME, 'tag': listener['loadbalancer']['name'][:utils.MAX_TAG_LEN]}) tags.append({ 'scope': lb_const.LB_LB_TYPE, 'tag': listener['loadbalancer_id']}) return tags def _upload_certificate(self, listener_id, cert_href, tags, certificate=None): nsxpolicy = self.core_plugin.nsxpolicy cert_client = nsxpolicy.certificate ssl_client = nsxpolicy.load_balancer.client_ssl_profile passphrase = certificate.get_private_key_passphrase() if not passphrase: passphrase = core_resources.IGNORE cert_client.create_or_overwrite( cert_href, certificate_id=listener_id, pem_encoded=certificate.get_certificate(), private_key=certificate.get_private_key(), passphrase=passphrase, tags=tags) return { 'client_ssl_profile_binding': { 'ssl_profile_path': ssl_client.get_path( self.core_plugin.client_ssl_profile), 'default_certificate_path': cert_client.get_path(listener_id) } } def _get_virtual_server_kwargs(self, context, listener, vs_name, tags, certificate=None): # If loadbalancer vip_port already has floating ip, use floating # IP as the virtual server VIP address. 
Else, use the loadbalancer # vip_address directly on virtual server. filters = {'port_id': [listener['loadbalancer']['vip_port_id']]} floating_ips = self.core_plugin.get_floatingips(context, filters=filters) if floating_ips: lb_vip_address = floating_ips[0]['floating_ip_address'] else: lb_vip_address = listener['loadbalancer']['vip_address'] kwargs = {'virtual_server_id': listener['id'], 'ip_address': lb_vip_address, 'ports': [listener['protocol_port']], 'application_profile_id': listener['id'], 'lb_service_id': listener['loadbalancer_id'], 'description': listener.get('description')} if vs_name: kwargs['name'] = vs_name if tags: kwargs['tags'] = tags if listener['connection_limit'] != -1: kwargs['max_concurrent_connections'] = listener['connection_limit'] if 'default_pool_id' in listener: if listener['default_pool_id']: kwargs['pool_id'] = listener['default_pool_id'] else: # Remove the default pool kwargs['pool_id'] = '' kwargs['lb_persistence_profile_id'] = '' if certificate: ssl_profile_binding = self._upload_certificate( listener['id'], listener['default_tls_container_id'], tags, certificate=certificate) if (listener['protocol'] == lb_const.LB_PROTOCOL_TERMINATED_HTTPS and ssl_profile_binding): kwargs.update(ssl_profile_binding) waf_profile, mode = self.core_plugin.get_waf_profile_path_and_mode() if (waf_profile and ( listener['protocol'] == lb_const.LB_PROTOCOL_HTTP or listener['protocol'] == lb_const.LB_PROTOCOL_TERMINATED_HTTPS)): kwargs['waf_profile_binding'] = lb_defs.WAFProfileBindingDef( waf_profile_path=waf_profile, operational_mode=mode) return kwargs def _get_nsxlib_app_profile(self, nsxlib_lb, listener): if (listener['protocol'] == lb_const.LB_PROTOCOL_HTTP or listener['protocol'] == lb_const.LB_PROTOCOL_TERMINATED_HTTPS): app_client = nsxlib_lb.lb_http_profile elif (listener['protocol'] == lb_const.LB_PROTOCOL_TCP or listener['protocol'] == lb_const.LB_PROTOCOL_HTTPS): app_client = nsxlib_lb.lb_fast_tcp_profile else: msg = (_('Cannot create listener 
%(listener)s with ' 'protocol %(protocol)s') % {'listener': listener['id'], 'protocol': listener['protocol']}) raise n_exc.BadRequest(resource='lbaas-listener', msg=msg) return app_client def _validate_default_pool(self, listener, completor): def_pool_id = listener.get('default_pool_id') if def_pool_id: vs_client = self.core_plugin.nsxpolicy.load_balancer.virtual_server vs_list = vs_client.list() for vs in vs_list: if vs.get('id') == listener['id']: continue pool_id = p_utils.path_to_id(vs.get('pool_path', '')) if pool_id == def_pool_id: completor(success=False) msg = (_('Default pool %s is already used by another ' 'listener %s') % (def_pool_id, vs.get('id'))) raise n_exc.BadRequest(resource='lbaas-pool', msg=msg) lb_common.validate_session_persistence( listener.get('default_pool'), listener, completor) def create(self, context, listener, completor, certificate=None): nsxlib_lb = self.core_plugin.nsxpolicy.load_balancer vs_client = nsxlib_lb.virtual_server vs_name = utils.get_name_and_uuid(listener['name'] or 'listener', listener['id']) tags = self._get_listener_tags(context, listener) self._validate_default_pool(listener, completor) try: app_client = self._get_nsxlib_app_profile(nsxlib_lb, listener) app_client.create_or_overwrite( lb_app_profile_id=listener['id'], name=vs_name, tags=tags) kwargs = self._get_virtual_server_kwargs( context, listener, vs_name, tags, certificate) vs_client.create_or_overwrite(**kwargs) except nsxlib_exc.ManagerError: completor(success=False) msg = _('Failed to create virtual server at NSX backend') raise n_exc.BadRequest(resource='lbaas-listener', msg=msg) self._update_default_pool(context, listener, completor) completor(success=True) def _get_pool_tags(self, context, pool, listener_tenant_id): return lb_utils.get_tags(self.core_plugin, pool['id'], lb_const.LB_POOL_TYPE, pool.get('tenant_id', listener_tenant_id), context.project_name) def _update_default_pool(self, context, listener, completor, old_listener=None): if not 
listener.get('default_pool_id'): return nsxlib_lb = self.core_plugin.nsxpolicy.load_balancer vs_client = nsxlib_lb.virtual_server vs_data = vs_client.get(listener['id']) pool_id = listener['default_pool_id'] pool = listener['default_pool'] old_pool = None if old_listener: old_pool = old_listener.get('default_pool') try: switch_type = lb_common.session_persistence_type_changed( pool, old_pool) (persistence_profile_id, post_process_func) = lb_utils.setup_session_persistence( self.core_plugin.nsxpolicy, pool, self._get_pool_tags(context, pool, listener.get('tenant_id')), switch_type, listener, vs_data) except nsxlib_exc.ManagerError: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error("Failed to configure session persistence " "profile for listener %s", listener['id']) try: # Update persistence profile and pool on virtual server vs_client.update( listener['id'], pool_id=pool_id, lb_persistence_profile_id=persistence_profile_id) LOG.debug("Updated NSX virtual server %(vs_id)s with " "persistence profile %(prof)s", {'vs_id': listener['id'], 'prof': persistence_profile_id}) if post_process_func: post_process_func() except nsxlib_exc.ManagerError: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error("Failed to attach persistence profile %s to " "virtual server %s", persistence_profile_id, listener['id']) def update(self, context, old_listener, new_listener, completor, certificate=None): nsxlib_lb = self.core_plugin.nsxpolicy.load_balancer vs_client = nsxlib_lb.virtual_server app_client = self._get_nsxlib_app_profile(nsxlib_lb, old_listener) vs_name = None tags = None self._validate_default_pool(new_listener, completor) if new_listener['name'] != old_listener['name']: vs_name = utils.get_name_and_uuid( new_listener['name'] or 'listener', new_listener['id']) tags = self._get_listener_tags(context, new_listener) try: app_profile_id = new_listener['id'] updated_kwargs = self._get_virtual_server_kwargs( context, 
new_listener, vs_name, tags, certificate) vs_client.update(**updated_kwargs) if vs_name: app_client.update(app_profile_id, name=vs_name, tags=tags) except Exception as e: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to update listener %(listener)s with ' 'error %(error)s', {'listener': old_listener['id'], 'error': e}) # Update default pool and session persistence if (old_listener.get('default_pool_id') != new_listener.get('default_pool_id')): self._update_default_pool(context, new_listener, completor, old_listener) completor(success=True) def delete(self, context, listener, completor): nsxlib_lb = self.core_plugin.nsxpolicy.load_balancer vs_client = nsxlib_lb.virtual_server app_client = self._get_nsxlib_app_profile(nsxlib_lb, listener) vs_id = listener['id'] app_profile_id = listener['id'] try: profile_path = None if listener.get('default_pool_id'): vs_data = vs_client.get(vs_id) profile_path = vs_data.get('lb_persistence_profile_path', '') vs_client.delete(vs_id) # Also delete the old session persistence profile if profile_path: lb_utils.delete_persistence_profile( self.core_plugin.nsxpolicy, profile_path) except nsxlib_exc.ResourceNotFound: LOG.error("virtual server not found on nsx: %(vs)s", {'vs': vs_id}) except nsxlib_exc.ManagerError: completor(success=False) msg = (_('Failed to delete virtual server: %(vs)s') % {'vs': vs_id}) raise n_exc.BadRequest(resource='lbaas-listener', msg=msg) try: app_client.delete(app_profile_id) except nsxlib_exc.ResourceNotFound: LOG.error("application profile not found on nsx: %s", app_profile_id) except nsxlib_exc.ManagerError as e: # This probably means that the application profile is being # used by a listener outside of openstack LOG.error("Failed to delete application profile %s from the " "NSX: %s", app_profile_id, e) # Delete imported NSX cert if there is any if listener.get('default_tls_container_id'): cert_client = self.core_plugin.nsxpolicy.certificate try: 
cert_client.delete(listener['id']) except nsxlib_exc.ResourceNotFound: LOG.error("Certificate not found on nsx: %s", listener['id']) except nsxlib_exc.ManagerError: completor(success=False) msg = (_('Failed to delete certificate: %(crt)s') % {'crt': listener['id']}) raise n_exc.BadRequest(resource='lbaas-listener', msg=msg) completor(success=True) def delete_cascade(self, context, listener, completor): self.delete(context, listener, completor) def stats_getter(context, core_plugin, ignore_list=None): """Update Octavia statistics for each listener (virtual server)""" stat_list = [] lb_service_client = core_plugin.nsxpolicy.load_balancer.lb_service lb_services = lb_service_client.list(silent_if_empty=True) # Go over all the loadbalancers & services for lb_service in lb_services: if ignore_list and lb_service['id'] in ignore_list: continue lb_service_id = lb_service.get('id') try: # get the NSX statistics for this LB service stats_results = lb_service_client.get_statistics( lb_service_id).get('results', []) if stats_results: rsp = stats_results[0] else: rsp = {} # Go over each virtual server in the response for vs in rsp.get('virtual_servers', []): # look up the virtual server in the DB if vs.get('statistics'): vs_stats = vs['statistics'] stats = copy.copy(lb_const.LB_EMPTY_STATS) stats['id'] = p_utils.path_to_id( vs['virtual_server_path']) stats['request_errors'] = 0 # currently unsupported for stat in lb_const.LB_STATS_MAP: lb_stat = lb_const.LB_STATS_MAP[stat] stats[stat] += vs_stats[lb_stat] stat_list.append(stats) except nsxlib_exc.ManagerError: pass return stat_list ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/nsx_p/implementation/loadbalancer_mgr.py0000644000175000017500000002726100000000000032307 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. 
# All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import exceptions as n_exc from oslo_log import log as logging from oslo_utils import excutils from vmware_nsx._i18n import _ from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas import lb_const from vmware_nsx.services.lbaas.nsx_p.implementation import lb_utils as p_utils from vmware_nsx.services.lbaas.nsx_v3.implementation import lb_utils from vmware_nsx.services.lbaas.octavia import constants as oct_const from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3.policy import utils as lib_p_utils from vmware_nsxlib.v3 import utils LOG = logging.getLogger(__name__) class EdgeLoadBalancerManagerFromDict(base_mgr.NsxpLoadbalancerBaseManager): def _get_lb_router(self, context, lb): router_id = lb_utils.get_router_from_network( context, self.core_plugin, lb['vip_subnet_id']) return router_id def create(self, context, lb, completor): lb_id = lb['id'] network = lb_utils.get_network_from_subnet( context, self.core_plugin, lb['vip_subnet_id']) router_id = self._get_lb_router(context, lb) if not router_id and network and not network.get('router:external'): completor(success=False) msg = (_('Cannot create a loadbalancer %(lb_id)s on subnet. 
' '%(subnet)s is neither public nor connected to the LB ' 'router') % {'lb_id': lb_id, 'subnet': lb['vip_subnet_id']}) raise n_exc.BadRequest(resource='lbaas-subnet', msg=msg) if router_id: # Validate that there is no other LB on this router # as NSX does not allow it if self.core_plugin.service_router_has_loadbalancers( context.elevated(), router_id): completor(success=False) msg = (_('Cannot create a loadbalancer %(lb_id)s on router ' '%(router)s, as it already has a loadbalancer') % {'lb_id': lb_id, 'router': router_id}) raise n_exc.BadRequest(resource='lbaas-router', msg=msg) # Create the service router if it does not exist if not self.core_plugin.service_router_has_services( context.elevated(), router_id): self.core_plugin.create_service_router(context, router_id) lb_name = utils.get_name_and_uuid(lb['name'] or 'lb', lb_id) tags = p_utils.get_tags(self.core_plugin, router_id if router_id else '', lb_const.LR_ROUTER_TYPE, lb['tenant_id'], context.project_name) lb_size = lb_utils.get_lb_flavor_size(self.flavor_plugin, context, lb.get('flavor_id')) service_client = self.core_plugin.nsxpolicy.load_balancer.lb_service try: if network and network.get('router:external'): connectivity_path = None else: connectivity_path = self.core_plugin.nsxpolicy.tier1.get_path( router_id) service_client.create_or_overwrite( lb_name, lb_service_id=lb['id'], description=lb['description'], tags=tags, size=lb_size, connectivity_path=connectivity_path) # Add rule to advertise external vips if router_id: p_utils.update_router_lb_vip_advertisement( context, self.core_plugin, router_id) except Exception as e: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to create loadbalancer %(lb)s for lb with ' 'exception %(e)s', {'lb': lb['id'], 'e': e}) # Make sure the vip port is marked with a device owner port = self.core_plugin.get_port( context.elevated(), lb['vip_port_id']) if not port.get('device_owner'): self.core_plugin.update_port( context.elevated(), 
lb['vip_port_id'], {'port': {'device_id': oct_const.DEVICE_ID_PREFIX + lb['id'], 'device_owner': lb_const.VMWARE_LB_VIP_OWNER}}) completor(success=True) def update(self, context, old_lb, new_lb, completor): completor(success=True) def delete(self, context, lb, completor): router_id = None try: router_id = lb_utils.get_router_from_network( context, self.core_plugin, lb['vip_subnet_id']) except n_exc.SubnetNotFound: LOG.warning("VIP subnet %s not found while deleting " "loadbalancer %s", lb['vip_subnet_id'], lb['id']) service_client = self.core_plugin.nsxpolicy.load_balancer.lb_service if not router_id: # Try to get it from the service try: service = service_client.get(lb['id']) except nsxlib_exc.ResourceNotFound: pass else: router_id = lib_p_utils.path_to_id( service.get('connectivity_path', '')) try: service_client.delete(lb['id']) except Exception as e: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to delete loadbalancer %(lb)s for lb ' 'with error %(err)s', {'lb': lb['id'], 'err': e}) # if no router for vip - should check the member router if router_id: try: if not self.core_plugin.service_router_has_services( context.elevated(), router_id): self.core_plugin.delete_service_router(router_id) except Exception as e: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to delete service router upon deletion ' 'of loadbalancer %(lb)s with error %(err)s', {'lb': lb['id'], 'err': e}) # Make sure the vip port is not marked with a vmware device owner try: port = self.core_plugin.get_port( context.elevated(), lb['vip_port_id']) if port.get('device_owner') == lb_const.VMWARE_LB_VIP_OWNER: self.core_plugin.update_port( context.elevated(), lb['vip_port_id'], {'port': {'device_id': '', 'device_owner': ''}}) except n_exc.PortNotFound: # Only log the error and continue anyway LOG.warning("VIP port %s not found while deleting loadbalancer %s", lb['vip_port_id'], lb['id']) except Exception as e: # Just log 
the error as all other resources were deleted LOG.error("Failed to update neutron port %s devices upon " "loadbalancer deletion: %s", lb['vip_port_id'], e) completor(success=True) def delete_cascade(self, context, lb, completor): """Delete all backend and DB resources of this loadbalancer""" self.delete(context, lb, completor) def refresh(self, context, lb): # TODO(kobis): implement pass def _get_lb_virtual_servers(self, context, lb): # Get all virtual servers that belong to this loadbalancer vs_list = [vs['id'] for vs in lb['listeners']] return vs_list def stats(self, context, lb): # Since multiple LBaaS loadbalancer can share the same LB service, # get the corresponding virtual servers' stats instead of LB service. stats = {'active_connections': 0, 'bytes_in': 0, 'bytes_out': 0, 'total_connections': 0} service_client = self.core_plugin.nsxpolicy.load_balancer.lb_service vs_list = self._get_lb_virtual_servers(context, lb) try: rsp = service_client.get_statistics(lb['id']) for result in rsp.get('results', []): for vs in result.get('virtual_servers', []): # Skip the virtual server that doesn't belong # to this loadbalancer vs_id = lib_p_utils.path_to_id(vs['virtual_server_path']) if vs_id not in vs_list: continue vs_stats = vs.get('statistics', {}) for stat in lb_const.LB_STATS_MAP: lb_stat = lb_const.LB_STATS_MAP[stat] stats[stat] += vs_stats.get(lb_stat, 0) except nsxlib_exc.ManagerError: msg = _('Failed to retrieve stats from LB service ' 'for loadbalancer %(lb)s') % {'lb': lb['id']} raise n_exc.BadRequest(resource='lbaas-lb', msg=msg) return stats def get_operating_status(self, context, id, with_members=False): service_client = self.core_plugin.nsxpolicy.load_balancer.lb_service try: service_status = service_client.get_status(id) if not isinstance(service_status, dict): service_status = {} except nsxlib_exc.ManagerError: LOG.warning("LB service %(lbs)s is not found", {'lbs': id}) return {} # get the loadbalancer status from the LB service lb_status = 
lb_const.ONLINE lb_status_results = service_status.get('results') if lb_status_results: result = lb_status_results[0] if result.get('service_status'): # Use backend service_status lb_status = self._nsx_status_to_lb_status( result['service_status']) elif result.get('alarm'): # No status, but has alarms -> ERROR lb_status = lb_const.OFFLINE else: # Unknown - assume it is ok lb_status = lb_const.ONLINE statuses = {lb_const.LOADBALANCERS: [{'id': id, 'status': lb_status}], lb_const.LISTENERS: [], lb_const.POOLS: [], lb_const.MEMBERS: []} # TODO(asarfaty): Go over all VS of this loadbalancer by tags # to add the listeners statuses from the virtual servers statuses return statuses def _nsx_status_to_lb_status(self, nsx_status): if not nsx_status: # default fallback return lb_const.ONLINE # Statuses that are considered ONLINE: if nsx_status.upper() in ['UP', 'UNKNOWN', 'PARTIALLY_UP', 'NO_STANDBY']: return lb_const.ONLINE # Statuses that are considered OFFLINE: if nsx_status.upper() in ['PRIMARY_DOWN', 'DETACHED', 'DOWN', 'ERROR']: return lb_const.OFFLINE if nsx_status.upper() == 'DISABLED': return lb_const.DISABLED # default fallback LOG.debug("NSX LB status %s - interpreted as ONLINE", nsx_status) return lb_const.ONLINE ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/nsx_p/implementation/member_mgr.py0000644000175000017500000002040500000000000031140 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import exceptions as n_exc from oslo_log import log as logging from oslo_utils import excutils from vmware_nsx._i18n import _ from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas import lb_const from vmware_nsx.services.lbaas.nsx_p.implementation import lb_utils as p_utils from vmware_nsx.services.lbaas.nsx_v3.implementation import lb_utils LOG = logging.getLogger(__name__) def _translate_member_state(state): return lb_const.ENABLED if state else lb_const.DISABLED class EdgeMemberManagerFromDict(base_mgr.NsxpLoadbalancerBaseManager): def _get_info_from_fip(self, context, fip): filters = {'floating_ip_address': [fip]} floating_ips = self.core_plugin.get_floatingips(context, filters=filters) if floating_ips: return floating_ips[0]['fixed_ip_address'] else: msg = (_('Member IP %(fip)s is an external IP, and is expected to ' 'be a floating IP') % {'fip': fip}) raise n_exc.BadRequest(resource='lbaas-vip', msg=msg) def _validate_member_lb_connectivity(self, context, member, completor): lb = member['pool'].get('loadbalancer') if not lb: msg = (_('Member %s loadbalancer object is missing') % member['id']) raise n_exc.BadRequest(resource='lbaas-vip', msg=msg) subnet_id = lb.get('vip_subnet_id') network = lb_utils.get_network_from_subnet( context, self.core_plugin, subnet_id) if network and not network.get('router:external'): return # If VIP is attached to an external network, loadbalancer_mgr might not # attach it to a router. If not, set the LB service connectivity path # to the member subnet's router. 
service_client = self.core_plugin.nsxpolicy.load_balancer.lb_service service = service_client.get(lb['id']) if not service.get('connectivity_path'): router_id = lb_utils.get_router_from_network( context, self.core_plugin, member['subnet_id']) # Validate that there is no other LB on this router # as NSX does not allow it if self.core_plugin.service_router_has_loadbalancers( context.elevated(), router_id): completor(success=False) msg = (_('Cannot attach a loadbalancer %(lb_id)s on router ' '%(router)s, as it already has a loadbalancer') % {'lb_id': lb['id'], 'router': router_id}) raise n_exc.BadRequest(resource='lbaas-router', msg=msg) if not self.core_plugin.service_router_has_services(context, router_id): self.core_plugin.create_service_router(context, router_id) connectivity_path = self.core_plugin.nsxpolicy.tier1.get_path( router_id) tags = p_utils.get_tags(self.core_plugin, router_id, lb_const.LR_ROUTER_TYPE, lb.get('tenant_id'), context.project_name) try: service_client.update(lb['id'], tags=tags, connectivity_path=connectivity_path) p_utils.update_router_lb_vip_advertisement( context, self.core_plugin, router_id) except Exception as e: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to set connectivity for loadbalancer ' '%(lb)s on subnet %(sub)s with error %(err)s', {'lb': lb['id'], 'sub': member['subnet_id'], 'err': e}) def create(self, context, member, completor): pool_client = self.core_plugin.nsxpolicy.load_balancer.lb_pool self._validate_member_lb_connectivity(context, member, completor) if member.get('subnet_id'): network = lb_utils.get_network_from_subnet( context, self.core_plugin, member['subnet_id']) else: network = None if network and network.get('router:external'): fixed_ip = self._get_info_from_fip(context, member['address']) else: fixed_ip = member['address'] pool_id = member['pool']['id'] try: pool_client.create_pool_member_and_add_to_pool( pool_id, fixed_ip, port=member['protocol_port'], 
display_name=member['name'][:218] + '_' + member['id'], weight=member['weight'], backup_member=member.get('backup', False), admin_state=_translate_member_state(member['admin_state_up'])) except Exception as e: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to create member %(member)s on pool %(pool)s' ': %(err)s', {'member': member['id'], 'pool': pool_id, 'err': e}) completor(success=True) def update(self, context, old_member, new_member, completor): network = lb_utils.get_network_from_subnet( context, self.core_plugin, new_member['subnet_id']) if network and network.get('router:external'): fixed_ip = self._get_info_from_fip(context, new_member['address']) else: fixed_ip = new_member['address'] pool_id = old_member['pool']['id'] pool_client = self.core_plugin.nsxpolicy.load_balancer.lb_pool try: pool_client.update_pool_member( pool_id, fixed_ip, port=new_member['protocol_port'], display_name=new_member['name'][:219] + '_' + new_member['id'], weight=new_member['weight'], backup_member=new_member.get('backup', False), admin_state=_translate_member_state( new_member['admin_state_up'])) except Exception as e: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to update member %(member)s on pool %(pool)s' ': %(err)s', {'member': new_member['id'], 'pool': pool_id, 'err': e}) completor(success=True) def delete(self, context, member, completor): network = lb_utils.get_network_from_subnet( context, self.core_plugin, member['subnet_id']) if network and network.get('router:external'): fixed_ip = self._get_info_from_fip(context, member['address']) else: fixed_ip = member['address'] pool_id = member['pool']['id'] pool_client = self.core_plugin.nsxpolicy.load_balancer.lb_pool try: pool_client.remove_pool_member( pool_id, fixed_ip, port=member['protocol_port']) except Exception as e: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to create member %(member)s on pool 
%(pool)s' ': %(err)s', {'member': member['id'], 'pool': pool_id, 'err': e}) completor(success=True) def delete_cascade(self, context, member, completor): # No action should be taken on members delete cascade pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/nsx_p/implementation/pool_mgr.py0000644000175000017500000002522700000000000030651 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import functools from neutron_lib import exceptions as n_exc from oslo_log import log as logging from oslo_utils import excutils from vmware_nsx._i18n import _ from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas import lb_common from vmware_nsx.services.lbaas import lb_const from vmware_nsx.services.lbaas.nsx_p.implementation import lb_utils from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3 import utils LOG = logging.getLogger(__name__) class EdgePoolManagerFromDict(base_mgr.NsxpLoadbalancerBaseManager): def _get_pool_kwargs(self, name=None, tags=None, algorithm=None, description=None): kwargs = { 'snat_translation': {'type': "LBSnatAutoMap"}} if name: kwargs['name'] = name if tags: kwargs['tags'] = tags if algorithm: kwargs['algorithm'] = algorithm if description: kwargs['description'] = description return kwargs def _get_pool_tags(self, context, pool): return lb_utils.get_tags(self.core_plugin, pool['id'], lb_const.LB_POOL_TYPE, pool['tenant_id'], context.project_name) def _remove_persistence(self, vs_data): lb_utils.delete_persistence_profile( self.core_plugin.nsxpolicy, vs_data.get('lb_persistence_profile_path', '')) def _process_vs_update(self, context, pool, pool_id, switch_type, listener, completor): vs_client = self.core_plugin.nsxpolicy.load_balancer.virtual_server try: # Process pool persistence profile and # create/update/delete profile for virtual server vs_data = vs_client.get(listener['id']) if pool and pool_id: (persistence_profile_id, post_process_func) = lb_utils.setup_session_persistence( self.core_plugin.nsxpolicy, pool, self._get_pool_tags(context, pool), switch_type, listener, vs_data) else: post_process_func = functools.partial( self._remove_persistence, vs_data) persistence_profile_id = None except nsxlib_exc.ManagerError: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error("Failed to configure session persistence " "profile for pool %(pool_id)s", {'pool_id': 
pool['id']}) try: # Update persistence profile and pool on virtual server vs_client.update( listener['id'], pool_id=pool_id, lb_persistence_profile_id=persistence_profile_id) LOG.debug("Updated NSX virtual server %(vs_id)s with " "pool %(pool_id)s and persistence profile %(prof)s", {'vs_id': listener['id'], 'pool_id': pool['id'], 'prof': persistence_profile_id}) if post_process_func: post_process_func() except nsxlib_exc.ManagerError: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to attach pool %s to virtual ' 'server %s', pool['id'], listener['id']) def create(self, context, pool, completor): pool_client = self.core_plugin.nsxpolicy.load_balancer.lb_pool pool_name = utils.get_name_and_uuid(pool['name'] or 'pool', pool['id']) tags = self._get_pool_tags(context, pool) description = pool.get('description') lb_algorithm = lb_const.LB_POOL_ALGORITHM_MAP.get(pool['lb_algorithm']) if pool.get('listeners') and len(pool['listeners']) > 1: completor(success=False) msg = (_('Failed to create pool: Multiple listeners are not ' 'supported.')) raise n_exc.BadRequest(resource='lbaas-pool', msg=msg) # NOTE(salv-orlando): Guard against accidental compat breakages try: listener = pool['listener'] or pool['listeners'][0] except IndexError: # If listeners is an empty list we hit this exception listener = None # Perform additional validation for session persistence before # creating resources in the backend lb_common.validate_session_persistence(pool, listener, completor) try: kwargs = self._get_pool_kwargs( pool_name, tags, lb_algorithm, description) pool_client.create_or_overwrite(lb_pool_id=pool['id'], **kwargs) except nsxlib_exc.ManagerError: completor(success=False) msg = (_('Failed to create pool on NSX backend: %(pool)s') % {'pool': pool['id']}) raise n_exc.BadRequest(resource='lbaas-pool', msg=msg) # The pool object can be created with either --listener or # --loadbalancer option. 
If listener is present, the virtual server # will be updated with the pool. Otherwise, just return. The binding # will be added later when the pool is associated with layer7 rule. # FIXME(salv-orlando): This two-step process can leave a zombie pool on # NSX if the VS update operation fails if listener: self._process_vs_update(context, pool, pool['id'], False, listener, completor) completor(success=True) def update(self, context, old_pool, new_pool, completor): pool_client = self.core_plugin.nsxpolicy.load_balancer.lb_pool pool_name = None tags = None lb_algorithm = None description = None if new_pool.get('listeners') and len(new_pool['listeners']) > 1: completor(success=False) msg = (_('Failed to update pool %s: Multiple listeners are not ' 'supported.') % new_pool['id']) raise n_exc.BadRequest(resource='lbaas-pool', msg=msg) if new_pool['name'] != old_pool['name']: pool_name = utils.get_name_and_uuid(new_pool['name'] or 'pool', new_pool['id']) tags = self._get_pool_tags(context, new_pool) if new_pool['lb_algorithm'] != old_pool['lb_algorithm']: lb_algorithm = lb_const.LB_POOL_ALGORITHM_MAP.get( new_pool['lb_algorithm']) if new_pool.get('description') != old_pool.get('description'): description = new_pool['description'] # NOTE(salv-orlando): Guard against accidental compat breakages try: listener = new_pool['listener'] or new_pool['listeners'][0] except IndexError: # If listeners is an empty list we hit this exception listener = None # Perform additional validation for session persistence before # operating on resources in the backend lb_common.validate_session_persistence(new_pool, listener, completor) try: kwargs = self._get_pool_kwargs(pool_name, tags, lb_algorithm, description) pool_client.update(lb_pool_id=new_pool['id'], **kwargs) if (listener and new_pool['session_persistence'] != old_pool['session_persistence']): switch_type = lb_common.session_persistence_type_changed( new_pool, old_pool) self._process_vs_update(context, new_pool, new_pool['id'], 
switch_type, listener, completor) completor(success=True) except Exception as e: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to update pool %(pool)s with ' 'error %(error)s', {'pool': old_pool['id'], 'error': e}) def delete(self, context, pool, completor): pool_client = self.core_plugin.nsxpolicy.load_balancer.lb_pool # NOTE(salv-orlando): Guard against accidental compat breakages try: listener = pool.get('listener') or pool.get('listeners', [])[0] except IndexError: # If listeners is an empty list we hit this exception listener = None if listener: try: self._process_vs_update( context, pool, None, False, listener, completor) except Exception as e: LOG.error('Disassociation of listener %(lsn)s from pool ' '%(pool)s failed with error %(err)s', {'lsn': listener['id'], 'pool': pool['id'], 'err': e}) try: pool_client.delete(pool['id']) except nsxlib_exc.ResourceNotFound: pass except nsxlib_exc.ManagerError: completor(success=False) msg = (_('Failed to delete lb pool from nsx: %(pool)s') % {'pool': pool['id']}) raise n_exc.BadRequest(resource='lbaas-pool', msg=msg) # Delete the attached health monitor as well if pool.get('healthmonitor'): hm = pool['healthmonitor'] monitor_client = lb_utils.get_monitor_policy_client( self.core_plugin.nsxpolicy.load_balancer, hm) try: monitor_client.delete(hm['id']) except nsxlib_exc.ResourceNotFound: pass except nsxlib_exc.ManagerError as exc: completor(success=False) msg = _('Failed to delete monitor %(monitor)s from backend ' 'with exception %(exc)s') % {'monitor': hm['id'], 'exc': exc} raise n_exc.BadRequest(resource='lbaas-pool', msg=msg) completor(success=True) def delete_cascade(self, context, pool, completor): self.delete(context, pool, completor) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2142541 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/nsx_v/0000755000175000017500000000000000000000000023432 
5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/nsx_v/__init__.py0000644000175000017500000000000000000000000025531 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2182543 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/nsx_v/implementation/0000755000175000017500000000000000000000000026457 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/nsx_v/implementation/__init__.py0000644000175000017500000000000000000000000030556 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/nsx_v/implementation/healthmon_mgr.py0000644000175000017500000001675600000000000031674 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_utils import excutils from neutron_lib import exceptions as n_exc from vmware_nsx.common import locking from vmware_nsx.db import nsxv_db from vmware_nsx.plugins.nsx_v.vshield.common import exceptions as nsxv_exc from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas import lb_const from vmware_nsx.services.lbaas.nsx_v import lbaas_common as lb_common LOG = logging.getLogger(__name__) class EdgeHealthMonitorManagerFromDict(base_mgr.EdgeLoadbalancerBaseManager): def _convert_lbaas_monitor(self, hm): """ Transform OpenStack health monitor dict to NSXv health monitor dict. """ mon = { 'type': lb_const.HEALTH_MONITOR_MAP.get(hm['type'], 'icmp'), 'interval': hm['delay'], 'timeout': hm['timeout'], 'maxRetries': hm['max_retries'], 'name': hm['id']} if hm['http_method']: mon['method'] = hm['http_method'] if hm['url_path']: mon['url'] = hm['url_path'] if hm['expected_codes']: mon['expected'] = hm['expected_codes'] return mon @log_helpers.log_method_call def __init__(self, vcns_driver): super(EdgeHealthMonitorManagerFromDict, self).__init__(vcns_driver) def create(self, context, hm, completor): lb_id = hm['pool']['loadbalancer_id'] lb_binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding( context.session, lb_id) edge_id = lb_binding['edge_id'] pool_id = hm['pool']['id'] pool_binding = nsxv_db.get_nsxv_lbaas_pool_binding( context.session, lb_id, pool_id) if not pool_binding: completor(success=False) msg = _('Failed to create health monitor on edge: %s. 
' 'Binding not found') % edge_id LOG.error(msg) raise n_exc.BadRequest(resource='edge-lbaas', msg=msg) edge_pool_id = pool_binding['edge_pool_id'] hm_binding = nsxv_db.get_nsxv_lbaas_monitor_binding( context.session, lb_id, pool_id, hm['id'], edge_id) edge_mon_id = None if hm_binding: edge_mon_id = hm_binding['edge_mon_id'] else: edge_monitor = self._convert_lbaas_monitor(hm) try: with locking.LockManager.get_lock(edge_id): h = self.vcns.create_health_monitor(edge_id, edge_monitor)[0] edge_mon_id = lb_common.extract_resource_id(h['location']) nsxv_db.add_nsxv_lbaas_monitor_binding( context.session, lb_id, pool_id, hm['id'], edge_id, edge_mon_id) except nsxv_exc.VcnsApiException: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to create health monitor on edge: %s', edge_id) try: # Associate monitor with Edge pool with locking.LockManager.get_lock(edge_id): edge_pool = self.vcns.get_pool(edge_id, edge_pool_id)[1] if edge_pool.get('monitorId'): edge_pool['monitorId'].append(edge_mon_id) else: edge_pool['monitorId'] = [edge_mon_id] self.vcns.update_pool(edge_id, edge_pool_id, edge_pool) except nsxv_exc.VcnsApiException: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to create health monitor on edge: %s', edge_id) completor(success=True) def update(self, context, old_hm, new_hm, completor): lb_id = new_hm['pool']['loadbalancer_id'] lb_binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding( context.session, lb_id) edge_id = lb_binding['edge_id'] hm_binding = nsxv_db.get_nsxv_lbaas_monitor_binding( context.session, lb_id, new_hm['pool']['id'], new_hm['id'], edge_id) edge_monitor = self._convert_lbaas_monitor(new_hm) try: with locking.LockManager.get_lock(edge_id): self.vcns.update_health_monitor(edge_id, hm_binding['edge_mon_id'], edge_monitor) except nsxv_exc.VcnsApiException: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to update monitor on edge: %s', 
edge_id) completor(success=True) def delete(self, context, hm, completor): pool_id = hm['pool']['id'] lb_id = hm['pool']['loadbalancer_id'] lb_binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding( context.session, lb_id) edge_id = lb_binding['edge_id'] pool_binding = nsxv_db.get_nsxv_lbaas_pool_binding( context.session, lb_id, pool_id) if not pool_binding: nsxv_db.del_nsxv_lbaas_monitor_binding( context.session, lb_id, pool_id, hm['id'], edge_id) completor(success=True) return edge_pool_id = pool_binding['edge_pool_id'] hm_binding = nsxv_db.get_nsxv_lbaas_monitor_binding( context.session, lb_id, pool_id, hm['id'], edge_id) edge_pool = self.vcns.get_pool(edge_id, edge_pool_id)[1] if hm_binding and hm_binding['edge_mon_id'] in edge_pool['monitorId']: edge_pool['monitorId'].remove(hm_binding['edge_mon_id']) try: with locking.LockManager.get_lock(edge_id): self.vcns.update_pool(edge_id, edge_pool_id, edge_pool) except nsxv_exc.VcnsApiException: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to delete monitor mapping on edge: %s', edge_id) # If this monitor is not used on this edge anymore, delete it if hm_binding and not edge_pool['monitorId']: try: with locking.LockManager.get_lock(edge_id): self.vcns.delete_health_monitor(hm_binding['edge_id'], hm_binding['edge_mon_id']) except nsxv_exc.VcnsApiException: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to delete monitor on edge: %s', edge_id) nsxv_db.del_nsxv_lbaas_monitor_binding( context.session, lb_id, pool_id, hm['id'], edge_id) completor(success=True) def delete_cascade(self, context, hm, completor): self.delete(context, hm, completor) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/nsx_v/implementation/l7policy_mgr.py0000644000175000017500000003402500000000000031444 0ustar00coreycorey00000000000000# Copyright 2017 VMware, 
Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_utils import excutils from neutron_lib import constants from neutron_lib import exceptions as n_exc from vmware_nsx._i18n import _ from vmware_nsx.common import locking from vmware_nsx.db import nsxv_db from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas import lb_const from vmware_nsx.services.lbaas.nsx_v import lbaas_common as lb_common LOG = logging.getLogger(__name__) type_by_compare_type = { lb_const.L7_RULE_COMPARE_TYPE_EQUAL_TO: '', lb_const.L7_RULE_COMPARE_TYPE_REGEX: '_reg', lb_const.L7_RULE_COMPARE_TYPE_STARTS_WITH: '_beg', lb_const.L7_RULE_COMPARE_TYPE_ENDS_WITH: '_end', lb_const.L7_RULE_COMPARE_TYPE_CONTAINS: '_sub' } def policy_to_application_rule(policy): condition = '' rule_lines = [] for rule in policy['rules']: if rule['provisioning_status'] == constants.PENDING_DELETE: # skip this rule as it is being deleted continue type_by_comp = type_by_compare_type.get(rule['compare_type']) if type_by_comp is None: type_by_comp = '' LOG.warnning('Unsupported compare type %(type)s is used in ' 'policy %(id)s', {'type': rule['compare_type'], 'id': policy['id']}) if rule['type'] == lb_const.L7_RULE_TYPE_COOKIE: # Example: acl hdr_sub(cookie) SEEN=1 hdr_type = 'hdr' + type_by_comp rule_line = ('acl %(rule_id)s %(hdr_type)s(cookie) ' '%(key)s=%(val)s' % {'rule_id': rule['id'], 'hdr_type': 
hdr_type, 'key': rule['key'], 'val': rule['value']}) elif rule['type'] == lb_const.L7_RULE_TYPE_HEADER: # Example: acl hdr(user-agent) -i test hdr_type = 'hdr' + type_by_comp rule_line = ('acl %(rule_id)s %(hdr_type)s(%(key)s) ' '-i %(val)s' % {'rule_id': rule['id'], 'hdr_type': hdr_type, 'key': rule['key'], 'val': rule['value']}) elif rule['type'] == lb_const.L7_RULE_TYPE_HOST_NAME: # Example: acl hdr_beg(host) -i abcd hdr_type = 'hdr' + type_by_comp # -i for case insensitive host name rule_line = ('acl %(rule_id)s %(hdr_type)s(host) ' '-i %(val)s' % {'rule_id': rule['id'], 'hdr_type': hdr_type, 'val': rule['value']}) elif rule['type'] == lb_const.L7_RULE_TYPE_PATH: # Example: acl path_beg -i /images # -i for case insensitive path path_type = 'path' + type_by_comp rule_line = ('acl %(rule_id)s %(path_type)s ' '-i %(val)s' % {'rule_id': rule['id'], 'path_type': path_type, 'val': rule['value']}) elif rule['type'] == lb_const.L7_RULE_TYPE_FILE_TYPE: # Example: acl path_sub -i .jpg # Regardless of the compare type, always check contained in path. # -i for case insensitive file type val = rule['value'] if not val.startswith('.'): val = '.' + val rule_line = ('acl %(rule_id)s path_sub ' '-i %(val)s' % {'rule_id': rule['id'], 'val': val}) else: msg = _('Unsupported L7rule type %s') % rule['type'] raise n_exc.BadRequest(resource='edge-lbaas', msg=msg) rule_lines.append(rule_line) invert_sign = '!' 
if rule['invert'] else '' condition = condition + invert_sign + rule['id'] + ' ' if rule_lines: # concatenate all the rules with new lines all_rules = '\n'.join(rule_lines + ['']) # remove he last space from the condition condition = condition[:-1] else: all_rules = '' condition = 'TRUE' # prepare the action if policy['action'] == lb_const.L7_POLICY_ACTION_REJECT: # return HTTP 403 response action = 'http-request deny' elif policy['action'] == lb_const.L7_POLICY_ACTION_REDIRECT_TO_POOL: action = 'use_backend pool_%s' % policy['redirect_pool_id'] elif policy['action'] == lb_const.L7_POLICY_ACTION_REDIRECT_TO_URL: action = 'redirect location %s' % policy['redirect_url'] else: msg = _('Unsupported L7policy action %s') % policy['action'] raise n_exc.BadRequest(resource='edge-lbaas', msg=msg) # Build the final script script = all_rules + '%(action)s if %(cond)s' % { 'action': action, 'cond': condition} app_rule = {'name': 'pol_' + policy['id'], 'script': script} return app_rule def policy_to_edge_and_rule_id(context, policy_id): # get the nsx application rule id and edge id binding = nsxv_db.get_nsxv_lbaas_l7policy_binding( context.session, policy_id) if not binding: msg = _('No suitable Edge found for policy %s') % policy_id raise n_exc.BadRequest(resource='edge-lbaas', msg=msg) return binding['edge_id'], binding['edge_app_rule_id'] class EdgeL7PolicyManagerFromDict(base_mgr.EdgeLoadbalancerBaseManager): @log_helpers.log_method_call def __init__(self, vcns_driver): super(EdgeL7PolicyManagerFromDict, self).__init__(vcns_driver) def _add_app_rule_to_virtual_server(self, edge_id, vse_id, app_rule_id, policy_position): """Add the new nsx application rule to the virtual server""" # Get the current virtual server configuration vse = self.vcns.get_vip(edge_id, vse_id)[1] if 'applicationRuleId' not in vse: vse['applicationRuleId'] = [] # Add the policy (=application rule) in the correct position # (position begins at 1) if len(vse['applicationRuleId']) < policy_position: 
vse['applicationRuleId'].append(app_rule_id) else: vse['applicationRuleId'].insert(policy_position - 1, app_rule_id) # update the backend with the new configuration self.vcns.update_vip(edge_id, vse_id, vse) def _del_app_rule_from_virtual_server(self, edge_id, vse_id, app_rule_id): """Delete nsx application rule from the virtual server""" # Get the current virtual server configuration vse = self.vcns.get_vip(edge_id, vse_id)[1] if 'applicationRuleId' not in vse: vse['applicationRuleId'] = [] # Remove the rule from the list if (app_rule_id in vse['applicationRuleId']): vse['applicationRuleId'].remove(app_rule_id) # update the backend with the new configuration self.vcns.update_vip(edge_id, vse_id, vse) def _update_app_rule_possition_in_virtual_server(self, edge_id, vse_id, app_rule_id, policy_position): """Move the new nsx application rule to another position""" # Get the current virtual server configuration vse = self.vcns.get_vip(edge_id, vse_id)[1] # delete the policy (= application rule) from the list if app_rule_id in vse['applicationRuleId']: vse['applicationRuleId'].remove(app_rule_id) # Add the policy (=application rule) in the correct position # (position begins at 1) if len(vse['applicationRuleId']) < policy_position: vse['applicationRuleId'].append(app_rule_id) else: vse['applicationRuleId'].insert(policy_position - 1, app_rule_id) # update the backend with the new configuration self.vcns.update_vip(edge_id, vse_id, vse) def _get_vse_id(self, context, pol): lb_id = pol['listener']['loadbalancer_id'] list_id = pol['listener']['id'] listener_binding = nsxv_db.get_nsxv_lbaas_listener_binding( context.session, lb_id, list_id) if listener_binding: return listener_binding['vse_id'] def create(self, context, pol, completor): # find out the edge to be updated, by the listener of this policy listener = pol['listener'] lb_id = listener['loadbalancer_id'] lb_binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding( context.session, lb_id) if not lb_binding: msg = _( 
'No suitable Edge found for listener %s') % listener['id'] raise n_exc.BadRequest(resource='edge-lbaas', msg=msg) if (listener['protocol'] == lb_const.LB_PROTOCOL_HTTPS or listener['protocol'] == lb_const.LB_PROTOCOL_TERMINATED_HTTPS): msg = _( 'L7 policy is not supported for %(prot)s listener %(ls)s') % { 'prot': listener['protocol'], 'ls': pol['listener_id']} raise n_exc.BadRequest(resource='edge-lbaas', msg=msg) edge_id = lb_binding['edge_id'] app_rule = policy_to_application_rule(pol) app_rule_id = None try: with locking.LockManager.get_lock(edge_id): # create the backend application rule for this policy h = (self.vcns.create_app_rule(edge_id, app_rule))[0] app_rule_id = lb_common.extract_resource_id(h['location']) # add the nsx application rule (neutron policy) to the nsx # virtual server (neutron listener) vse_id = self._get_vse_id(context, pol) if vse_id: self._add_app_rule_to_virtual_server( edge_id, vse_id, app_rule_id, pol['position']) except Exception as e: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to create L7policy on edge %(edge)s: ' '%(err)s', {'edge': edge_id, 'err': e}) if app_rule_id: # Failed to add the rule to the vip: delete the rule # from the backend. 
try: self.vcns.delete_app_rule(edge_id, app_rule_id) except Exception: pass # save the nsx application rule id in the DB nsxv_db.add_nsxv_lbaas_l7policy_binding(context.session, pol['id'], edge_id, app_rule_id) # complete the transaction completor(success=True) def update(self, context, old_pol, new_pol, completor): # get the nsx application rule id and edge id from the nsx DB edge_id, app_rule_id = policy_to_edge_and_rule_id( context, new_pol['id']) # create the script for the new policy data app_rule = policy_to_application_rule(new_pol) try: with locking.LockManager.get_lock(edge_id): # update the backend application rule for the new policy self.vcns.update_app_rule(edge_id, app_rule_id, app_rule) # if the position changed - update it too if old_pol['position'] != new_pol['position']: vse_id = self._get_vse_id(context, new_pol) if vse_id: self._update_app_rule_possition_in_virtual_server( edge_id, vse_id, app_rule_id, new_pol['position']) except Exception as e: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to update L7policy on edge %(edge)s: ' '%(err)s', {'edge': edge_id, 'err': e}) # complete the transaction completor(success=True) def delete(self, context, pol, completor): # get the nsx application rule id and edge id from the nsx DB try: edge_id, app_rule_id = policy_to_edge_and_rule_id( context, pol['id']) except n_exc.BadRequest: # This is probably a policy that we failed to create properly. 
# We should allow deleting it completor(success=True) return with locking.LockManager.get_lock(edge_id): try: # remove the nsx application rule from the virtual server vse_id = self._get_vse_id(context, pol) if vse_id: self._del_app_rule_from_virtual_server( edge_id, vse_id, app_rule_id) # delete the nsx application rule self.vcns.delete_app_rule(edge_id, app_rule_id) except Exception as e: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to delete L7policy on edge ' '%(edge)s: %(err)s', {'edge': edge_id, 'err': e}) # delete the nsxv db entry nsxv_db.del_nsxv_lbaas_l7policy_binding(context.session, pol['id']) # complete the transaction completor(success=True) def delete_cascade(self, context, policy, completor): self.delete(context, policy, completor) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/nsx_v/implementation/l7rule_mgr.py0000644000175000017500000000533500000000000031116 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_utils import excutils from vmware_nsx.common import locking from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas.nsx_v.implementation import l7policy_mgr LOG = logging.getLogger(__name__) class EdgeL7RuleManagerFromDict(base_mgr.EdgeLoadbalancerBaseManager): @log_helpers.log_method_call def __init__(self, vcns_driver): super(EdgeL7RuleManagerFromDict, self).__init__(vcns_driver) def _handle_l7policy_rules_change(self, context, rule, completor, delete=False): # Get the nsx application rule id and edge id edge_id, app_rule_id = l7policy_mgr.policy_to_edge_and_rule_id( context, rule['l7policy_id']) # Create the script for the new policy data. # The policy obj on the rule is already updated with the # created/updated/deleted rule. app_rule = l7policy_mgr.policy_to_application_rule(rule['policy']) try: with locking.LockManager.get_lock(edge_id): # update the backend application rule for the updated policy self.vcns.update_app_rule(edge_id, app_rule_id, app_rule) except Exception as e: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to update L7rules on edge %(edge)s: ' '%(err)s', {'edge': edge_id, 'err': e}) # complete the transaction completor(success=True) def create(self, context, rule, completor): self._handle_l7policy_rules_change(context, rule, completor) def update(self, context, old_rule, new_rule, completor): self._handle_l7policy_rules_change(context, new_rule, completor) def delete(self, context, rule, completor): self._handle_l7policy_rules_change(context, rule, completor, delete=True) def delete_cascade(self, context, rule, completor): self.delete(context, rule, completor) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 
vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/nsx_v/implementation/listener_mgr.py0000644000175000017500000003375100000000000031534 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_utils import excutils from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsxv_exc from vmware_nsx.common import locking from vmware_nsx.db import nsxv_db from vmware_nsx.plugins.nsx_v.vshield.common import exceptions as vcns_exc from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas import lb_const from vmware_nsx.services.lbaas.nsx_v import lbaas_common as lb_common LOG = logging.getLogger(__name__) def listener_to_edge_app_profile(listener, edge_cert_id): edge_app_profile = { 'insertXForwardedFor': False, 'name': listener['id'], 'serverSslEnabled': False, 'sslPassthrough': False, 'template': lb_const.PROTOCOL_MAP[listener['protocol']], } if (listener['protocol'] == lb_const.LB_PROTOCOL_HTTPS or listener['protocol'] == lb_const.LB_PROTOCOL_TERMINATED_HTTPS): if edge_cert_id: edge_app_profile['clientSsl'] = { 'caCertificate': [], 'clientAuth': 'ignore', 'crlCertificate': [], 'serviceCertificate': [edge_cert_id]} else: edge_app_profile['sslPassthrough'] = True if (listener.get('default_pool') and listener['default_pool'].get('session_persistence')): pool_sess_persist = 
listener['default_pool']['session_persistence'] sess_persist_type = pool_sess_persist['type'] persistence = { 'method': lb_const.SESSION_PERSISTENCE_METHOD_MAP.get( sess_persist_type)} if (sess_persist_type in lb_const.SESSION_PERSISTENCE_COOKIE_MAP): cookie_name = pool_sess_persist.get('cookie_name', None) if cookie_name is None: cookie_name = lb_const.SESSION_PERSISTENCE_DEFAULT_COOKIE_NAME persistence.update({ 'cookieName': cookie_name, 'cookieMode': lb_const.SESSION_PERSISTENCE_COOKIE_MAP[ sess_persist_type]}) edge_app_profile['persistence'] = persistence return edge_app_profile def listener_to_edge_vse(context, listener, vip_address, default_pool, app_profile_id): if listener['connection_limit']: connection_limit = max(0, listener['connection_limit']) else: connection_limit = 0 vse = { 'name': 'vip_' + listener['id'], 'description': listener['description'], 'ipAddress': vip_address, 'protocol': lb_const.PROTOCOL_MAP[listener['protocol']], 'port': listener['protocol_port'], 'connectionLimit': connection_limit, 'defaultPoolId': default_pool, 'accelerationEnabled': ( listener['protocol'] == lb_const.LB_PROTOCOL_TCP), 'applicationProfileId': app_profile_id, 'enabled': listener['admin_state_up']} # Add the L7 policies if listener['l7_policies']: app_rule_ids = [] for pol in listener['l7_policies']: binding = nsxv_db.get_nsxv_lbaas_l7policy_binding( context.session, pol['id']) if binding: app_rule_ids.append(binding['edge_app_rule_id']) vse['applicationRuleId'] = app_rule_ids return vse def update_app_profile(vcns, context, listener, edge_id, edge_cert_id=None): lb_id = listener['loadbalancer_id'] listener_binding = nsxv_db.get_nsxv_lbaas_listener_binding( context.session, lb_id, listener['id']) app_profile_id = listener_binding['app_profile_id'] app_profile = listener_to_edge_app_profile(listener, edge_cert_id) with locking.LockManager.get_lock(edge_id): vcns.update_app_profile( edge_id, app_profile_id, app_profile) return app_profile_id class 
EdgeListenerManagerFromDict(base_mgr.EdgeLoadbalancerBaseManager): @log_helpers.log_method_call def __init__(self, vcns_driver): super(EdgeListenerManagerFromDict, self).__init__(vcns_driver) def _upload_certificate(self, context, edge_id, cert_id, certificate): cert_binding = nsxv_db.get_nsxv_lbaas_certificate_binding( context.session, cert_id, edge_id) if cert_binding: return cert_binding['edge_cert_id'] request = { 'pemEncoding': certificate.get_certificate(), 'privateKey': certificate.get_private_key()} passphrase = certificate.get_private_key_passphrase() if passphrase: request['passphrase'] = passphrase cert_obj = self.vcns.upload_edge_certificate(edge_id, request)[1] cert_list = cert_obj.get('certificates', {}) if cert_list: edge_cert_id = cert_list[0]['objectId'] else: error = _("Failed to upload a certificate to edge %s") % edge_id raise nsxv_exc.NsxPluginException(err_msg=error) nsxv_db.add_nsxv_lbaas_certificate_binding( context.session, cert_id, edge_id, edge_cert_id) return edge_cert_id def create(self, context, listener, completor, certificate=None): default_pool = None lb_id = listener['loadbalancer_id'] lb_binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding( context.session, lb_id) edge_id = lb_binding['edge_id'] if listener.get('default_pool') and listener['default_pool'].get('id'): pool_binding = nsxv_db.get_nsxv_lbaas_pool_binding( context.session, lb_id, listener['default_pool']['id']) if pool_binding: default_pool = pool_binding['edge_pool_id'] edge_cert_id = None if certificate: try: edge_cert_id = self._upload_certificate( context, edge_id, listener['default_tls_container_id'], certificate) except Exception: with excutils.save_and_reraise_exception(): completor(success=False) app_profile = listener_to_edge_app_profile(listener, edge_cert_id) app_profile_id = None try: with locking.LockManager.get_lock(edge_id): h = (self.vcns.create_app_profile(edge_id, app_profile))[0] app_profile_id = lb_common.extract_resource_id(h['location']) except 
vcns_exc.VcnsApiException: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to create app profile on edge: %s', lb_binding['edge_id']) vse = listener_to_edge_vse(context, listener, lb_binding['vip_address'], default_pool, app_profile_id) try: with locking.LockManager.get_lock(edge_id): h = self.vcns.create_vip(edge_id, vse)[0] edge_vse_id = lb_common.extract_resource_id(h['location']) nsxv_db.add_nsxv_lbaas_listener_binding(context.session, lb_id, listener['id'], app_profile_id, edge_vse_id) completor(success=True) except vcns_exc.VcnsApiException: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to create vip on Edge: %s', edge_id) self.vcns.delete_app_profile(edge_id, app_profile_id) def update(self, context, old_listener, new_listener, completor, certificate=None): default_pool = None if (new_listener.get('default_pool') and new_listener['default_pool'].get('id')): pool_binding = nsxv_db.get_nsxv_lbaas_pool_binding( context.session, new_listener['loadbalancer_id'], new_listener['default_pool']['id']) if pool_binding: default_pool = pool_binding['edge_pool_id'] else: LOG.error("Couldn't find pool binding for pool %s", new_listener['default_pool']['id']) lb_id = new_listener['loadbalancer_id'] listener_binding = nsxv_db.get_nsxv_lbaas_listener_binding( context.session, lb_id, new_listener['id']) lb_binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding( context.session, lb_id) edge_id = lb_binding['edge_id'] edge_cert_id = None if certificate: if (old_listener['default_tls_container_id'] != new_listener['default_tls_container_id']): try: edge_cert_id = self._upload_certificate( context, edge_id, new_listener['default_tls_container_id'], certificate) except Exception: with excutils.save_and_reraise_exception(): completor(success=False) else: cert_binding = nsxv_db.get_nsxv_lbaas_certificate_binding( context.session, new_listener['default_tls_container_id'], edge_id) edge_cert_id = 
cert_binding['edge_cert_id'] try: app_profile_id = update_app_profile( self.vcns, context, new_listener, edge_id, edge_cert_id=edge_cert_id) vse = listener_to_edge_vse(context, new_listener, lb_binding['vip_address'], default_pool, app_profile_id) with locking.LockManager.get_lock(edge_id): self.vcns.update_vip(edge_id, listener_binding['vse_id'], vse) completor(success=True) except vcns_exc.VcnsApiException: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to update app profile on edge: %s', edge_id) def delete(self, context, listener, completor): lb_id = listener['loadbalancer_id'] listener_binding = nsxv_db.get_nsxv_lbaas_listener_binding( context.session, lb_id, listener['id']) lb_binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding( context.session, lb_id) if lb_binding and listener_binding: edge_id = lb_binding['edge_id'] edge_vse_id = listener_binding['vse_id'] app_profile_id = listener_binding['app_profile_id'] try: with locking.LockManager.get_lock(edge_id): self.vcns.delete_vip(edge_id, edge_vse_id) except (vcns_exc.ResourceNotFound, vcns_exc.RequestBad): LOG.error('vip not found on edge: %s', edge_id) except vcns_exc.VcnsApiException: LOG.error('Failed to delete vip on edge: %s', edge_id) try: with locking.LockManager.get_lock(edge_id): self.vcns.delete_app_profile(edge_id, app_profile_id) except (vcns_exc.ResourceNotFound, vcns_exc.RequestBad): LOG.error('app profile not found on edge: %s', edge_id) except vcns_exc.VcnsApiException: LOG.error('Failed to delete app profile on Edge: %s', edge_id) nsxv_db.del_nsxv_lbaas_listener_binding(context.session, lb_id, listener['id']) completor(success=True) def delete_cascade(self, context, listener, completor): self.delete(context, listener, completor) def stats_getter(context, core_plugin, ignore_list=None): """Update Octavia statistics for each listener (virtual server)""" stat_list = [] vcns = core_plugin.nsx_v.vcns # go over all LB edges bindings = 
nsxv_db.get_nsxv_lbaas_loadbalancer_bindings(context.session) for binding in bindings: lb_id = binding['loadbalancer_id'] if ignore_list and lb_id in ignore_list: continue edge_id = binding['edge_id'] try: lb_stats = vcns.get_loadbalancer_statistics(edge_id) virtual_servers_stats = lb_stats[1].get('virtualServer', []) for vs_stats in virtual_servers_stats: # Get the stats of the virtual server stats = copy.copy(lb_const.LB_EMPTY_STATS) stats['bytes_in'] += vs_stats.get('bytesIn', 0) stats['bytes_out'] += vs_stats.get('bytesOut', 0) stats['active_connections'] += vs_stats.get('curSessions', 0) stats['total_connections'] += vs_stats.get('totalSessions', 0) stats['request_errors'] = 0 # currently unsupported # Find the listener Id vs_id = vs_stats.get('virtualServerId') list_bind = nsxv_db.get_nsxv_lbaas_listener_binding_by_vse( context.session, lb_id, vs_id) if not list_bind: continue stats['id'] = list_bind['listener_id'] stat_list.append(stats) except vcns_exc.VcnsApiException as e: LOG.warning('Failed to read load balancer statistics for %s: %s', edge_id, e) return stat_list ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/nsx_v/implementation/loadbalancer_mgr.py0000644000175000017500000003074400000000000032315 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron.services.flavors import flavors_plugin from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants from neutron_lib import exceptions as n_exc from oslo_config import cfg from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_utils import excutils from vmware_nsx._i18n import _ from vmware_nsx.db import nsxv_db from vmware_nsx.plugins.nsx_v.vshield.common import ( constants as vcns_const) from vmware_nsx.plugins.nsx_v.vshield.common import exceptions as nsxv_exc from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas import lb_const from vmware_nsx.services.lbaas.nsx_v import lbaas_common as lb_common from vmware_nsx.services.lbaas.octavia import constants as oct_const LOG = logging.getLogger(__name__) class EdgeLoadBalancerManagerFromDict(base_mgr.EdgeLoadbalancerBaseManager): @log_helpers.log_method_call def __init__(self, vcns_driver): super(EdgeLoadBalancerManagerFromDict, self).__init__(vcns_driver) registry.subscribe( self._handle_subnet_gw_change, resources.SUBNET, events.AFTER_UPDATE) def _get_lb_flavor_size(self, context, flavor_id): if not flavor_id: return vcns_const.SERVICE_SIZE_MAPPING['lb'] else: flavor = flavors_plugin.FlavorsPlugin.get_flavor( self.flavor_plugin, context, flavor_id) flavor_size = flavor['name'] if flavor_size.lower() in vcns_const.ALLOWED_EDGE_SIZES: return flavor_size.lower() else: err_msg = (_("Invalid flavor size %(flavor)s, only %(sizes)s " "are supported") % {'flavor': flavor_size, 'sizes': vcns_const.ALLOWED_EDGE_SIZES}) raise n_exc.InvalidInput(error_message=err_msg) def create(self, context, lb, completor): sub_id = lb['vip_subnet_id'] if cfg.CONF.nsxv.use_routers_as_lbaas_platform: edge_id = lb_common.get_lbaas_edge_id_for_subnet( context, self.core_plugin, sub_id, lb['tenant_id']) if not edge_id: msg = _('No suitable Edge found for subnet %s') % sub_id 
raise n_exc.BadRequest(resource='edge-lbaas', msg=msg) else: lb_size = self._get_lb_flavor_size(context, lb.get('flavor_id')) edge_id = lb_common.get_lbaas_edge_id( context, self.core_plugin, lb['id'], lb['vip_address'], sub_id, lb['tenant_id'], lb_size) if not edge_id: msg = _('Failed to allocate Edge on subnet %(sub)s for ' 'loadbalancer %(lb)s') % {'sub': sub_id, 'lb': lb['id']} raise n_exc.BadRequest(resource='edge-lbaas', msg=msg) try: if cfg.CONF.nsxv.use_routers_as_lbaas_platform: if not nsxv_db.get_nsxv_lbaas_loadbalancer_binding_by_edge( context.session, edge_id): lb_common.enable_edge_acceleration(self.vcns, edge_id) lb_common.add_vip_as_secondary_ip(self.vcns, edge_id, lb['vip_address']) else: lb_common.enable_edge_acceleration(self.vcns, edge_id) edge_fw_rule_id = lb_common.add_vip_fw_rule( self.vcns, edge_id, lb['id'], lb['vip_address']) # set LB default rule if not cfg.CONF.nsxv.use_routers_as_lbaas_platform: lb_common.set_lb_firewall_default_rule(self.vcns, edge_id, 'accept') nsxv_db.add_nsxv_lbaas_loadbalancer_binding( context.session, lb['id'], edge_id, edge_fw_rule_id, lb['vip_address']) completor(success=True) except nsxv_exc.VcnsApiException: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to create loadbalancer %s', lb['id']) def update(self, context, old_lb, new_lb, completor): completor(success=True) def delete(self, context, lb, completor): # Discard any ports which are associated with LB filters = { 'device_id': [lb['id'], oct_const.DEVICE_ID_PREFIX + lb['id']], 'device_owner': [lb_common.LBAAS_DEVICE_OWNER]} lb_ports = self.core_plugin.get_ports(context.elevated(), filters=filters) for lb_port in lb_ports: self.core_plugin.delete_port(context.elevated(), lb_port['id'], allow_delete_lb_if=True) binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding( context.session, lb['id']) if binding: edge_binding = nsxv_db.get_nsxv_router_binding_by_edge( context.session, binding['edge_id']) # set LB default rule 
lb_common.set_lb_firewall_default_rule( self.vcns, binding['edge_id'], 'deny') if edge_binding: old_lb = lb_common.is_lb_on_router_edge( context, self.core_plugin, binding['edge_id']) if not old_lb: resource_id = lb_common.get_lb_resource_id(lb['id']) self.core_plugin.edge_manager.delete_lrouter( context, resource_id, dist=False) else: # Edge was created on an exclusive router with the old code try: lb_common.del_vip_fw_rule( self.vcns, binding['edge_id'], binding['edge_fw_rule_id']) except nsxv_exc.VcnsApiException as e: LOG.error('Failed to delete loadbalancer %(lb)s ' 'FW rule. exception is %(exc)s', {'lb': lb['id'], 'exc': e}) try: lb_common.del_vip_as_secondary_ip(self.vcns, binding['edge_id'], lb['vip_address']) except Exception as e: LOG.error('Failed to delete loadbalancer %(lb)s ' 'interface IP. exception is %(exc)s', {'lb': lb['id'], 'exc': e}) nsxv_db.del_nsxv_lbaas_loadbalancer_binding( context.session, lb['id']) completor(success=True) def delete_cascade(self, context, lb, completor): #TODO(asarfaty): implement a better delete cascade for NSX-V self.delete(context, lb, completor) def refresh(self, context, lb): # TODO(kobis): implement pass def stats(self, context, lb): binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding(context.session, lb['id']) stats = _get_edge_loadbalancer_statistics(self.vcns, binding['edge_id']) return stats def get_operating_status(self, context, id, with_members=False): """Return a map of the operating status of all connected LB objects """ lb_binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding( context.session, id) if not lb_binding or not lb_binding['edge_id']: return {} edge_id = lb_binding['edge_id'] lb_stats = self.vcns.get_loadbalancer_statistics(edge_id) lb_status = (lb_const.ONLINE if lb_stats is not None else lb_const.OFFLINE) statuses = {lb_const.LOADBALANCERS: [{'id': id, 'status': lb_status}], lb_const.LISTENERS: [], lb_const.POOLS: [], lb_const.MEMBERS: []} for vs in lb_stats[1].get('virtualServer', []): vs_id 
= vs['name'][4:] vs_status = (lb_const.ONLINE if vs['status'] == 'OPEN' else lb_const.OFFLINE) statuses[lb_const.LISTENERS].append( {'id': vs_id, 'status': vs_status}) for pool in lb_stats[1].get('pool', []): pool_id = pool['name'][5:] pool_status = (lb_const.ONLINE if pool['status'] == 'UP' else lb_const.OFFLINE) statuses[lb_const.POOLS].append( {'id': pool_id, 'status': pool_status}) if with_members: for member in pool.get('member', []): member_id = member['name'][7:] member_status = (lb_const.ONLINE if member['status'] == 'UP' else lb_const.OFFLINE) statuses[lb_const.MEMBERS].append( {'id': member_id, 'status': member_status}) return statuses def _handle_subnet_gw_change(self, *args, **kwargs): # As the Edge appliance doesn't use DHCP, we should change the # default gateway here when the subnet GW changes. context = kwargs.get('context') orig = kwargs['original_subnet'] updated = kwargs['subnet'] if (orig['gateway_ip'] == updated['gateway_ip'] and self._routes_equal(orig['host_routes'], updated['host_routes'])): return subnet_id = updated['id'] subnet = self.core_plugin.get_subnet(context.elevated(), subnet_id) filters = {'fixed_ips': {'subnet_id': [subnet_id]}, 'device_owner': [constants.DEVICE_OWNER_LOADBALANCERV2, oct_const.DEVICE_OWNER_OCTAVIA]} lb_ports = self.core_plugin.get_ports(context.elevated(), filters=filters) if lb_ports: for lb_port in lb_ports: if lb_port['device_id']: device_id = lb_port['device_id'] if device_id.startswith(oct_const.DEVICE_ID_PREFIX): device_id = device_id[len(oct_const.DEVICE_ID_PREFIX):] edge_bind = nsxv_db.get_nsxv_lbaas_loadbalancer_binding( context.session, device_id) edge_id = edge_bind['edge_id'] routes = [{'cidr': r['destination'], 'nexthop': r['nexthop']} for r in subnet['host_routes']] self.core_plugin.nsx_v.update_routes( edge_id, subnet['gateway_ip'], routes) def _routes_equal(self, a, b): if len(a) != len(b): return False for a_item in a: found = False for b_item in b: # compare values as keysets should be same if 
set(a_item.values()) == set(b_item.values()): found = True if not found: return False return True def _get_edge_loadbalancer_statistics(vcns, edge_id): stats = {'bytes_in': 0, 'bytes_out': 0, 'active_connections': 0, 'total_connections': 0} try: lb_stats = vcns.get_loadbalancer_statistics(edge_id) except nsxv_exc.VcnsApiException: msg = (_('Failed to read load balancer statistics, edge: %s') % edge_id) raise n_exc.BadRequest(resource='edge-lbaas', msg=msg) pools_stats = lb_stats[1].get('pool', []) for pool_stats in pools_stats: stats['bytes_in'] += pool_stats.get('bytesIn', 0) stats['bytes_out'] += pool_stats.get('bytesOut', 0) stats['active_connections'] += pool_stats.get('curSessions', 0) stats['total_connections'] += pool_stats.get('totalSessions', 0) return stats ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/nsx_v/implementation/member_mgr.py0000644000175000017500000002273400000000000031155 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_utils import excutils from neutron_lib import exceptions as n_exc from vmware_nsx.common import locking from vmware_nsx.db import nsxv_db from vmware_nsx.plugins.nsx_v.vshield.common import exceptions as nsxv_exc from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas.nsx_v import lbaas_common as lb_common LOG = logging.getLogger(__name__) class EdgeMemberManagerFromDict(base_mgr.EdgeLoadbalancerBaseManager): @log_helpers.log_method_call def __init__(self, vcns_driver): super(EdgeMemberManagerFromDict, self).__init__(vcns_driver) self._fw_section_id = None def _get_pool_lb_id(self, member): if not member.get('pool'): return listener = member['pool']['listener'] if listener: lb_id = listener['loadbalancer_id'] else: lb_id = member['pool']['loadbalancer']['id'] return lb_id def _get_pool_member_ips(self, pool, operation, address): member_ips = [member['address'] for member in pool['members']] if operation == 'add' and address not in member_ips: member_ips.append(address) elif operation == 'del' and address in member_ips: member_ips.remove(address) return member_ips def _get_lbaas_fw_section_id(self): if not self._fw_section_id: self._fw_section_id = lb_common.get_lbaas_fw_section_id(self.vcns) return self._fw_section_id def create(self, context, member, completor): lb_id = self._get_pool_lb_id(member) lb_binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding( context.session, lb_id) edge_id = lb_binding['edge_id'] pool_binding = nsxv_db.get_nsxv_lbaas_pool_binding( context.session, lb_id, member['pool_id']) if not pool_binding: completor(success=False) msg = _('Failed to create member on edge: %s. 
' 'Binding not found') % edge_id LOG.error(msg) raise n_exc.BadRequest(resource='edge-lbaas', msg=msg) edge_pool_id = pool_binding['edge_pool_id'] old_lb = lb_common.is_lb_on_router_edge( context, self.core_plugin, edge_id) with locking.LockManager.get_lock(edge_id): if not cfg.CONF.nsxv.use_routers_as_lbaas_platform and not old_lb: # Verify that Edge appliance is connected to the member's # subnet (only if this is a dedicated loadbalancer edge) if not lb_common.get_lb_interface( context, self.core_plugin, lb_id, member['subnet_id']): lb_common.create_lb_interface( context, self.core_plugin, lb_id, member['subnet_id'], member['tenant_id']) edge_pool = self.vcns.get_pool(edge_id, edge_pool_id)[1] edge_member = { 'ipAddress': member['address'], 'weight': member['weight'], 'port': member['protocol_port'], 'monitorPort': member['protocol_port'], 'name': lb_common.get_member_id(member['id']), 'condition': 'enabled' if member['admin_state_up'] else 'disabled'} if edge_pool.get('member'): edge_pool['member'].append(edge_member) else: edge_pool['member'] = [edge_member] try: self.vcns.update_pool(edge_id, edge_pool_id, edge_pool) completor(success=True) if old_lb: member_ips = self._get_pool_member_ips(member['pool'], 'add', member['address']) lb_common.update_pool_fw_rule( self.vcns, member['pool_id'], edge_id, self._get_lbaas_fw_section_id(), member_ips) except nsxv_exc.VcnsApiException: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to create member on edge: %s', edge_id) def update(self, context, old_member, new_member, completor): lb_id = self._get_pool_lb_id(new_member) lb_binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding( context.session, lb_id) pool_binding = nsxv_db.get_nsxv_lbaas_pool_binding( context.session, lb_id, new_member['pool_id']) edge_id = lb_binding['edge_id'] edge_pool_id = pool_binding['edge_pool_id'] edge_member = { 'ipAddress': new_member['address'], 'weight': new_member['weight'], 'port': 
new_member['protocol_port'], 'monitorPort': new_member['protocol_port'], 'name': lb_common.get_member_id(new_member['id']), 'condition': 'enabled' if new_member['admin_state_up'] else 'disabled'} with locking.LockManager.get_lock(edge_id): edge_pool = self.vcns.get_pool(edge_id, edge_pool_id)[1] if edge_pool.get('member'): for i, m in enumerate(edge_pool['member']): if m['name'] == lb_common.get_member_id(new_member['id']): edge_pool['member'][i] = edge_member break try: self.vcns.update_pool(edge_id, edge_pool_id, edge_pool) completor(success=True) except nsxv_exc.VcnsApiException: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to update member on edge: %s', edge_id) else: LOG.error('Pool %(pool_id)s on Edge %(edge_id)s has no ' 'members to update', {'pool_id': new_member['pool']['id'], 'edge_id': edge_id}) def delete(self, context, member, completor): lb_id = self._get_pool_lb_id(member) lb_binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding( context.session, lb_id) pool_binding = nsxv_db.get_nsxv_lbaas_pool_binding( context.session, lb_id, member['pool_id']) edge_id = lb_binding['edge_id'] old_lb = lb_common.is_lb_on_router_edge( context, self.core_plugin, edge_id) with locking.LockManager.get_lock(edge_id): if not cfg.CONF.nsxv.use_routers_as_lbaas_platform: # we should remove LB subnet interface if no members are # attached and this is not the LB's VIP interface remove_interface = True pool = member['pool'] subnet_id = member['subnet_id'] if subnet_id == pool['loadbalancer']['vip_subnet_id']: remove_interface = False else: for m in pool['members']: if (m['subnet_id'] == subnet_id and m['id'] != member['id']): remove_interface = False if remove_interface: lb_common.delete_lb_interface(context, self.core_plugin, lb_id, subnet_id) if not pool_binding: completor(success=True) return edge_pool_id = pool_binding['edge_pool_id'] edge_pool = self.vcns.get_pool(edge_id, edge_pool_id)[1] for i, m in 
enumerate(edge_pool['member']): if m['name'] == lb_common.get_member_id(member['id']): edge_pool['member'].pop(i) break try: self.vcns.update_pool(edge_id, edge_pool_id, edge_pool) if old_lb: member_ips = self._get_pool_member_ips(member['pool'], 'del', member['address']) lb_common.update_pool_fw_rule( self.vcns, member['pool_id'], edge_id, self._get_lbaas_fw_section_id(), member_ips) completor(success=True) except nsxv_exc.VcnsApiException: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to delete member on edge: %s', edge_id) def delete_cascade(self, context, member, completor): self.delete(context, member, completor) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/nsx_v/implementation/pool_mgr.py0000644000175000017500000002125500000000000030654 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_utils import excutils from neutron_lib import exceptions as n_exc from vmware_nsx.common import locking from vmware_nsx.db import nsxv_db from vmware_nsx.plugins.nsx_v.vshield.common import exceptions as nsxv_exc from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas import lb_const from vmware_nsx.services.lbaas.nsx_v.implementation import listener_mgr from vmware_nsx.services.lbaas.nsx_v import lbaas_common as lb_common LOG = logging.getLogger(__name__) class EdgePoolManagerFromDict(base_mgr.EdgeLoadbalancerBaseManager): @log_helpers.log_method_call def __init__(self, vcns_driver): super(EdgePoolManagerFromDict, self).__init__(vcns_driver) self._fw_section_id = None def create(self, context, pool, completor): pool_id = pool['id'] edge_pool = { 'name': 'pool_' + pool_id, 'description': pool.get('description', pool.get('name')), 'algorithm': lb_const.BALANCE_MAP.get(pool['lb_algorithm'], 'round-robin'), 'transparent': False } lb_id = pool['loadbalancer_id'] lb_binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding( context.session, lb_id) if not lb_binding: msg = _( 'No suitable Edge found for pool %s') % pool_id raise n_exc.BadRequest(resource='edge-lbaas', msg=msg) edge_id = lb_binding['edge_id'] try: with locking.LockManager.get_lock(edge_id): h = self.vcns.create_pool(edge_id, edge_pool)[0] edge_pool_id = lb_common.extract_resource_id(h['location']) nsxv_db.add_nsxv_lbaas_pool_binding(context.session, lb_id, pool_id, edge_pool_id) if pool['listener']: listener_binding = nsxv_db.get_nsxv_lbaas_listener_binding( context.session, lb_id, pool['listener']['id']) # Associate listener with pool vse = listener_mgr.listener_to_edge_vse( context, pool['listener'], lb_binding['vip_address'], edge_pool_id, listener_binding['app_profile_id']) with locking.LockManager.get_lock(edge_id): self.vcns.update_vip(edge_id, listener_binding['vse_id'], vse) # This action 
also set this pool as the default pool of the # listener, so the application profile may need to be updated if pool['session_persistence']: listener_mgr.update_app_profile( self.vcns, context, pool['listener'], edge_id) completor(success=True) except nsxv_exc.VcnsApiException: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to create pool %s', pool['id']) def update(self, context, old_pool, new_pool, completor): edge_pool = { 'name': 'pool_' + new_pool['id'], 'description': new_pool.get('description', new_pool.get('name')), 'algorithm': lb_const.BALANCE_MAP.get( new_pool['lb_algorithm'], 'round-robin'), 'transparent': False } if new_pool['listener']: listener = new_pool['listener'] lb_id = listener['loadbalancer_id'] else: lb_id = new_pool['loadbalancer_id'] lb_binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding( context.session, lb_id) pool_binding = nsxv_db.get_nsxv_lbaas_pool_binding( context.session, lb_id, new_pool['id']) edge_id = lb_binding['edge_id'] edge_pool_id = pool_binding['edge_pool_id'] try: with locking.LockManager.get_lock(edge_id): # get the configured monitor-id org_edge_pool = self.vcns.get_pool(edge_id, edge_pool_id)[1] monitor_id = org_edge_pool.get('monitorId') if monitor_id: edge_pool['monitorId'] = monitor_id # Keep the current members if org_edge_pool.get('member'): edge_pool['member'] = org_edge_pool['member'] self.vcns.update_pool(edge_id, edge_pool_id, edge_pool) completor(success=True) # if the session_persistence was changed, # we may need to update the listener application profile if new_pool['listener']: old_sess_persist = old_pool['session_persistence'] new_sess_persist = new_pool['session_persistence'] if new_sess_persist != old_sess_persist: listener_mgr.update_app_profile( self.vcns, context, new_pool['listener'], edge_id) except nsxv_exc.VcnsApiException: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to update pool %s', new_pool['id']) def 
delete(self, context, pool, completor): lb_id = pool['loadbalancer_id'] lb_binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding( context.session, lb_id) pool_binding = nsxv_db.get_nsxv_lbaas_pool_binding( context.session, lb_id, pool['id']) edge_id = lb_binding['edge_id'] if not pool_binding: completor(success=True) return edge_pool_id = pool_binding['edge_pool_id'] listeners_to_update = [] try: if pool['listeners']: for listener in pool['listeners']: # the pool session persistence may affect the associated # pool application profile if (pool['session_persistence'] and listener['default_pool'] and listener['default_pool']['id'] == pool['id']): listeners_to_update.append(listener) listener_binding = nsxv_db.get_nsxv_lbaas_listener_binding( context.session, lb_id, listener['id']) vse = listener_mgr.listener_to_edge_vse( context, listener, lb_binding['vip_address'], None, listener_binding['app_profile_id']) with locking.LockManager.get_lock(edge_id): self.vcns.update_vip( edge_id, listener_binding['vse_id'], vse) self.vcns.delete_pool(edge_id, edge_pool_id) completor(success=True) nsxv_db.del_nsxv_lbaas_pool_binding( context.session, lb_id, pool['id']) for listener in listeners_to_update: # need to update the listeners too, now with no default pool listener['default_pool'] = None listener_mgr.update_app_profile( self.vcns, context, listener, edge_id) old_lb = lb_common.is_lb_on_router_edge( context, self.core_plugin, lb_binding['edge_id']) if old_lb: lb_common.update_pool_fw_rule(self.vcns, pool['id'], edge_id, self._get_lbaas_fw_section_id(), []) except nsxv_exc.VcnsApiException: completor(success=False) LOG.error('Failed to delete pool %s', pool['id']) def delete_cascade(self, context, pool, completor): self.delete(context, pool, completor) def _get_lbaas_fw_section_id(self): if not self._fw_section_id: self._fw_section_id = lb_common.get_lbaas_fw_section_id(self.vcns) return self._fw_section_id ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/nsx_v/lbaas_common.py0000644000175000017500000003470700000000000026451 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import xml.etree.ElementTree as et import netaddr from neutron_lib import constants from neutron_lib import exceptions as n_exc from oslo_log import log as logging from vmware_nsx._i18n import _ from vmware_nsx.common import locking from vmware_nsx.db import nsxv_db from vmware_nsx.plugins.nsx_v.vshield import edge_utils from vmware_nsx.plugins.nsx_v.vshield import vcns as nsxv_api LOG = logging.getLogger(__name__) MEMBER_ID_PFX = 'member-' RESOURCE_ID_PFX = 'lbaas-' LBAAS_FW_SECTION_NAME = 'LBaaS FW Rules' LBAAS_DEVICE_OWNER = constants.DEVICE_OWNER_NEUTRON_PREFIX + 'LB' def get_member_id(member_id): return MEMBER_ID_PFX + member_id def get_lb_resource_id(lb_id): return (RESOURCE_ID_PFX + lb_id)[:36] def get_lbaas_edge_id_for_subnet(context, plugin, subnet_id, tenant_id): """ Grab the id of an Edge appliance that is connected to subnet_id. 
""" subnet = plugin.get_subnet(context, subnet_id) net_id = subnet.get('network_id') filters = {'network_id': [net_id], 'device_owner': ['network:router_interface'], 'tenant_id': [tenant_id]} attached_routers = plugin.get_ports(context.elevated(), filters=filters, fields=['device_id']) for attached_router in attached_routers: router = plugin.get_router(context, attached_router['device_id']) if router.get('router_type') == 'exclusive': rtr_bindings = nsxv_db.get_nsxv_router_binding(context.session, router['id']) return rtr_bindings['edge_id'] def get_lb_edge_name(context, lb_id): """Look for the resource name of the edge hosting the LB. For older loadbalancers this may be a router edge """ binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding( context.session, lb_id) if binding: edge_binding = nsxv_db.get_nsxv_router_binding_by_edge( context.session, binding['edge_id']) if edge_binding: return edge_binding['router_id'] # fallback return get_lb_resource_id(lb_id) def get_lb_interface(context, plugin, lb_id, subnet_id): filters = {'fixed_ips': {'subnet_id': [subnet_id]}, 'device_id': [lb_id], 'device_owner': [LBAAS_DEVICE_OWNER]} lb_ports = plugin.get_ports(context.elevated(), filters=filters) return lb_ports def create_lb_interface(context, plugin, lb_id, subnet_id, tenant_id, vip_addr=None, subnet=None): if not subnet: subnet = plugin.get_subnet(context, subnet_id) network_id = subnet.get('network_id') port_dict = {'name': 'lb_if-' + lb_id, 'admin_state_up': True, 'network_id': network_id, 'tenant_id': tenant_id, 'fixed_ips': [{'subnet_id': subnet['id']}], 'device_owner': LBAAS_DEVICE_OWNER, 'device_id': lb_id, 'mac_address': constants.ATTR_NOT_SPECIFIED } port = plugin.base_create_port(context, {'port': port_dict}) ip_addr = port['fixed_ips'][0]['ip_address'] net = netaddr.IPNetwork(subnet['cidr']) resource_id = get_lb_edge_name(context, lb_id) address_groups = [{'primaryAddress': ip_addr, 'subnetPrefixLength': str(net.prefixlen), 'subnetMask': str(net.netmask)}] 
if vip_addr: address_groups[0]['secondaryAddresses'] = { 'type': 'secondary_addresses', 'ipAddress': [vip_addr]} edge_utils.update_internal_interface( plugin.nsx_v, context, resource_id, network_id, address_groups) def delete_lb_interface(context, plugin, lb_id, subnet_id): resource_id = get_lb_edge_name(context, lb_id) subnet = plugin.get_subnet(context, subnet_id) network_id = subnet.get('network_id') lb_ports = get_lb_interface(context, plugin, lb_id, subnet_id) for lb_port in lb_ports: plugin.delete_port(context, lb_port['id'], allow_delete_lb_if=True) edge_utils.delete_interface(plugin.nsx_v, context, resource_id, network_id, dist=False) def get_lbaas_edge_id(context, plugin, lb_id, vip_addr, subnet_id, tenant_id, appliance_size): subnet = plugin.get_subnet(context, subnet_id) network_id = subnet.get('network_id') availability_zone = plugin.get_network_az_by_net_id(context, network_id) resource_id = get_lb_resource_id(lb_id) edge_id = plugin.edge_manager.allocate_lb_edge_appliance( context, resource_id, availability_zone=availability_zone, appliance_size=appliance_size) create_lb_interface(context, plugin, lb_id, subnet_id, tenant_id, vip_addr=vip_addr, subnet=subnet) gw_ip = subnet.get('gateway_ip') if gw_ip or subnet['host_routes']: routes = [{'cidr': r['destination'], 'nexthop': r['nexthop']} for r in subnet['host_routes']] plugin.nsx_v.update_routes(edge_id, gw_ip, routes) return edge_id def find_address_in_same_subnet(ip_addr, address_groups): """ Lookup an address group with a matching subnet to ip_addr. If found, return address_group. """ for address_group in address_groups['addressGroups']: net_addr = '%(primaryAddress)s/%(subnetPrefixLength)s' % address_group if netaddr.IPAddress(ip_addr) in netaddr.IPNetwork(net_addr): return address_group def add_address_to_address_groups(ip_addr, address_groups): """ Add ip_addr as a secondary IP address to an address group which belongs to the same subnet. 
""" address_group = find_address_in_same_subnet( ip_addr, address_groups) if address_group: sec_addr = address_group.get('secondaryAddresses') if not sec_addr: sec_addr = { 'type': 'secondary_addresses', 'ipAddress': [ip_addr]} else: sec_addr['ipAddress'].append(ip_addr) address_group['secondaryAddresses'] = sec_addr return True return False def del_address_from_address_groups(ip_addr, address_groups): """ Delete ip_addr from secondary address list in address groups. """ address_group = find_address_in_same_subnet(ip_addr, address_groups) if address_group: sec_addr = address_group.get('secondaryAddresses') if sec_addr and ip_addr in sec_addr['ipAddress']: sec_addr['ipAddress'].remove(ip_addr) return True return False def vip_as_secondary_ip(vcns, edge_id, vip, handler): with locking.LockManager.get_lock(edge_id): r = vcns.get_interfaces(edge_id)[1] vnics = r.get('vnics', []) for vnic in vnics: if vnic['type'] == 'trunk': for sub_interface in vnic.get('subInterfaces', {}).get( 'subInterfaces', []): address_groups = sub_interface.get('addressGroups') if handler(vip, address_groups): vcns.update_interface(edge_id, vnic) return True else: address_groups = vnic.get('addressGroups') if handler(vip, address_groups): vcns.update_interface(edge_id, vnic) return True return False def add_vip_as_secondary_ip(vcns, edge_id, vip): """ Edge appliance requires that a VIP will be configured as a primary or a secondary IP address on an interface. To do so, we locate an interface which is connected to the same subnet that vip belongs to. This can be a regular interface, on a sub-interface on a trunk. """ if not vip_as_secondary_ip(vcns, edge_id, vip, add_address_to_address_groups): msg = _('Failed to add VIP %(vip)s as secondary IP on ' 'Edge %(edge_id)s') % {'vip': vip, 'edge_id': edge_id} raise n_exc.BadRequest(resource='edge-lbaas', msg=msg) def del_vip_as_secondary_ip(vcns, edge_id, vip): """ While removing vip, delete the secondary interface from Edge config. 
""" if not vip_as_secondary_ip(vcns, edge_id, vip, del_address_from_address_groups): msg = _('Failed to delete VIP %(vip)s as secondary IP on ' 'Edge %(edge_id)s') % {'vip': vip, 'edge_id': edge_id} raise n_exc.BadRequest(resource='edge-lbaas', msg=msg) def extract_resource_id(location_uri): """ Edge assigns an ID for each resource that is being created: it is postfixes the uri specified in the Location header. This ID should be used while updating/deleting this resource. """ uri_elements = location_uri.split('/') return uri_elements[-1] def set_lb_firewall_default_rule(vcns, edge_id, action): with locking.LockManager.get_lock(edge_id): vcns.update_firewall_default_policy(edge_id, {'action': action}) def add_vip_fw_rule(vcns, edge_id, vip_id, ip_address): fw_rule = { 'firewallRules': [ {'action': 'accept', 'destination': { 'ipAddress': [ip_address]}, 'enabled': True, 'name': vip_id}]} with locking.LockManager.get_lock(edge_id): h = vcns.add_firewall_rule(edge_id, fw_rule)[0] fw_rule_id = extract_resource_id(h['location']) return fw_rule_id def del_vip_fw_rule(vcns, edge_id, vip_fw_rule_id): with locking.LockManager.get_lock(edge_id): vcns.delete_firewall_rule(edge_id, vip_fw_rule_id) def get_edge_ip_addresses(vcns, edge_id): edge_ips = [] r = vcns.get_interfaces(edge_id)[1] vnics = r.get('vnics', []) for vnic in vnics: if vnic['type'] == 'trunk': for sub_interface in vnic.get('subInterfaces', {}).get( 'subInterfaces', []): address_groups = sub_interface.get('addressGroups') for address_group in address_groups['addressGroups']: edge_ips.append(address_group['primaryAddress']) else: address_groups = vnic.get('addressGroups') for address_group in address_groups['addressGroups']: edge_ips.append(address_group['primaryAddress']) return edge_ips def update_pool_fw_rule(vcns, pool_id, edge_id, section_id, member_ips): edge_ips = get_edge_ip_addresses(vcns, edge_id) with locking.LockManager.get_lock('lbaas-fw-section'): section_uri = '%s/%s/%s' % (nsxv_api.FIREWALL_PREFIX, 
'layer3sections', section_id) xml_section = vcns.get_section(section_uri)[1] section = et.fromstring(xml_section) pool_rule = None for rule in section.iter('rule'): if rule.find('name').text == pool_id: pool_rule = rule sources = pool_rule.find('sources') if sources: pool_rule.remove(sources) destinations = pool_rule.find('destinations') if destinations: pool_rule.remove(destinations) break if not pool_rule and member_ips: pool_rule = et.SubElement(section, 'rule') et.SubElement(pool_rule, 'name').text = pool_id et.SubElement(pool_rule, 'action').text = 'allow' if member_ips: sources = et.SubElement(pool_rule, 'sources') sources.attrib['excluded'] = 'false' for edge_ip in edge_ips: source = et.SubElement(sources, 'source') et.SubElement(source, 'type').text = 'Ipv4Address' et.SubElement(source, 'value').text = edge_ip destinations = et.SubElement(pool_rule, 'destinations') destinations.attrib['excluded'] = 'false' for member_ip in member_ips: destination = et.SubElement(destinations, 'destination') et.SubElement(destination, 'type').text = 'Ipv4Address' et.SubElement(destination, 'value').text = member_ip elif pool_rule: section.remove(pool_rule) vcns.update_section(section_uri, et.tostring(section, encoding="us-ascii"), None) def get_lbaas_fw_section_id(vcns): # Avoid concurrent creation of section by multiple neutron # instances with locking.LockManager.get_lock('lbaas-fw-section'): fw_section_id = vcns.get_section_id(LBAAS_FW_SECTION_NAME) if not fw_section_id: section = et.Element('section') section.attrib['name'] = LBAAS_FW_SECTION_NAME sect = vcns.create_section('ip', et.tostring(section))[1] fw_section_id = et.fromstring(sect).attrib['id'] return fw_section_id def enable_edge_acceleration(vcns, edge_id): with locking.LockManager.get_lock(edge_id): # Query the existing load balancer config in case metadata lb is set _, config = vcns.get_loadbalancer_config(edge_id) config['accelerationEnabled'] = True config['enabled'] = True config['featureType'] = 
'loadbalancer_4.0' vcns.enable_service_loadbalancer(edge_id, config) def is_lb_on_router_edge(context, core_plugin, edge_id): binding = nsxv_db.get_nsxv_router_binding_by_edge( context.session, edge_id) router_id = binding['router_id'] if router_id.startswith(RESOURCE_ID_PFX): # New lbaas edge return False # verify that this is a router (and an exclusive one) try: router = core_plugin.get_router(context, router_id) if router.get('router_type') == 'exclusive': return True except Exception: pass LOG.error("Edge %(edge)s router %(rtr)s is not an lbaas edge, but also " "not an exclusive router", {'edge': edge_id, 'rtr': router_id}) return False ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2182543 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/nsx_v3/0000755000175000017500000000000000000000000023515 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/nsx_v3/__init__.py0000644000175000017500000000000000000000000025614 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2182543 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/nsx_v3/implementation/0000755000175000017500000000000000000000000026542 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/nsx_v3/implementation/__init__.py0000644000175000017500000000000000000000000030641 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/nsx_v3/implementation/healthmonitor_mgr.py0000644000175000017500000001647000000000000032646 0ustar00coreycorey00000000000000# Copyright 
2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import exceptions as n_exc from oslo_log import log as logging from oslo_utils import excutils from vmware_nsx._i18n import _ from vmware_nsx.db import db as nsx_db from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas import lb_const from vmware_nsx.services.lbaas.nsx_v3.implementation import lb_utils from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3 import utils LOG = logging.getLogger(__name__) class EdgeHealthMonitorManagerFromDict(base_mgr.Nsxv3LoadbalancerBaseManager): def _build_monitor_args(self, hm): if hm['type'] in lb_const.NSXV3_MONITOR_MAP: monitor_type = lb_const.NSXV3_MONITOR_MAP.get(hm['type']) else: msg = (_('Cannot create health monitor %(monitor)s with ' 'type %(type)s') % {'monitor': hm['id'], 'type': hm['type']}) raise n_exc.InvalidInput(error_message=msg) body = {'resource_type': monitor_type, 'interval': hm['delay'], 'fall_count': hm['max_retries'], 'timeout': hm['timeout']} if hm['type'] in [lb_const.LB_HEALTH_MONITOR_HTTP, lb_const.LB_HEALTH_MONITOR_HTTPS]: if hm['http_method']: body['request_method'] = hm['http_method'] if hm['url_path']: body['request_url'] = hm['url_path'] if hm['expected_codes']: codes = hm['expected_codes'].split(",") body['response_status_codes'] = [ int(code) for code in codes] return body def create(self, context, hm, completor): lb_id = hm['pool']['loadbalancer_id'] pool_id = 
hm['pool']['id'] pool_client = self.core_plugin.nsxlib.load_balancer.pool monitor_client = self.core_plugin.nsxlib.load_balancer.monitor monitor_name = utils.get_name_and_uuid(hm['name'] or 'monitor', hm['id']) tags = lb_utils.get_tags(self.core_plugin, hm['id'], lb_const.LB_HM_TYPE, hm['tenant_id'], context.project_name) monitor_body = self._build_monitor_args(hm) try: lb_monitor = monitor_client.create( display_name=monitor_name, tags=tags, **monitor_body) except nsxlib_exc.ManagerError: with excutils.save_and_reraise_exception(): completor(success=False) binding = nsx_db.get_nsx_lbaas_pool_binding( context.session, lb_id, pool_id) if binding: lb_pool_id = binding['lb_pool_id'] try: pool_client.add_monitor_to_pool(lb_pool_id, lb_monitor['id']) except nsxlib_exc.ManagerError: completor(success=False) msg = _('Failed to attach monitor %(monitor)s to pool ' '%(pool)s') % {'monitor': lb_monitor['id'], 'pool': lb_pool_id} raise n_exc.BadRequest(resource='lbaas-hm', msg=msg) nsx_db.add_nsx_lbaas_monitor_binding( context.session, lb_id, pool_id, hm['id'], lb_monitor['id'], lb_pool_id) else: completor(success=False) msg = _('Failed to attach monitor %(monitor)s to pool ' '%(pool)s: NSX pool was not found on the DB') % { 'monitor': hm['id'], 'pool': pool_id} raise n_exc.BadRequest(resource='lbaas-hm', msg=msg) completor(success=True) def update(self, context, old_hm, new_hm, completor): lb_id = new_hm['pool']['loadbalancer_id'] pool_id = new_hm['pool']['id'] monitor_client = self.core_plugin.nsxlib.load_balancer.monitor binding = nsx_db.get_nsx_lbaas_monitor_binding( context.session, lb_id, pool_id, new_hm['id']) if binding: lb_monitor_id = binding['lb_monitor_id'] monitor_body = self._build_monitor_args(new_hm) monitor_name = utils.get_name_and_uuid(new_hm['name'] or 'monitor', new_hm['id']) monitor_client.update(lb_monitor_id, display_name=monitor_name, **monitor_body) else: completor(success=False) msg = _('Failed to update monitor %(monitor)s: NSX monitor was ' 'not 
found in DB') % {'monitor': new_hm['id'], 'pool': pool_id} raise n_exc.BadRequest(resource='lbaas-hm', msg=msg) completor(success=True) def delete(self, context, hm, completor): lb_id = hm['pool']['loadbalancer_id'] pool_id = hm['pool']['id'] pool_client = self.core_plugin.nsxlib.load_balancer.pool monitor_client = self.core_plugin.nsxlib.load_balancer.monitor binding = nsx_db.get_nsx_lbaas_monitor_binding( context.session, lb_id, pool_id, hm['id']) if binding: lb_monitor_id = binding['lb_monitor_id'] lb_pool_id = binding['lb_pool_id'] try: pool_client.remove_monitor_from_pool(lb_pool_id, lb_monitor_id) except nsxlib_exc.ResourceNotFound: pass except nsxlib_exc.ManagerError as exc: completor(success=False) msg = _('Failed to remove monitor %(monitor)s from pool ' '%(pool)s with exception from nsx %(exc)s)') % { 'monitor': lb_monitor_id, 'pool': lb_pool_id, 'exc': exc} raise n_exc.BadRequest(resource='lbaas-hm', msg=msg) try: monitor_client.delete(lb_monitor_id) except nsxlib_exc.ResourceNotFound: pass except nsxlib_exc.ManagerError as exc: completor(success=False) msg = _('Failed to delete monitor %(monitor)s from ' 'backend with exception %(exc)s') % { 'monitor': lb_monitor_id, 'exc': exc} raise n_exc.BadRequest(resource='lbaas-hm', msg=msg) nsx_db.delete_nsx_lbaas_monitor_binding(context.session, lb_id, pool_id, hm['id']) else: # Do not fail a delete action pass completor(success=True) def delete_cascade(self, context, hm, completor): self.delete(context, hm, completor) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/nsx_v3/implementation/l7policy_mgr.py0000644000175000017500000001364000000000000031527 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import exceptions as n_exc from oslo_log import log as logging from oslo_utils import excutils from vmware_nsx._i18n import _ from vmware_nsx.db import db as nsx_db from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas import lb_const from vmware_nsx.services.lbaas.nsx_v3.implementation import lb_utils from vmware_nsxlib.v3 import exceptions as nsxlib_exc LOG = logging.getLogger(__name__) class EdgeL7PolicyManagerFromDict(base_mgr.Nsxv3LoadbalancerBaseManager): def _update_policy_position(self, vs_id, rule_id, position): vs_client = self.core_plugin.nsxlib.load_balancer.virtual_server vs = vs_client.get(vs_id) lb_rules = vs.get('rule_ids', []) if rule_id in lb_rules: lb_rules.remove(rule_id) if len(lb_rules) < position: lb_rules.append(rule_id) else: lb_rules.insert(position - 1, rule_id) vs_client.update(vs_id, rule_ids=lb_rules) def create(self, context, policy, completor): lb_id = policy['listener']['loadbalancer_id'] listener_id = policy['listener_id'] rule_client = self.core_plugin.nsxlib.load_balancer.rule tags = lb_utils.get_tags(self.core_plugin, policy['id'], lb_const.LB_L7POLICY_TYPE, policy['tenant_id'], context.project_name) binding = nsx_db.get_nsx_lbaas_listener_binding( context.session, lb_id, listener_id) if not binding: completor(success=False) msg = _('Cannot find nsx lbaas binding for listener ' '%(listener_id)s') % {'listener_id': listener_id} raise n_exc.BadRequest(resource='lbaas-l7policy-create', msg=msg) vs_id = binding['lb_vs_id'] rule_body = lb_utils.convert_l7policy_to_lb_rule(context, policy) try: 
lb_rule = rule_client.create(tags=tags, **rule_body) except nsxlib_exc.ManagerError: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to create lb rule at NSX backend') try: self._update_policy_position(vs_id, lb_rule['id'], policy['position']) except nsxlib_exc.ManagerError: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to add rule %(rule)% to virtual server ' '%(vs)s at NSX backend', {'rule': lb_rule['id'], 'vs': vs_id}) nsx_db.add_nsx_lbaas_l7policy_binding( context.session, policy['id'], lb_rule['id'], vs_id) completor(success=True) def update(self, context, old_policy, new_policy, completor): rule_client = self.core_plugin.nsxlib.load_balancer.rule binding = nsx_db.get_nsx_lbaas_l7policy_binding(context.session, old_policy['id']) if not binding: completor(success=False) msg = _('Cannot find nsx lbaas binding for policy ' '%(policy_id)s') % {'policy_id': old_policy['id']} raise n_exc.BadRequest(resource='lbaas-l7policy-update', msg=msg) vs_id = binding['lb_vs_id'] lb_rule_id = binding['lb_rule_id'] rule_body = lb_utils.convert_l7policy_to_lb_rule(context, new_policy) try: rule_client.update(lb_rule_id, **rule_body) if new_policy['position'] != old_policy['position']: self._update_policy_position(vs_id, lb_rule_id, new_policy['position']) except Exception as e: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to update L7policy %(policy)s: ' '%(err)s', {'policy': old_policy['id'], 'err': e}) completor(success=True) def delete(self, context, policy, completor): vs_client = self.core_plugin.nsxlib.load_balancer.virtual_server rule_client = self.core_plugin.nsxlib.load_balancer.rule binding = nsx_db.get_nsx_lbaas_l7policy_binding(context.session, policy['id']) if binding: vs_id = binding['lb_vs_id'] rule_id = binding['lb_rule_id'] try: # Update virtual server to remove lb rule vs_client.remove_rule(vs_id, rule_id) rule_client.delete(rule_id) except 
nsxlib_exc.ResourceNotFound: LOG.warning('LB rule %(rule)s is not found on NSX', {'rule': rule_id}) except nsxlib_exc.ManagerError: completor(success=False) msg = (_('Failed to delete lb rule: %(rule)s') % {'rule': rule_id}) raise n_exc.BadRequest(resource='lbaas-l7policy-delete', msg=msg) nsx_db.delete_nsx_lbaas_l7policy_binding( context.session, policy['id']) completor(success=True) def delete_cascade(self, context, policy, completor): self.delete(context, policy, completor) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/nsx_v3/implementation/l7rule_mgr.py0000644000175000017500000000534200000000000031177 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib import exceptions as n_exc from oslo_log import log as logging from oslo_utils import excutils from vmware_nsx._i18n import _ from vmware_nsx.db import db as nsx_db from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas.nsx_v3.implementation import lb_utils LOG = logging.getLogger(__name__) class EdgeL7RuleManagerFromDict(base_mgr.Nsxv3LoadbalancerBaseManager): def _update_l7rule_change(self, context, rule, completor, delete=False): rule_client = self.core_plugin.nsxlib.load_balancer.rule policy_id = rule['policy']['id'] binding = nsx_db.get_nsx_lbaas_l7policy_binding(context.session, policy_id) if not binding: completor(success=False) msg = _('Cannot find nsx lbaas binding for policy ' '%(policy_id)s') % {'policy_id': policy_id} raise n_exc.BadRequest(resource='lbaas-l7policy-update', msg=msg) lb_rule_id = binding['lb_rule_id'] if delete: lb_utils.remove_rule_from_policy(rule) else: lb_utils.update_rule_in_policy(rule) rule_body = lb_utils.convert_l7policy_to_lb_rule( context, rule['policy']) try: rule_client.update(lb_rule_id, **rule_body) except Exception as e: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to update L7policy %(policy)s: ' '%(err)s', {'policy': policy_id, 'err': e}) completor(success=True) def create(self, context, rule, completor): self._update_l7rule_change(context, rule, completor) def update(self, context, old_rule, new_rule, completor): self._update_l7rule_change(context, new_rule, completor) def delete(self, context, rule, completor): self._update_l7rule_change(context, rule, completor, delete=True) def delete_cascade(self, context, rulle, completor): # No action should be taken on rules delete cascade pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/nsx_v3/implementation/lb_utils.py0000644000175000017500000003252600000000000030741 
0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools from neutron.services.flavors import flavors_plugin from neutron_lib import exceptions as n_exc from oslo_log import helpers as log_helpers from oslo_log import log as logging from vmware_nsx._i18n import _ from vmware_nsx.db import db as nsx_db from vmware_nsx.services.lbaas import lb_const from vmware_nsxlib.v3 import load_balancer as nsxlib_lb from vmware_nsxlib.v3 import nsx_constants from vmware_nsxlib.v3 import utils LOG = logging.getLogger(__name__) ADV_RULE_NAME = 'LB external VIP advertisement' NO_ROUTER_ID = 'NO ROUTER' @log_helpers.log_method_call def get_tags(plugin, resource_id, resource_type, project_id, project_name): resource = {'project_id': project_id, 'id': resource_id} tags = plugin.nsxlib.build_v3_tags_payload( resource, resource_type=resource_type, project_name=project_name) return tags @log_helpers.log_method_call def get_network_from_subnet(context, plugin, subnet_id): subnet = plugin.get_subnet(context.elevated(), subnet_id) if subnet: return plugin.get_network(context.elevated(), subnet['network_id']) @log_helpers.log_method_call def get_router_from_network(context, plugin, subnet_id): subnet = plugin.get_subnet(context, subnet_id) network_id = subnet['network_id'] ports = plugin._get_network_interface_ports( context.elevated(), network_id) if ports: router = plugin.get_router(context.elevated(), 
ports[0]['device_id']) if router.get('external_gateway_info'): return router['id'] @log_helpers.log_method_call def get_lb_flavor_size(flavor_plugin, context, flavor_id): if not flavor_id: return lb_const.DEFAULT_LB_SIZE else: flavor = flavors_plugin.FlavorsPlugin.get_flavor( flavor_plugin, context, flavor_id) flavor_size = flavor['name'] if flavor_size in lb_const.LB_FLAVOR_SIZES: return flavor_size.upper() else: err_msg = (_("Invalid flavor size %(flavor)s, only 'small', " "'medium', or 'large' are supported") % {'flavor': flavor_size}) raise n_exc.InvalidInput(error_message=err_msg) @log_helpers.log_method_call def validate_lb_subnet(context, plugin, subnet_id): '''Validate LB subnet before creating loadbalancer on it. To create a loadbalancer, the network has to be either an external network or private network that connects to a tenant router. The tenant router needs to connect to gateway. It will throw exception if the network doesn't meet this requirement. :param context: context :param plugin: core plugin :param subnet_id: loadbalancer's subnet id :return: True if subnet meet requirement, otherwise return False ''' network = get_network_from_subnet(context, plugin, subnet_id) valid_router = get_router_from_network( context, plugin, subnet_id) if network.get('router:external') or valid_router: return True else: return False @log_helpers.log_method_call def validate_lb_member_subnet(context, plugin, subnet_id, lb): '''Validate LB member subnet before creating a member. The member subnet should belong to an external network or be connected to the same T1 router as the Lb vip. It will throw exception if the subnet doesn't meet this requirement. 
:param context: context :param plugin: core plugin :param subnet_id: loadbalancer's subnet id :return: True if subnet meet requirement, otherwise return False ''' network = get_network_from_subnet(context, plugin, subnet_id) if network.get('router:external'): return True member_router_id = get_router_from_network( context, plugin, subnet_id) lb_router_id = get_router_from_network( context, plugin, lb['vip_subnet_id']) if lb_router_id: # Lb on non-external network. member must be on the same router if lb_router_id == member_router_id: return True else: return False else: # LB on external network. member subnet must have a router if member_router_id: return True else: return False def get_rule_match_conditions(policy): match_conditions = [] # values in rule have already been validated in LBaaS API, # we won't need to valid anymore in driver, and just get # the LB rule mapping from the dict. for rule in policy['rules']: match_type = lb_const.LB_RULE_MATCH_TYPE[rule['compare_type']] if rule['type'] == lb_const.L7_RULE_TYPE_COOKIE: header_value = rule['key'] + '=' + rule['value'] match_conditions.append( {'type': 'LbHttpRequestHeaderCondition', 'match_type': match_type, 'header_name': 'Cookie', 'header_value': header_value}) elif rule['type'] == lb_const.L7_RULE_TYPE_FILE_TYPE: match_conditions.append( {'type': 'LbHttpRequestUriCondition', 'match_type': match_type, 'uri': '*.' 
+ rule['value']}) elif rule['type'] == lb_const.L7_RULE_TYPE_HEADER: match_conditions.append( {'type': 'LbHttpRequestHeaderCondition', 'match_type': match_type, 'header_name': rule['key'], 'header_value': rule['value']}) elif rule['type'] == lb_const.L7_RULE_TYPE_HOST_NAME: match_conditions.append( {'type': 'LbHttpRequestHeaderCondition', 'match_type': match_type, 'header_name': 'Host', 'header_value': rule['value']}) elif rule['type'] == lb_const.L7_RULE_TYPE_PATH: match_conditions.append( {'type': 'LbHttpRequestUriCondition', 'match_type': match_type, 'uri': rule['value']}) else: msg = (_('l7rule type %(type)s is not supported in LBaaS') % {'type': rule['type']}) raise n_exc.BadRequest(resource='lbaas-l7rule', msg=msg) return match_conditions def get_rule_actions(context, l7policy): lb_id = l7policy['listener']['loadbalancer_id'] if l7policy['action'] == lb_const.L7_POLICY_ACTION_REDIRECT_TO_POOL: pool_binding = nsx_db.get_nsx_lbaas_pool_binding( context.session, lb_id, l7policy['redirect_pool_id']) if pool_binding: lb_pool_id = pool_binding['lb_pool_id'] actions = [{'type': lb_const.LB_SELECT_POOL_ACTION, 'pool_id': lb_pool_id}] else: msg = _('Failed to get LB pool binding from nsx db') raise n_exc.BadRequest(resource='lbaas-l7rule-create', msg=msg) elif l7policy['action'] == lb_const.L7_POLICY_ACTION_REDIRECT_TO_URL: actions = [{'type': lb_const.LB_HTTP_REDIRECT_ACTION, 'redirect_status': lb_const.LB_HTTP_REDIRECT_STATUS, 'redirect_url': l7policy['redirect_url']}] elif l7policy['action'] == lb_const.L7_POLICY_ACTION_REJECT: actions = [{'type': lb_const.LB_REJECT_ACTION, 'reply_status': lb_const.LB_HTTP_REJECT_STATUS}] else: msg = (_('Invalid l7policy action: %(action)s') % {'action': l7policy['action']}) raise n_exc.BadRequest(resource='lbaas-l7rule-create', msg=msg) return actions def convert_l7policy_to_lb_rule(context, policy): return { 'match_conditions': get_rule_match_conditions(policy), 'actions': get_rule_actions(context, policy), 'phase': 
lb_const.LB_RULE_HTTP_FORWARDING, 'match_strategy': 'ALL' } def remove_rule_from_policy(rule): l7rules = rule['policy']['rules'] rule['policy']['rules'] = [r for r in l7rules if r['id'] != rule['id']] def update_rule_in_policy(rule): remove_rule_from_policy(rule) rule['policy']['rules'].append(rule) @log_helpers.log_method_call def update_router_lb_vip_advertisement(context, core_plugin, router, nsx_router_id): # Add a rule to advertise external vips on the router external_subnets = core_plugin._find_router_gw_subnets( context.elevated(), router) external_cidrs = [s['cidr'] for s in external_subnets] if external_cidrs: adv_rule = { 'display_name': ADV_RULE_NAME, 'action': nsx_constants.FW_ACTION_ALLOW, 'networks': external_cidrs, 'rule_filter': {'prefix_operator': 'GE', 'match_route_types': ['T1_LB_VIP']}} core_plugin.nsxlib.logical_router.update_advertisement_rules( nsx_router_id, [adv_rule], name_prefix=ADV_RULE_NAME) @log_helpers.log_method_call def delete_persistence_profile(nsxlib, persistence_profile_id): if persistence_profile_id: nsxlib.load_balancer.persistence_profile.delete(persistence_profile_id) def build_persistence_profile_tags(pool_tags, listener): tags = pool_tags[:] # With octavia loadbalancer name might not be among data passed # down to the driver lb_data = listener.get('loadbalancer') if lb_data: tags.append({ 'scope': lb_const.LB_LB_NAME, 'tag': lb_data['name'][:utils.MAX_TAG_LEN]}) tags.append({ 'scope': lb_const.LB_LB_TYPE, 'tag': listener['loadbalancer_id']}) tags.append({ 'scope': lb_const.LB_LISTENER_TYPE, 'tag': listener['id']}) return tags def get_pool_tags(context, core_plugin, pool): return get_tags(core_plugin, pool['id'], lb_const.LB_POOL_TYPE, pool.get('tenant_id', ''), context.project_name) def setup_session_persistence(nsxlib, pool, pool_tags, switch_type, listener, vs_data): sp = pool.get('session_persistence') pers_type = None cookie_name = None cookie_mode = None if not sp: LOG.debug("No session persistence info for pool %s", 
pool['id']) elif sp['type'] == lb_const.LB_SESSION_PERSISTENCE_HTTP_COOKIE: pers_type = nsxlib_lb.PersistenceProfileTypes.COOKIE cookie_name = sp.get('cookie_name') if not cookie_name: cookie_name = lb_const.SESSION_PERSISTENCE_DEFAULT_COOKIE_NAME cookie_mode = "INSERT" elif sp['type'] == lb_const.LB_SESSION_PERSISTENCE_APP_COOKIE: pers_type = nsxlib_lb.PersistenceProfileTypes.COOKIE # In this case cookie name is mandatory cookie_name = sp['cookie_name'] cookie_mode = "REWRITE" else: pers_type = nsxlib_lb.PersistenceProfileTypes.SOURCE_IP if pers_type: # There is a profile to create or update pp_kwargs = { 'resource_type': pers_type, 'display_name': "persistence_%s" % utils.get_name_and_uuid( pool['name'] or 'pool', pool['id'], maxlen=235), 'tags': build_persistence_profile_tags(pool_tags, listener) } if cookie_name: pp_kwargs['cookie_name'] = cookie_name pp_kwargs['cookie_mode'] = cookie_mode pp_client = nsxlib.load_balancer.persistence_profile persistence_profile_id = vs_data.get('persistence_profile_id') if persistence_profile_id and not switch_type: # NOTE: removal of the persistence profile must be executed # after the virtual server has been updated if pers_type: # Update existing profile LOG.debug("Updating persistence profile %(profile_id)s for " "listener %(listener_id)s with pool %(pool_id)s", {'profile_id': persistence_profile_id, 'listener_id': listener['id'], 'pool_id': pool['id']}) pp_client.update(persistence_profile_id, **pp_kwargs) return persistence_profile_id, None else: # Prepare removal of persistence profile return (None, functools.partial(delete_persistence_profile, nsxlib, persistence_profile_id)) elif pers_type: # Create persistence profile pp_data = pp_client.create(**pp_kwargs) LOG.debug("Created persistence profile %(profile_id)s for " "listener %(listener_id)s with pool %(pool_id)s", {'profile_id': pp_data['id'], 'listener_id': listener['id'], 'pool_id': pool['id']}) if switch_type: # There is also a persistence profile to remove! 
return (pp_data['id'], functools.partial(delete_persistence_profile, nsxlib, persistence_profile_id)) return pp_data['id'], None return None, None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/nsx_v3/implementation/listener_mgr.py0000644000175000017500000005007700000000000031617 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from neutron_lib import exceptions as n_exc from oslo_log import log as logging from oslo_utils import excutils from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.db import db as nsx_db from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas import lb_common from vmware_nsx.services.lbaas import lb_const from vmware_nsx.services.lbaas.nsx_v3.implementation import lb_utils from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3 import utils LOG = logging.getLogger(__name__) class EdgeListenerManagerFromDict(base_mgr.Nsxv3LoadbalancerBaseManager): def _get_virtual_server_kwargs(self, context, listener, vs_name, tags, app_profile_id, certificate=None): # If loadbalancer vip_port already has floating ip, use floating # IP as the virtual server VIP address. Else, use the loadbalancer # vip_address directly on virtual server. 
filters = {'port_id': [listener['loadbalancer']['vip_port_id']]} floating_ips = self.core_plugin.get_floatingips(context, filters=filters) if floating_ips: lb_vip_address = floating_ips[0]['floating_ip_address'] else: lb_vip_address = listener['loadbalancer']['vip_address'] kwargs = {'enabled': listener['admin_state_up'], 'ip_address': lb_vip_address, 'port': listener['protocol_port'], 'application_profile_id': app_profile_id, 'description': listener.get('description')} if vs_name: kwargs['display_name'] = vs_name if tags: kwargs['tags'] = tags if listener['connection_limit'] != -1: kwargs['max_concurrent_connections'] = \ listener['connection_limit'] if 'default_pool_id' in listener: if listener['default_pool_id']: pool_binding = nsx_db.get_nsx_lbaas_pool_binding( context.session, listener['loadbalancer']['id'], listener['default_pool_id']) if pool_binding: kwargs['pool_id'] = pool_binding.get('lb_pool_id') else: # Remove the default pool kwargs['pool_id'] = None kwargs['persistence_profile_id'] = '' ssl_profile_binding = self._get_ssl_profile_binding( tags, certificate=certificate) if (listener['protocol'] == lb_const.LB_PROTOCOL_TERMINATED_HTTPS and ssl_profile_binding): kwargs.update(ssl_profile_binding) return kwargs def _get_ssl_profile_binding(self, tags, certificate=None): tm_client = self.core_plugin.nsxlib.trust_management if certificate: # First check if NSX already has certificate with same pem. # If so, use that certificate for ssl binding. Otherwise, # create a new certificate on NSX. 
cert_ids = tm_client.find_cert_with_pem( certificate.get_certificate()) if cert_ids: nsx_cert_id = cert_ids[0] else: nsx_cert_id = tm_client.create_cert( certificate.get_certificate(), private_key=certificate.get_private_key(), passphrase=certificate.get_private_key_passphrase(), tags=tags) return { 'client_ssl_profile_binding': { 'ssl_profile_id': self.core_plugin.client_ssl_profile, 'default_certificate_id': nsx_cert_id } } def _get_listener_tags(self, context, listener): tags = lb_utils.get_tags(self.core_plugin, listener['id'], lb_const.LB_LISTENER_TYPE, listener['tenant_id'], context.project_name) tags.append({ 'scope': lb_const.LB_LB_NAME, 'tag': listener['loadbalancer']['name'][:utils.MAX_TAG_LEN]}) tags.append({ 'scope': lb_const.LB_LB_TYPE, 'tag': listener['loadbalancer_id']}) return tags def _validate_default_pool(self, context, listener, vs_id, completor, old_listener=None): if listener.get('default_pool_id'): pool_binding = nsx_db.get_nsx_lbaas_pool_binding( context.session, listener['loadbalancer']['id'], listener['default_pool_id']) if (pool_binding and pool_binding['lb_vs_id'] and (vs_id is None or pool_binding['lb_vs_id'] != vs_id)): completor(success=False) msg = (_('Default pool %s is already used by another ' 'listener') % listener['default_pool_id']) raise n_exc.BadRequest(resource='lbaas-pool', msg=msg) lb_common.validate_session_persistence( listener.get('default_pool'), listener, completor) def _update_default_pool_and_binding(self, context, listener, vs_data, completor, old_listener=None): vs_client = self.core_plugin.nsxlib.load_balancer.virtual_server if listener.get('default_pool_id'): vs_id = vs_data['id'] lb_id = (listener.get('loadbalancer_id') or listener.get('loadbalancer', {}).get('id')) pool_id = listener['default_pool_id'] pool = listener['default_pool'] old_pool = None if old_listener: old_pool = old_listener.get('default_pool') try: switch_type = lb_common.session_persistence_type_changed( pool, old_pool) 
(persistence_profile_id, post_process_func) = lb_utils.setup_session_persistence( self.core_plugin.nsxlib, pool, lb_utils.get_pool_tags(context, self.core_plugin, pool), switch_type, listener, vs_data) except nsxlib_exc.ManagerError: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error("Failed to configure session persistence " "profile for listener %s", listener['id']) try: # Update persistence profile and pool on virtual server vs_client.update( vs_id, persistence_profile_id=persistence_profile_id) LOG.debug("Updated NSX virtual server %(vs_id)s with " "persistence profile %(prof)s", {'vs_id': vs_id, 'prof': persistence_profile_id}) if post_process_func: post_process_func() except nsxlib_exc.ManagerError: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error("Failed to attach persistence profile %s to " "virtual server %s", persistence_profile_id, vs_id) # Update the DB binding of the default pool nsx_db.update_nsx_lbaas_pool_binding( context.session, lb_id, pool_id, vs_id) def _remove_default_pool_binding(self, context, listener): if not listener.get('default_pool_id'): return # Remove the current default pool from the DB bindings lb_id = (listener.get('loadbalancer_id') or listener.get('loadbalancer', {}).get('id')) pool_id = listener['default_pool_id'] pool_binding = nsx_db.get_nsx_lbaas_pool_binding( context.session, lb_id, pool_id) if pool_binding: nsx_db.update_nsx_lbaas_pool_binding( context.session, lb_id, pool_id, None) def create(self, context, listener, completor, certificate=None): lb_id = listener['loadbalancer_id'] nsxlib_lb = self.core_plugin.nsxlib.load_balancer app_client = nsxlib_lb.application_profile vs_client = nsxlib_lb.virtual_server service_client = nsxlib_lb.service vs_name = utils.get_name_and_uuid(listener['name'] or 'listener', listener['id']) tags = self._get_listener_tags(context, listener) if (listener['protocol'] == lb_const.LB_PROTOCOL_HTTP or listener['protocol'] == 
lb_const.LB_PROTOCOL_TERMINATED_HTTPS): profile_type = lb_const.LB_HTTP_PROFILE elif (listener['protocol'] == lb_const.LB_PROTOCOL_TCP or listener['protocol'] == lb_const.LB_PROTOCOL_HTTPS): profile_type = lb_const.LB_TCP_PROFILE else: completor(success=False) msg = (_('Cannot create listener %(listener)s with ' 'protocol %(protocol)s') % {'listener': listener['id'], 'protocol': listener['protocol']}) raise n_exc.BadRequest(resource='lbaas-listener', msg=msg) # Validate default pool self._validate_default_pool(context, listener, None, completor) try: app_profile = app_client.create( display_name=vs_name, resource_type=profile_type, tags=tags) app_profile_id = app_profile['id'] kwargs = self._get_virtual_server_kwargs( context, listener, vs_name, tags, app_profile_id, certificate) virtual_server = vs_client.create(**kwargs) except nsxlib_exc.ManagerError: completor(success=False) msg = _('Failed to create virtual server at NSX backend') raise n_exc.BadRequest(resource='lbaas-listener', msg=msg) # If there is already lb:lb_service binding, add the virtual # server to the lb service binding = nsx_db.get_nsx_lbaas_loadbalancer_binding( context.session, lb_id) if not binding: completor(success=False) msg = _('Failed to get loadbalancer %s binding') % lb_id raise n_exc.BadRequest(resource='lbaas-listener', msg=msg) lb_service_id = binding['lb_service_id'] try: service_client.add_virtual_server(lb_service_id, virtual_server['id']) except nsxlib_exc.ManagerError: completor(success=False) msg = _('Failed to add virtual server to lb service ' 'at NSX backend') # delete the backend virtual server vs_client.delete(virtual_server['id']) raise n_exc.BadRequest(resource='lbaas-listener', msg=msg) nsx_db.add_nsx_lbaas_listener_binding( context.session, lb_id, listener['id'], app_profile_id, virtual_server['id']) self._update_default_pool_and_binding( context, listener, virtual_server, completor) completor(success=True) def update(self, context, old_listener, new_listener, 
completor, certificate=None): nsxlib_lb = self.core_plugin.nsxlib.load_balancer vs_client = nsxlib_lb.virtual_server app_client = nsxlib_lb.application_profile vs_name = None tags = None if new_listener['name'] != old_listener['name']: vs_name = utils.get_name_and_uuid( new_listener['name'] or 'listener', new_listener['id']) tags = self._get_listener_tags(context, new_listener) binding = nsx_db.get_nsx_lbaas_listener_binding( context.session, old_listener['loadbalancer_id'], old_listener['id']) if not binding: msg = (_('Cannot find listener %(listener)s binding on NSX ' 'backend'), {'listener': old_listener['id']}) raise n_exc.BadRequest(resource='lbaas-listener', msg=msg) # Validate default pool self._validate_default_pool( context, new_listener, binding['lb_vs_id'], completor, old_listener=old_listener) try: vs_id = binding['lb_vs_id'] app_profile_id = binding['app_profile_id'] updated_kwargs = self._get_virtual_server_kwargs( context, new_listener, vs_name, tags, app_profile_id, certificate) vs_data = vs_client.update(vs_id, **updated_kwargs) if vs_name: app_client.update(app_profile_id, display_name=vs_name, tags=tags) except Exception as e: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to update listener %(listener)s with ' 'error %(error)s', {'listener': old_listener['id'], 'error': e}) # Update default pool and session persistence (do this even if the # default pool did not change, as there might have been an error the # last time) self._remove_default_pool_binding(context, old_listener) self._update_default_pool_and_binding(context, new_listener, vs_data, completor, old_listener) completor(success=True) def delete(self, context, listener, completor): lb_id = listener['loadbalancer_id'] nsxlib_lb = self.core_plugin.nsxlib.load_balancer service_client = nsxlib_lb.service vs_client = nsxlib_lb.virtual_server app_client = nsxlib_lb.application_profile binding = nsx_db.get_nsx_lbaas_listener_binding( context.session, 
lb_id, listener['id']) if binding: vs_id = binding['lb_vs_id'] app_profile_id = binding['app_profile_id'] lb_binding = nsx_db.get_nsx_lbaas_loadbalancer_binding( context.session, lb_id) if not lb_binding: completor(success=False) msg = (_('Failed to delete virtual server: %(listener)s: ' 'loadbalancer %(lb)s mapping was not found') % {'listener': listener['id'], 'lb': lb_id}) raise n_exc.BadRequest(resource='lbaas-listener', msg=msg) try: lbs_id = lb_binding.get('lb_service_id') lb_service = service_client.get(lbs_id) vs_list = lb_service.get('virtual_server_ids') if vs_list and vs_id in vs_list: service_client.remove_virtual_server(lbs_id, vs_id) except nsxlib_exc.ManagerError: completor(success=False) msg = (_('Failed to remove virtual server: %(listener)s ' 'from lb service %(lbs)s') % {'listener': listener['id'], 'lbs': lbs_id}) raise n_exc.BadRequest(resource='lbaas-listener', msg=msg) try: persist_profile_id = None if listener.get('default_pool_id'): vs_data = vs_client.update(vs_id, pool_id='') persist_profile_id = vs_data.get('persistence_profile_id') # Update pool binding to disassociate virtual server self._remove_default_pool_binding(context, listener) vs_client.delete(vs_id) # Also delete the old session persistence profile if persist_profile_id: lb_utils.delete_persistence_profile( self.core_plugin.nsxlib, persist_profile_id) except nsx_exc.NsxResourceNotFound: msg = (_("virtual server not found on nsx: %(vs)s") % {'vs': vs_id}) raise n_exc.BadRequest(resource='lbaas-listener', msg=msg) except nsxlib_exc.ManagerError: completor(success=False) msg = (_('Failed to delete virtual server: %(listener)s') % {'listener': listener['id']}) raise n_exc.BadRequest(resource='lbaas-listener', msg=msg) try: app_client.delete(app_profile_id) except nsx_exc.NsxResourceNotFound: LOG.error("application profile not found on nsx: %s", app_profile_id) except nsxlib_exc.ManagerError as e: # This probably means that the application profile is being # used by a listener 
outside of openstack LOG.error("Failed to delete application profile %s from the " "NSX: %s", app_profile_id, e) # Delete imported NSX cert if there is any cert_tags = [{'scope': lb_const.LB_LISTENER_TYPE, 'tag': listener['id']}] results = self.core_plugin.nsxlib.search_by_tags( tags=cert_tags) # Only delete object related to certificate used by listener for res_obj in results['results']: res_type = res_obj.get('resource_type') if res_type in lb_const.LB_CERT_RESOURCE_TYPE: tm_client = self.core_plugin.nsxlib.trust_management try: tm_client.delete_cert(res_obj['id']) except nsxlib_exc.ManagerError: LOG.error("Exception thrown when trying to delete " "certificate: %(cert)s", {'cert': res_obj['id']}) nsx_db.delete_nsx_lbaas_listener_binding( context.session, lb_id, listener['id']) completor(success=True) def delete_cascade(self, context, listener, completor): self.delete(context, listener, completor) def stats_getter(context, core_plugin, ignore_list=None): """Update Octavia statistics for each listener (virtual server)""" stat_list = [] lb_service_client = core_plugin.nsxlib.load_balancer.service # Go over all the loadbalancers & services lb_bindings = nsx_db.get_nsx_lbaas_loadbalancer_bindings( context.session) for lb_binding in lb_bindings: if ignore_list and lb_binding['loadbalancer_id'] in ignore_list: continue lb_service_id = lb_binding.get('lb_service_id') try: # get the NSX statistics for this LB service rsp = lb_service_client.get_stats(lb_service_id) if rsp and 'virtual_servers' in rsp: # Go over each virtual server in the response for vs in rsp['virtual_servers']: # look up the virtual server in the DB vs_bind = nsx_db.get_nsx_lbaas_listener_binding_by_vs_id( context.session, vs['virtual_server_id']) if vs_bind and 'statistics' in vs: vs_stats = vs['statistics'] stats = copy.copy(lb_const.LB_EMPTY_STATS) stats['id'] = vs_bind.listener_id stats['request_errors'] = 0 # currently unsupported for stat in lb_const.LB_STATS_MAP: lb_stat = 
lb_const.LB_STATS_MAP[stat] stats[stat] += vs_stats[lb_stat] stat_list.append(stats) except nsxlib_exc.ManagerError: pass return stat_list ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/nsx_v3/implementation/loadbalancer_mgr.py0000644000175000017500000004533300000000000032400 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import exceptions as n_exc from oslo_log import log as logging from oslo_utils import excutils from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.db import db as nsx_db from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas import lb_const from vmware_nsx.services.lbaas.nsx_v3.implementation import lb_utils from vmware_nsx.services.lbaas.octavia import constants as oct_const from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3 import utils LOG = logging.getLogger(__name__) class EdgeLoadBalancerManagerFromDict(base_mgr.Nsxv3LoadbalancerBaseManager): def create(self, context, lb, completor): if not lb_utils.validate_lb_subnet(context, self.core_plugin, lb['vip_subnet_id']): completor(success=False) msg = (_('Cannot create lb on subnet %(sub)s for ' 'loadbalancer %(lb)s. 
The subnet needs to connect a ' 'router which is already set gateway.') % {'sub': lb['vip_subnet_id'], 'lb': lb['id']}) raise n_exc.BadRequest(resource='lbaas-subnet', msg=msg) service_client = self.core_plugin.nsxlib.load_balancer.service nsx_router_id = None lb_service = None nsx_router_id = lb_utils.NO_ROUTER_ID router_id = lb_utils.get_router_from_network( context, self.core_plugin, lb['vip_subnet_id']) if router_id: nsx_router_id = nsx_db.get_nsx_router_id(context.session, router_id) lb_service = service_client.get_router_lb_service(nsx_router_id) if not lb_service: lb_size = lb_utils.get_lb_flavor_size( self.flavor_plugin, context, lb.get('flavor_id')) if router_id: # Make sure the NSX service router exists if not self.core_plugin.service_router_has_services( context, router_id): self.core_plugin.create_service_router(context, router_id) lb_service = self._create_lb_service( context, service_client, lb['tenant_id'], router_id, nsx_router_id, lb['id'], lb_size) else: lb_service = self._create_lb_service_without_router( context, service_client, lb['tenant_id'], lb, lb_size) if not lb_service: completor(success=False) msg = (_('Failed to create lb service for loadbalancer ' '%s') % lb['id']) raise nsx_exc.NsxPluginException(err_msg=msg) nsx_db.add_nsx_lbaas_loadbalancer_binding( context.session, lb['id'], lb_service['id'], nsx_router_id, lb['vip_address']) # Make sure the vip port is marked with a device owner port = self.core_plugin.get_port( context.elevated(), lb['vip_port_id']) if not port.get('device_owner'): self.core_plugin.update_port( context.elevated(), lb['vip_port_id'], {'port': {'device_id': oct_const.DEVICE_ID_PREFIX + lb['id'], 'device_owner': lb_const.VMWARE_LB_VIP_OWNER}}) completor(success=True) def _create_lb_service(self, context, service_client, tenant_id, router_id, nsx_router_id, lb_id, lb_size): """Create NSX LB service for a specific neutron router""" router = self.core_plugin.get_router(context, router_id) if not 
router.get('external_gateway_info'): msg = (_('Tenant router %(router)s does not connect to ' 'external gateway') % {'router': router['id']}) raise n_exc.BadRequest(resource='lbaas-lbservice-create', msg=msg) lb_name = utils.get_name_and_uuid(router['name'] or 'router', router_id) tags = lb_utils.get_tags(self.core_plugin, router_id, lb_const.LR_ROUTER_TYPE, tenant_id, context.project_name) attachment = {'target_id': nsx_router_id, 'target_type': 'LogicalRouter'} try: lb_service = service_client.create(display_name=lb_name, tags=tags, attachment=attachment, size=lb_size) except nsxlib_exc.ManagerError as e: # If it failed, it is probably because the service was already # created by another loadbalancer simultaneously lb_service = service_client.get_router_lb_service(nsx_router_id) if lb_service: return lb_service LOG.error("Failed to create LB service: %s", e) return # Add rule to advertise external vips lb_utils.update_router_lb_vip_advertisement( context, self.core_plugin, router, nsx_router_id) return lb_service def _create_lb_service_without_router(self, context, service_client, tenant_id, lb, lb_size): """Create NSX LB service for an external VIP This service will not be attached to a router yet, and it will be updated once the first member is created. 
""" lb_id = lb['id'] lb_name = utils.get_name_and_uuid(lb['name'] or 'loadbalancer', lb_id) tags = lb_utils.get_tags(self.core_plugin, '', lb_const.LR_ROUTER_TYPE, tenant_id, context.project_name) try: lb_service = service_client.create(display_name=lb_name, tags=tags, size=lb_size) except nsxlib_exc.ManagerError as e: LOG.error("Failed to create LB service: %s", e) return return lb_service def update(self, context, old_lb, new_lb, completor): vs_client = self.core_plugin.nsxlib.load_balancer.virtual_server app_client = self.core_plugin.nsxlib.load_balancer.application_profile if new_lb['name'] != old_lb['name']: for listener in new_lb['listeners']: binding = nsx_db.get_nsx_lbaas_listener_binding( context.session, new_lb['id'], listener['id']) if binding: vs_id = binding['lb_vs_id'] app_profile_id = binding['app_profile_id'] new_lb_name = new_lb['name'][:utils.MAX_TAG_LEN] try: # Update tag on virtual server with new lb name vs = vs_client.get(vs_id) updated_tags = utils.update_v3_tags( vs['tags'], [{'scope': lb_const.LB_LB_NAME, 'tag': new_lb_name}]) vs_client.update(vs_id, tags=updated_tags) # Update tag on application profile with new lb name app_profile = app_client.get(app_profile_id) app_client.update( app_profile_id, tags=updated_tags, resource_type=app_profile['resource_type']) except nsxlib_exc.ManagerError: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to update tag %(tag)s for lb ' '%(lb)s', {'tag': updated_tags, 'lb': new_lb['name']}) completor(success=True) def delete(self, context, lb, completor): service_client = self.core_plugin.nsxlib.load_balancer.service router_client = self.core_plugin.nsxlib.logical_router lb_binding = nsx_db.get_nsx_lbaas_loadbalancer_binding( context.session, lb['id']) if lb_binding: lb_service_id = lb_binding['lb_service_id'] nsx_router_id = lb_binding['lb_router_id'] try: lb_service = service_client.get(lb_service_id) except nsxlib_exc.ManagerError: LOG.warning("LB service %(lbs)s is 
not found", {'lbs': lb_service_id}) else: vs_list = lb_service.get('virtual_server_ids') if not vs_list: try: service_client.delete(lb_service_id) # If there is no lb service attached to the router, # delete the router advertise_lb_vip rule. if nsx_router_id != lb_utils.NO_ROUTER_ID: router_client.update_advertisement_rules( nsx_router_id, [], name_prefix=lb_utils.ADV_RULE_NAME) except nsxlib_exc.ManagerError: completor(success=False) msg = (_('Failed to delete lb service %(lbs)s from nsx' ) % {'lbs': lb_service_id}) raise n_exc.BadRequest(resource='lbaas-lb', msg=msg) nsx_db.delete_nsx_lbaas_loadbalancer_binding( context.session, lb['id']) if nsx_router_id != lb_utils.NO_ROUTER_ID: router_id = nsx_db.get_neutron_from_nsx_router_id( context.session, nsx_router_id) # Service router is needed only when the LB exist, and # no other services are using it. if not self.core_plugin.service_router_has_services( context, router_id): self.core_plugin.delete_service_router(context, router_id) # Make sure the vip port is not marked with a vmware device owner try: port = self.core_plugin.get_port( context.elevated(), lb['vip_port_id']) if port.get('device_owner') == lb_const.VMWARE_LB_VIP_OWNER: self.core_plugin.update_port( context.elevated(), lb['vip_port_id'], {'port': {'device_id': '', 'device_owner': ''}}) except n_exc.PortNotFound: # Only log the error and continue anyway LOG.warning("VIP port %s not found while deleting loadbalancer %s", lb['vip_port_id'], lb['id']) except Exception as e: # Just log the error as all other resources were deleted LOG.error("Failed to update neutron port %s devices upon " "loadbalancer deletion: %s", lb['vip_port_id'], e) completor(success=True) def delete_cascade(self, context, lb, completor): """Delete all backend and DB resources of this loadbalancer""" self.delete(context, lb, completor) def refresh(self, context, lb): # TODO(tongl): implement pass def _nsx_status_to_lb_status(self, nsx_status): if not nsx_status: # default fallback 
return lb_const.ONLINE # Statuses that are considered ONLINE: if nsx_status.upper() in ['UP', 'UNKNOWN', 'PARTIALLY_UP', 'NO_STANDBY']: return lb_const.ONLINE # Statuses that are considered OFFLINE: if nsx_status.upper() in ['PRIMARY_DOWN', 'DETACHED', 'DOWN', 'ERROR']: return lb_const.OFFLINE if nsx_status.upper() == 'DISABLED': return lb_const.DISABLED # default fallback LOG.debug("NSX LB status %s - interpreted as ONLINE", nsx_status) return lb_const.ONLINE def get_lb_pool_members_statuses(self, nsx_pool_id, members_statuses): # Combine the NSX pool members data and the NSX statuses to provide # member statuses list # Get the member id from the suffix of the member in the NSX pool list # and find the matching ip+port member in the statuses list # get the members list from the NSX nsx_pool = self.core_plugin.nsxlib.load_balancer.pool.get(nsx_pool_id) if not nsx_pool or not nsx_pool.get('members'): return [] # create a map of existing members: ip+port -> lbaas ID (which is the # suffix of the member name) members_map = {} for member in nsx_pool['members']: ip = member['ip_address'] port = member['port'] if ip not in members_map: members_map[ip] = {} members_map[ip][port] = member['display_name'][-36:] # go over the statuses map, and match the member ip_port, to the ID # in the map statuses = [] for member in members_statuses: ip = member['ip_address'] port = member['port'] if ip in members_map and port in members_map[ip]: member_id = members_map[ip][port] member_status = self._nsx_status_to_lb_status(member['status']) statuses.append({'id': member_id, 'status': member_status}) return statuses def get_operating_status(self, context, id, with_members=False): """Return a map of the operating status of all connected LB objects """ service_client = self.core_plugin.nsxlib.load_balancer.service lb_binding = nsx_db.get_nsx_lbaas_loadbalancer_binding( context.session, id) if not lb_binding: LOG.warning("Failed to get loadbalancer %s operating status. 
" "Mapping was not found", id) return {} lb_service_id = lb_binding['lb_service_id'] try: service_status = service_client.get_status(lb_service_id) if not isinstance(service_status, dict): service_status = {} vs_statuses = service_client.get_virtual_servers_status( lb_service_id) if not isinstance(vs_statuses, dict): vs_statuses = {} except nsxlib_exc.ManagerError: LOG.warning("LB service %(lbs)s is not found", {'lbs': lb_service_id}) return {} # get the loadbalancer status from the LB service lb_status = self._nsx_status_to_lb_status( service_status.get('service_status')) statuses = {lb_const.LOADBALANCERS: [{'id': id, 'status': lb_status}], lb_const.LISTENERS: [], lb_const.POOLS: [], lb_const.MEMBERS: []} # Add the listeners statuses from the virtual servers statuses for vs in vs_statuses.get('results', []): vs_status = self._nsx_status_to_lb_status(vs.get('status')) vs_id = vs.get('virtual_server_id') list_binding = nsx_db.get_nsx_lbaas_listener_binding_by_lb_and_vs( context.session, id, vs_id) if list_binding: listener_id = list_binding['listener_id'] statuses[lb_const.LISTENERS].append( {'id': listener_id, 'status': vs_status}) # Add the pools statuses from the LB service status for pool in service_status.get('pools', []): nsx_pool_id = pool.get('pool_id') pool_status = self._nsx_status_to_lb_status(pool.get('status')) pool_binding = nsx_db.get_nsx_lbaas_pool_binding_by_lb_pool( context.session, id, nsx_pool_id) if pool_binding: pool_id = pool_binding['pool_id'] statuses[lb_const.POOLS].append( {'id': pool_id, 'status': pool_status}) # Add the pools members if with_members and pool.get('members'): statuses[lb_const.MEMBERS].extend( self.get_lb_pool_members_statuses( nsx_pool_id, pool['members'])) return statuses def stats(self, context, lb): # Since multiple LBaaS loadbalancer can share the same LB service, # get the corresponding virtual servers' stats instead of LB service. 
stats = {'active_connections': 0, 'bytes_in': 0, 'bytes_out': 0, 'total_connections': 0} service_client = self.core_plugin.nsxlib.load_balancer.service lb_binding = nsx_db.get_nsx_lbaas_loadbalancer_binding( context.session, lb['id']) vs_list = self._get_lb_virtual_servers(context, lb) if lb_binding: lb_service_id = lb_binding.get('lb_service_id') try: rsp = service_client.get_stats(lb_service_id) if rsp: for vs in rsp.get('virtual_servers', []): # Skip the virtual server that doesn't belong # to this loadbalancer if vs['virtual_server_id'] not in vs_list: continue vs_stats = vs.get('statistics', {}) for stat in lb_const.LB_STATS_MAP: lb_stat = lb_const.LB_STATS_MAP[stat] stats[stat] += vs_stats.get(lb_stat, 0) except nsxlib_exc.ManagerError: msg = _('Failed to retrieve stats from LB service ' 'for loadbalancer %(lb)s') % {'lb': lb['id']} raise n_exc.BadRequest(resource='lbaas-lb', msg=msg) return stats def _get_lb_virtual_servers(self, context, lb): # Get all virtual servers that belong to this loadbalancer vs_list = [] for listener in lb['listeners']: vs_binding = nsx_db.get_nsx_lbaas_listener_binding( context.session, lb['id'], listener['id']) if vs_binding: vs_list.append(vs_binding.get('lb_vs_id')) return vs_list ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/nsx_v3/implementation/member_mgr.py0000644000175000017500000002534100000000000031235 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import exceptions as n_exc from oslo_log import log as logging from oslo_utils import excutils from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import locking from vmware_nsx.db import db as nsx_db from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas import lb_const from vmware_nsx.services.lbaas.nsx_v3.implementation import lb_utils from vmware_nsxlib.v3 import exceptions as nsxlib_exc LOG = logging.getLogger(__name__) class EdgeMemberManagerFromDict(base_mgr.Nsxv3LoadbalancerBaseManager): def _get_info_from_fip(self, context, fip): filters = {'floating_ip_address': [fip]} floating_ips = self.core_plugin.get_floatingips(context, filters=filters) if floating_ips: return (floating_ips[0]['fixed_ip_address'], floating_ips[0]['router_id']) else: msg = (_('Member IP %(fip)s is an external IP, and is expected to ' 'be a floating IP') % {'fip': fip}) raise n_exc.BadRequest(resource='lbaas-vip', msg=msg) def _get_updated_pool_members(self, context, lb_pool, member): network = lb_utils.get_network_from_subnet( context, self.core_plugin, member['subnet_id']) if network.get('router:external'): fixed_ip, router_id = self._get_info_from_fip( context, member['address']) else: fixed_ip = member['address'] for m in lb_pool['members']: if m['ip_address'] == fixed_ip: m['display_name'] = member['name'][:219] + '_' + member['id'] m['weight'] = member['weight'] m['backup_member'] = member.get('backup', False) return lb_pool['members'] def create(self, context, member, completor): with 
locking.LockManager.get_lock( 'member-%s' % str(member['pool']['loadbalancer_id'])): self._member_create(context, member, completor) def _member_create(self, context, member, completor): lb_id = member['pool']['loadbalancer_id'] pool_id = member['pool']['id'] loadbalancer = member['pool']['loadbalancer'] if not lb_utils.validate_lb_member_subnet(context, self.core_plugin, member['subnet_id'], loadbalancer): completor(success=False) msg = (_('Cannot add member %(member)s to pool as member subnet ' '%(subnet)s is neither public nor connected to the LB ' 'router') % {'member': member['id'], 'subnet': member['subnet_id']}) raise n_exc.BadRequest(resource='lbaas-subnet', msg=msg) pool_client = self.core_plugin.nsxlib.load_balancer.pool service_client = self.core_plugin.nsxlib.load_balancer.service network = lb_utils.get_network_from_subnet( context, self.core_plugin, member['subnet_id']) if network.get('router:external'): fixed_ip, router_id = self._get_info_from_fip( context, member['address']) if not router_id: completor(success=False) msg = (_('Floating ip %(fip)s has no router') % { 'fip': member['address']}) raise n_exc.BadRequest(resource='lbaas-vip', msg=msg) else: router_id = lb_utils.get_router_from_network( context, self.core_plugin, member['subnet_id']) fixed_ip = member['address'] binding = nsx_db.get_nsx_lbaas_pool_binding(context.session, lb_id, pool_id) if binding: lb_pool_id = binding.get('lb_pool_id') lb_binding = nsx_db.get_nsx_lbaas_loadbalancer_binding( context.session, lb_id) if not lb_binding: completor(success=False) msg = (_('Failed to get LB binding for member %s') % member['id']) raise nsx_exc.NsxPluginException(err_msg=msg) if lb_binding.lb_router_id == lb_utils.NO_ROUTER_ID: # Need to attach the LB service to the router now # This will happen here in case of external vip nsx_router_id = nsx_db.get_nsx_router_id(context.session, router_id) try: # Make sure the NSX service router exists if not self.core_plugin.verify_sr_at_backend( context, 
router_id): self.core_plugin.create_service_router( context, router_id) tags = lb_utils.get_tags(self.core_plugin, router_id, lb_const.LR_ROUTER_TYPE, member['tenant_id'], context.project_name) service_client.update_service_with_attachment( lb_binding.lb_service_id, nsx_router_id, tags=tags) # TODO(asarfaty): Also update the tags except nsxlib_exc.ManagerError as e: # This will happen if there is another service already # attached to this router. # This is currently a limitation. completor(success=False) msg = (_('Failed to attach router %(rtr)s to LB service ' '%(srv)s: %(e)s') % {'rtr': router_id, 'srv': lb_binding.lb_service_id, 'e': e}) raise nsx_exc.NsxPluginException(err_msg=msg) # Update the nsx router in the DB binding nsx_db.update_nsx_lbaas_loadbalancer_binding( context.session, lb_id, nsx_router_id) # Add rule to advertise external vips router = self.core_plugin.get_router(context, router_id) lb_utils.update_router_lb_vip_advertisement( context, self.core_plugin, router, nsx_router_id) with locking.LockManager.get_lock('pool-member-%s' % lb_pool_id): lb_pool = pool_client.get(lb_pool_id) old_m = lb_pool.get('members', None) new_m = [{ 'display_name': member['name'][:219] + '_' + member['id'], 'ip_address': fixed_ip, 'port': member['protocol_port'], 'weight': member['weight'], 'backup_member': member.get('backup', False)}] members = (old_m + new_m) if old_m else new_m pool_client.update_pool_with_members(lb_pool_id, members) else: completor(success=False) msg = (_('Failed to get pool binding to add member %s') % member['id']) raise nsx_exc.NsxPluginException(err_msg=msg) completor(success=True) def update(self, context, old_member, new_member, completor): lb_id = old_member['pool']['loadbalancer_id'] pool_id = old_member['pool']['id'] pool_client = self.core_plugin.nsxlib.load_balancer.pool pool_binding = nsx_db.get_nsx_lbaas_pool_binding( context.session, lb_id, pool_id) if pool_binding: lb_pool_id = pool_binding.get('lb_pool_id') try: with 
locking.LockManager.get_lock('pool-member-%s' % lb_pool_id): lb_pool = pool_client.get(lb_pool_id) updated_members = self._get_updated_pool_members( context, lb_pool, new_member) pool_client.update_pool_with_members(lb_pool_id, updated_members) except Exception as e: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to update member %(member)s: ' '%(err)s', {'member': old_member['id'], 'err': e}) completor(success=True) def delete(self, context, member, completor): lb_id = member['pool']['loadbalancer_id'] pool_id = member['pool']['id'] pool_client = self.core_plugin.nsxlib.load_balancer.pool pool_binding = nsx_db.get_nsx_lbaas_pool_binding( context.session, lb_id, pool_id) if pool_binding: lb_pool_id = pool_binding.get('lb_pool_id') try: with locking.LockManager.get_lock('pool-member-%s' % lb_pool_id): lb_pool = pool_client.get(lb_pool_id) network = lb_utils.get_network_from_subnet( context, self.core_plugin, member['subnet_id']) if network.get('router:external'): fixed_ip, router_id = self._get_info_from_fip( context, member['address']) else: fixed_ip = member['address'] if 'members' in lb_pool: m_list = lb_pool['members'] members = [m for m in m_list if m['ip_address'] != fixed_ip] pool_client.update_pool_with_members(lb_pool_id, members) except nsxlib_exc.ResourceNotFound: pass except nsxlib_exc.ManagerError: completor(success=False) msg = _('Failed to remove member from pool on NSX backend') raise n_exc.BadRequest(resource='lbaas-member', msg=msg) completor(success=True) def delete_cascade(self, context, member, completor): # No action should be taken on members delete cascade pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/nsx_v3/implementation/pool_mgr.py0000644000175000017500000003131600000000000030736 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. 
# All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools from neutron_lib import exceptions as n_exc from oslo_log import log as logging from oslo_utils import excutils from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.db import db as nsx_db from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas import lb_common from vmware_nsx.services.lbaas import lb_const from vmware_nsx.services.lbaas.nsx_v3.implementation import lb_utils from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3 import utils LOG = logging.getLogger(__name__) class EdgePoolManagerFromDict(base_mgr.Nsxv3LoadbalancerBaseManager): def _get_pool_kwargs(self, name=None, tags=None, algorithm=None, description=None): kwargs = {} if name: kwargs['display_name'] = name if tags: kwargs['tags'] = tags if algorithm: kwargs['algorithm'] = algorithm if description: kwargs['description'] = description kwargs['snat_translation'] = {'type': "LbSnatAutoMap"} return kwargs def _process_vs_update(self, context, pool, switch_type, listener, nsx_pool_id, nsx_vs_id, completor): LOG.debug("Processing NSX virtual server update for pool %(pool_id)s. 
" "Will update VS %(nsx_vs_id)s", {'pool_id': pool['id'], 'nsx_vs_id': nsx_vs_id}) vs_client = self.core_plugin.nsxlib.load_balancer.virtual_server try: # Process pool persistence profile and # create/update/delete profile for virtual server vs_data = vs_client.get(nsx_vs_id) if nsx_pool_id: (persistence_profile_id, post_process_func) = lb_utils.setup_session_persistence( self.core_plugin.nsxlib, pool, self._get_pool_tags(context, pool), switch_type, listener, vs_data) else: post_process_func = functools.partial( lb_utils.delete_persistence_profile, self.core_plugin.nsxlib, vs_data.get('persistence_profile_id')) persistence_profile_id = None except nsxlib_exc.ManagerError: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error("Failed to configure session persistence " "profile for pool %(pool_id)s", {'pool_id': pool['id']}) try: # Update persistence profile and pool on virtual server vs_client.update(nsx_vs_id, pool_id=nsx_pool_id, persistence_profile_id=persistence_profile_id) LOG.debug("Updated NSX virtual server %(vs_id)s with " "pool %(pool_id)s and persistence profile %(prof)s", {'vs_id': nsx_vs_id, 'pool_id': nsx_pool_id, 'prof': persistence_profile_id}) if post_process_func: post_process_func() except nsxlib_exc.ManagerError: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to attach pool %s to virtual ' 'server %s', nsx_pool_id, nsx_vs_id) def _get_pool_tags(self, context, pool): return lb_utils.get_pool_tags(context, self.core_plugin, pool) def create(self, context, pool, completor): lb_id = pool['loadbalancer_id'] pool_client = self.core_plugin.nsxlib.load_balancer.pool pool_name = utils.get_name_and_uuid(pool['name'] or 'pool', pool['id']) tags = self._get_pool_tags(context, pool) description = pool.get('description') lb_algorithm = lb_const.LB_POOL_ALGORITHM_MAP.get(pool['lb_algorithm']) if pool.get('listeners') and len(pool['listeners']) > 1: completor(success=False) msg = (_('Failed to 
create pool: Multiple listeners are not ' 'supported.')) raise n_exc.BadRequest(resource='lbaas-pool', msg=msg) # NOTE(salv-orlando): Guard against accidental compat breakages try: listener = pool['listener'] or pool['listeners'][0] except IndexError: # If listeners is an empty list we hit this exception listener = None # Perform additional validation for session persistence before # creating resources in the backend lb_common.validate_session_persistence(pool, listener, completor) try: kwargs = self._get_pool_kwargs(pool_name, tags, lb_algorithm, description) lb_pool = pool_client.create(**kwargs) nsx_db.add_nsx_lbaas_pool_binding( context.session, lb_id, pool['id'], lb_pool['id']) except nsxlib_exc.ManagerError: completor(success=False) msg = (_('Failed to create pool on NSX backend: %(pool)s') % {'pool': pool['id']}) raise n_exc.BadRequest(resource='lbaas-pool', msg=msg) # The pool object can be created with either --listener or # --loadbalancer option. If listener is present, the virtual server # will be updated with the pool. Otherwise, just return. The binding # will be added later when the pool is associated with layer7 rule. 
# FIXME(salv-orlando): This two-step process can leave a zombie pool on # NSX if the VS update operation fails if listener: listener_id = listener['id'] binding = nsx_db.get_nsx_lbaas_listener_binding( context.session, lb_id, listener_id) if binding: vs_id = binding['lb_vs_id'] # Updae the virtual server only if it exists if vs_id: self._process_vs_update(context, pool, False, listener, lb_pool['id'], vs_id, completor) nsx_db.update_nsx_lbaas_pool_binding( context.session, lb_id, pool['id'], vs_id) else: completor(success=False) msg = (_("Couldn't find binding on the listener: %s") % listener['id']) raise nsx_exc.NsxPluginException(err_msg=msg) completor(success=True) def update(self, context, old_pool, new_pool, completor): pool_client = self.core_plugin.nsxlib.load_balancer.pool pool_name = None tags = None lb_algorithm = None description = None if new_pool['name'] != old_pool['name']: pool_name = utils.get_name_and_uuid(new_pool['name'] or 'pool', new_pool['id']) tags = self._get_pool_tags(context, new_pool) if new_pool['lb_algorithm'] != old_pool['lb_algorithm']: lb_algorithm = lb_const.LB_POOL_ALGORITHM_MAP.get( new_pool['lb_algorithm']) if new_pool.get('description') != old_pool.get('description'): description = new_pool['description'] binding = nsx_db.get_nsx_lbaas_pool_binding( context.session, old_pool['loadbalancer_id'], old_pool['id']) if not binding: completor(success=False) msg = (_('Cannot find pool %(pool)s binding on NSX db ' 'mapping') % {'pool': old_pool['id']}) raise n_exc.BadRequest(resource='lbaas-pool', msg=msg) if new_pool.get('listeners') and len(new_pool['listeners']) > 1: completor(success=False) msg = (_('Failed to update pool %s: Multiple listeners are not ' 'supported.') % new_pool['id']) raise n_exc.BadRequest(resource='lbaas-pool', msg=msg) # NOTE(salv-orlando): Guard against accidental compat breakages try: listener = new_pool['listener'] or new_pool['listeners'][0] except IndexError: # If listeners is an empty list we hit this 
exception listener = None # Perform additional validation for session persistence before # operating on resources in the backend lb_common.validate_session_persistence(new_pool, listener, completor) try: lb_pool_id = binding['lb_pool_id'] kwargs = self._get_pool_kwargs(pool_name, tags, lb_algorithm, description) pool_client.update(lb_pool_id, **kwargs) # Update virtual servers if it exists and there were changes # in session persistence if (listener and new_pool['session_persistence'] != old_pool['session_persistence'] and binding['lb_vs_id']): switch_type = lb_common.session_persistence_type_changed( new_pool, old_pool) self._process_vs_update(context, new_pool, switch_type, listener, lb_pool_id, binding['lb_vs_id'], completor) completor(success=True) except Exception as e: with excutils.save_and_reraise_exception(): completor(success=False) LOG.error('Failed to update pool %(pool)s with ' 'error %(error)s', {'pool': old_pool['id'], 'error': e}) def delete(self, context, pool, completor): lb_id = pool['loadbalancer_id'] pool_client = self.core_plugin.nsxlib.load_balancer.pool binding = nsx_db.get_nsx_lbaas_pool_binding( context.session, lb_id, pool['id']) if binding: vs_id = binding.get('lb_vs_id') lb_pool_id = binding.get('lb_pool_id') if vs_id: # NOTE(salv-orlando): Guard against accidental compat breakages try: listener = pool['listener'] or pool['listeners'][0] except IndexError: # If listeners is an empty list we hit this exception listener = None if listener: self._process_vs_update(context, pool, False, listener, None, vs_id, completor) try: pool_client.delete(lb_pool_id) except nsxlib_exc.ResourceNotFound: pass except nsxlib_exc.ManagerError: completor(success=False) msg = (_('Failed to delete lb pool from nsx: %(pool)s') % {'pool': lb_pool_id}) raise n_exc.BadRequest(resource='lbaas-pool', msg=msg) nsx_db.delete_nsx_lbaas_pool_binding(context.session, lb_id, pool['id']) # Delete the attached health monitor as well if pool.get('healthmonitor'): hm = 
pool['healthmonitor'] monitor_client = self.core_plugin.nsxlib.load_balancer.monitor hm_binding = nsx_db.get_nsx_lbaas_monitor_binding( context.session, lb_id, pool['id'], hm['id']) if hm_binding: lb_monitor_id = hm_binding['lb_monitor_id'] try: monitor_client.delete(lb_monitor_id) except nsxlib_exc.ResourceNotFound: pass except nsxlib_exc.ManagerError as exc: completor(success=False) msg = _('Failed to delete monitor %(monitor)s from ' 'backend with exception %(exc)s') % { 'monitor': hm['id'], 'exc': exc} raise n_exc.BadRequest(resource='lbaas-pool', msg=msg) nsx_db.delete_nsx_lbaas_monitor_binding( context.session, lb_id, pool['id'], hm['id']) completor(success=True) def delete_cascade(self, context, pool, completor): self.delete(context, pool, completor) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2182543 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/octavia/0000755000175000017500000000000000000000000023723 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/octavia/__init__.py0000644000175000017500000000000000000000000026022 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/octavia/constants.py0000644000175000017500000000267000000000000026316 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. OCTAVIA_TO_DRIVER_TOPIC = 'vmware_nsx__lb_listener' OCTAVIA_TO_DRIVER_MIGRATION_TOPIC = 'vmware_nsx__lb_listener_migration' DRIVER_TO_OCTAVIA_TOPIC = 'vmware_nsx__driver_listener' DRIVER_TO_OCTAVIA_MIGRATION_TOPIC = 'vmware_nsx__driver_listener_migration' LOADBALANCER = 'loadbalancer' LISTENER = 'listener' POOL = 'pool' HEALTHMONITOR = 'healthmonitor' MEMBER = 'member' L7POLICY = 'l7policy' L7RULE = 'l7rule' LOADBALANCERS = 'loadbalancers' LISTENERS = 'listeners' POOLS = 'pools' HEALTHMONITORS = 'healthmonitors' MEMBERS = 'members' L7POLICIES = 'l7policies' L7RULES = 'l7rules' ONLINE = 'ONLINE' OFFLINE = 'OFFLINE' ERROR = 'ERROR' ACTIVE = 'ACTIVE' DELETED = 'DELETED' ERROR = 'ERROR' OPERATING_STATUS = 'operating_status' PROVISIONING_STATUS = 'provisioning_status' DEVICE_OWNER_OCTAVIA = 'Octavia' DEVICE_ID_PREFIX = 'lb-' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/octavia/octavia_driver.py0000644000175000017500000006042600000000000027306 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import copy import socket import time from oslo_config import cfg from oslo_log import helpers as log_helpers from oslo_log import log as logging import oslo_messaging as messaging from oslo_messaging.rpc import dispatcher import pecan from stevedore import driver as stevedore_driver from octavia.api.drivers import utils as oct_utils from octavia.db import api as db_apis from octavia.db import repositories from octavia_lib.api.drivers import driver_lib from octavia_lib.api.drivers import exceptions from octavia_lib.api.drivers import provider_base as driver_base from vmware_nsx.services.lbaas.octavia import constants as d_const LOG = logging.getLogger(__name__) cfg.CONF.import_group('oslo_messaging', 'octavia.common.config') TRANSPORT = None RPC_SERVER = None def get_transport(): global TRANSPORT if not TRANSPORT: TRANSPORT = messaging.get_rpc_transport(cfg.CONF) return TRANSPORT def get_rpc_server(target, endpoints, access_policy): global RPC_SERVER if not RPC_SERVER: RPC_SERVER = messaging.get_rpc_server( TRANSPORT, target, endpoints, executor='threading', access_policy=access_policy) return RPC_SERVER # List of keys per object type that will not be sent to the listener unsupported_keys = {'Loadbalancer': ['vip_qos_policy_id'], 'Listener': ['sni_container_refs', 'insert_headers', 'timeout_client_data', 'timeout_member_connect', 'timeout_member_data', 'timeout_tcp_inspect'], 'HealthMonitor': ['max_retries_down'], 'Member': ['monitor_address', 'monitor_port']} class NSXOctaviaDriver(driver_base.ProviderDriver): @log_helpers.log_method_call def __init__(self): super(NSXOctaviaDriver, self).__init__() self._init_rpc_messaging() self._init_cert_manager() self.repositories = repositories.Repositories() @log_helpers.log_method_call def _init_rpc_messaging(self): topic = d_const.OCTAVIA_TO_DRIVER_TOPIC transport = get_transport() target = messaging.Target(topic=topic, 
def get_obj_project_id(self, obj_type, obj_dict):
    """Resolve the project (tenant) id owning an Octavia object dict.

    Checks the object's own 'project_id'/'tenant_id' first, then walks
    its parents (loadbalancer, pool, listener, l7policy) through the
    Octavia DB repositories.  Returns None (after logging a warning)
    when no owner can be determined.

    Fix: the original left ``db_lb`` unbound when an l7policy had
    neither a listener nor a redirect_pool, raising UnboundLocalError
    instead of falling through to the warning.
    """
    project_id = obj_dict.get('project_id') or obj_dict.get('tenant_id')
    if project_id:
        return project_id

    # Look for the project id on the attached parent objects.
    if obj_dict.get('loadbalancer_id'):
        db_lb = self.repositories.load_balancer.get(
            db_apis.get_session(), id=obj_dict['loadbalancer_id'])
        if db_lb:
            project_id = db_lb.project_id
    if not project_id and obj_dict.get('pool_id'):
        db_pool = self.repositories.pool.get(
            db_apis.get_session(), id=obj_dict['pool_id'])
        if db_pool:
            project_id = db_pool.load_balancer.project_id
    if not project_id and obj_dict.get('listener_id'):
        db_list = self.repositories.listener.get(
            db_apis.get_session(), id=obj_dict['listener_id'])
        if db_list:
            project_id = db_list.load_balancer.project_id
    if not project_id and obj_dict.get('l7policy_id'):
        db_policy = self.repositories.l7policy.get(
            db_apis.get_session(), id=obj_dict['l7policy_id'])
        if db_policy:
            # The policy's LB is reached via its listener or its
            # redirect pool; either may be missing.
            owner_lb = None
            if db_policy.listener:
                owner_lb = db_policy.listener.load_balancer
            elif db_policy.redirect_pool:
                owner_lb = db_policy.redirect_pool.load_balancer
            if owner_lb:
                project_id = owner_lb.project_id

    if not project_id:
        LOG.warning("Could not find the tenant id for %(type)s "
                    "%(obj)s", {'type': obj_type, 'obj': obj_dict})
    return project_id
db_lb.vip.port_id lb_dict['vip_network_id'] = db_lb.vip.network_id lb_dict['vip_subnet_id'] = db_lb.vip.subnet_id return lb_dict def _get_listener_in_pool_dict(self, pool_dict, is_update): if 'listener' not in pool_dict: if pool_dict.get('listener_id'): db_listener = self.repositories.listener.get( db_apis.get_session(), id=pool_dict['listener_id']) listener_obj = oct_utils.db_listener_to_provider_listener( db_listener) listener_dict = listener_obj.to_dict( recurse=False, render_unsets=True) listener_dict['id'] = listener_dict['listener_id'] listener_dict['l7_policies'] = listener_dict['l7policies'] # Add the loadbalancer to the listener dict if pool_dict.get('loadbalancer_id'): # Generate a loadbalancer object listener_dict['loadbalancer'] = ( self._get_load_balancer_dict( pool_dict['loadbalancer_id'])) pool_dict['listener'] = listener_dict if 'listeners' not in pool_dict: # multiple listeners is not really supported yet pool_dict['listeners'] = [listener_dict] # Do not add listener in update situation, as we want to use # the original listener of this pool elif not is_update: pool_dict['listener'] = None if 'listeners' not in pool_dict: pool_dict['listeners'] = [] def _get_pool_dict(self, pool_id, is_update, parent_project_id=None): if not pool_id: return {} db_pool = self.repositories.pool.get(db_apis.get_session(), id=pool_id) if not db_pool: return {} pool_obj = oct_utils.db_pool_to_provider_pool(db_pool) pool_dict = pool_obj.to_dict(recurse=True, render_unsets=True) pool_dict['id'] = pool_id # Get the load balancer object if pool_dict.get('loadbalancer_id'): # Generate a loadbalancer object pool_dict['loadbalancer'] = self._get_load_balancer_dict( pool_dict['loadbalancer_id']) if 'listener' not in pool_dict: self._get_listener_in_pool_dict(pool_dict, is_update) # make sure this pool has a project id if not pool_dict.get('project_id'): project_id = self.get_obj_project_id('Pool', pool_dict) if project_id is None: project_id = parent_project_id 
def update_policy_dict(self, policy_dict, policy_obj, is_update=False):
    """Fill the listener and rules sub-structures of an l7policy dict.

    The listener is loaded from the Octavia DB when 'listener_id' is
    present.  Rules are taken from policy_obj; each rule gains an 'id'
    copied from its 'l7rule_id'.  On update, an empty rule set is left
    out so the original rules of the policy are preserved.
    """
    if policy_dict.get('listener_id'):
        db_list = self.repositories.listener.get(
            db_apis.get_session(), id=policy_dict['listener_id'])
        list_obj = oct_utils.db_listener_to_provider_listener(db_list)
        list_dict = list_obj.to_dict(recurse=True, render_unsets=True)
        list_dict['id'] = policy_dict['listener_id']
        policy_dict['listener'] = list_dict

    if policy_obj.rules:
        translated_rules = []
        for rule in policy_obj.rules:
            if isinstance(rule, dict):
                rule_dict = rule
            else:
                rule_dict = rule.to_dict(recurse=False,
                                         render_unsets=True)
            rule_dict['id'] = rule_dict['l7rule_id']
            translated_rules.append(rule_dict)
        policy_dict['rules'] = translated_rules
    elif not is_update:
        policy_dict['rules'] = []
# NOTE(review): interior of NSXOctaviaDriver.obj_to_dict (tar-dump mangled
# onto one line; the method's head and tail are on adjacent lines).  Per
# object type it normalizes provider objects to the dict shape the NSX
# plugin expects: copies '<type>_id' into 'id' at every nesting level,
# strips unsupported keys, and inlines parent/child objects (loadbalancer,
# default pool, health monitor).  Left byte-identical — the branch
# ordering and the is_update deletions are too intricate to restyle safely.
'id' not in obj_dict: obj_dict['id'] = obj_dict.get('%s_id' % obj_type.lower()) if not obj_dict.get('name') and not is_update: obj_dict['name'] = "" self._remove_unsupported_keys(obj_type, obj_dict) if obj_type == 'LoadBalancer': # clean listeners and pools for update case: if 'listeners' in obj_dict: if is_update and not obj_dict['listeners']: del obj_dict['listeners'] else: if obj_dict['listeners'] is None: obj_dict['listeners'] = [] for listener in obj_dict['listeners']: listener['id'] = listener['listener_id'] for policy in listener.get('l7policies', []): policy['id'] = policy['l7policy_id'] for rule in policy.get('rules', []): rule['id'] = rule['l7rule_id'] if 'pools' in obj_dict: if is_update and not obj_dict['pools']: del obj_dict['pools'] else: if obj_dict['pools'] is None: obj_dict['pools'] = [] for pool in obj_dict['pools']: pool['id'] = pool['pool_id'] for member in pool.get('members', []): member['id'] = member['member_id'] if pool.get('healthmonitor'): pool['healthmonitor'] = self._get_hm_dict( pool['healthmonitor']['healthmonitor_id'], is_update) pool['tenant_id'] = project_id elif obj_type == 'Listener': if 'l7policies' in obj_dict: obj_dict['l7_policies'] = obj_dict['l7policies'] if obj_dict.get('loadbalancer_id'): # Generate a loadbalancer object obj_dict['loadbalancer'] = self._get_load_balancer_dict( obj_dict['loadbalancer_id']) if obj_dict.get('default_pool_id'): # Generate the default pool object obj_dict['default_pool'] = self._get_pool_dict( obj_dict['default_pool_id'], is_update, project_id) # TODO(asarfaty): add default_tls_container_id elif obj_type == 'Pool': if 'listener' not in obj_dict: self._get_listener_in_pool_dict(obj_dict, is_update) if obj_dict.get('healthmonitor'): obj_dict['healthmonitor']['id'] = obj_dict[ 'healthmonitor']['healthmonitor_id'] elif obj_type == 'Member': # Get the pool object if obj_dict.get('pool_id'): obj_dict['pool'] = self._get_pool_dict( obj_dict['pool_id'], is_update) obj_dict['loadbalancer'] = None if
# Load Balancer

@log_helpers.log_method_call
def create_vip_port(self, loadbalancer_id, project_id, vip_dictionary):
    """VIP port creation is left to Octavia/Neutron; not provided here."""
    raise exceptions.NotImplementedError()


@log_helpers.log_method_call
def loadbalancer_create(self, loadbalancer):
    """Cast a loadbalancer-create request to the NSX plugin listener."""
    self.client.cast({}, 'loadbalancer_create',
                     loadbalancer=self.obj_to_dict(loadbalancer))


@log_helpers.log_method_call
def loadbalancer_delete(self, loadbalancer, cascade=False):
    """Cast a loadbalancer-delete request, optionally cascading."""
    self.client.cast({}, 'loadbalancer_delete',
                     loadbalancer=self.obj_to_dict(loadbalancer),
                     cascade=cascade)


@log_helpers.log_method_call
def loadbalancer_failover(self, loadbalancer_id):
    """Failover is handled by the NSX platform itself; not supported."""
    LOG.error('Loadbalancer failover is handled by platform')
    raise exceptions.NotImplementedError()
# Listener

@log_helpers.log_method_call
def listener_create(self, listener):
    """Cast a listener-create, attaching its TLS certificate if any."""
    listener_dict = self.obj_to_dict(listener)
    cert = None
    if listener_dict.get('tls_certificate_id'):
        # Fetch the certificate here so the NSX side does not need
        # access to the Octavia certificate store.
        context = pecan.request.context.get('octavia_context')
        cert = self.cert_manager.get_cert(
            context, listener_dict['tls_certificate_id'])
    self.client.cast({}, 'listener_create',
                     listener=listener_dict, cert=cert)


@log_helpers.log_method_call
def listener_delete(self, listener):
    """Cast a listener-delete request to the NSX plugin listener."""
    self.client.cast({}, 'listener_delete',
                     listener=self.obj_to_dict(listener))


@log_helpers.log_method_call
def listener_update(self, old_listener, new_listener):
    """Cast a listener-update with the old state and the merged new one."""
    old_dict = self.obj_to_dict(old_listener)
    new_dict = copy.deepcopy(old_dict)
    new_dict.update(self.obj_to_dict(
        new_listener, is_update=True,
        project_id=old_dict.get('project_id')))
    cert = None
    if new_dict.get('tls_certificate_id'):
        context = pecan.request.context.get('octavia_context')
        cert = self.cert_manager.get_cert(
            context, new_dict['tls_certificate_id'])
    self.client.cast({}, 'listener_update',
                     old_listener=old_dict, new_listener=new_dict,
                     cert=cert)


# Pool

@log_helpers.log_method_call
def pool_create(self, pool):
    """Cast a pool-create request to the NSX plugin listener."""
    self.client.cast({}, 'pool_create', pool=self.obj_to_dict(pool))


@log_helpers.log_method_call
def pool_delete(self, pool):
    """Cast a pool-delete request to the NSX plugin listener."""
    self.client.cast({}, 'pool_delete', pool=self.obj_to_dict(pool))
# Member

@log_helpers.log_method_call
def member_create(self, member):
    """Cast a member-create request to the NSX plugin listener."""
    self.client.cast({}, 'member_create',
                     member=self.obj_to_dict(member))


@log_helpers.log_method_call
def member_delete(self, member):
    """Cast a member-delete request to the NSX plugin listener."""
    self.client.cast({}, 'member_delete',
                     member=self.obj_to_dict(member))


@log_helpers.log_method_call
def member_update(self, old_member, new_member):
    """Cast a member-update with the old state and the merged new one."""
    old_dict = self.obj_to_dict(old_member)
    new_dict = copy.deepcopy(old_dict)
    new_dict.update(self.obj_to_dict(
        new_member, is_update=True,
        project_id=old_dict.get('project_id')))
    self.client.cast({}, 'member_update',
                     old_member=old_dict, new_member=new_dict)


@log_helpers.log_method_call
def member_batch_update(self, members):
    """Batch member updates are not supported by this provider."""
    raise NotImplementedError()


# Health Monitor

@log_helpers.log_method_call
def health_monitor_create(self, healthmonitor):
    """Cast a healthmonitor-create request to the NSX plugin listener."""
    self.client.cast({}, 'healthmonitor_create',
                     healthmonitor=self.obj_to_dict(healthmonitor))


@log_helpers.log_method_call
def health_monitor_delete(self, healthmonitor):
    """Cast a healthmonitor-delete request to the NSX plugin listener."""
    self.client.cast({}, 'healthmonitor_delete',
                     healthmonitor=self.obj_to_dict(healthmonitor))


@log_helpers.log_method_call
def health_monitor_update(self, old_healthmonitor, new_healthmonitor):
    """Cast a healthmonitor-update with old and merged-new state."""
    old_dict = self.obj_to_dict(old_healthmonitor)
    new_dict = copy.deepcopy(old_dict)
    new_dict.update(self.obj_to_dict(
        new_healthmonitor, is_update=True,
        project_id=old_dict.get('project_id')))
    self.client.cast({}, 'healthmonitor_update',
                     old_healthmonitor=old_dict,
                     new_healthmonitor=new_dict)
# L7 Policy

@log_helpers.log_method_call
def l7policy_update(self, old_l7policy, new_l7policy):
    """Cast an l7policy-update with the old state and the merged new one."""
    old_dict = self.obj_to_dict(old_l7policy)
    new_dict = copy.deepcopy(old_dict)
    new_dict.update(self.obj_to_dict(
        new_l7policy, is_update=True,
        project_id=old_dict.get('project_id')))
    self.client.cast({}, 'l7policy_update',
                     old_l7policy=old_dict, new_l7policy=new_dict)


# L7 Rule

@log_helpers.log_method_call
def l7rule_create(self, l7rule):
    """Cast an l7rule-create request to the NSX plugin listener."""
    self.client.cast({}, 'l7rule_create',
                     l7rule=self.obj_to_dict(l7rule))


@log_helpers.log_method_call
def l7rule_delete(self, l7rule):
    """Cast an l7rule-delete request to the NSX plugin listener."""
    self.client.cast({}, 'l7rule_delete',
                     l7rule=self.obj_to_dict(l7rule))


@log_helpers.log_method_call
def l7rule_update(self, old_l7rule, new_l7rule):
    """Cast an l7rule-update with the old state and the merged new one."""
    old_dict = self.obj_to_dict(old_l7rule)
    new_dict = copy.deepcopy(old_dict)
    new_dict.update(self.obj_to_dict(
        new_l7rule, is_update=True,
        project_id=old_dict.get('project_id')))
    self.client.cast({}, 'l7rule_update',
                     old_l7rule=old_dict, new_l7rule=new_dict)


# Flavor — not implemented by the NSX provider.

@log_helpers.log_method_call
def get_supported_flavor_metadata(self):
    raise exceptions.NotImplementedError()


@log_helpers.log_method_call
def validate_flavor(self, flavor_metadata):
    raise exceptions.NotImplementedError()
@log_helpers.log_method_call
def update_listener_statistics(self, ctxt, statistics):
    """RPC endpoint: push listener statistics into the Octavia DB.

    Errors are logged rather than raised back over RPC.
    """
    # Refresh the driver-lib DB session before use.
    self.db_session = db_apis.get_session()
    try:
        return super(NSXOctaviaDriverEndpoint,
                     self).update_listener_statistics(statistics)
    except exceptions.UpdateStatisticsError as err:
        LOG.error("Failed to update Octavia listener statistics. "
                  "Stats %s, Error %s", statistics, err.fault_string)


@log_helpers.log_method_call
def vmware_nsx_provider_agent(exit_event):
    """Octavia provider-agent entry point.

    Starts an RPC server handling status/statistics updates coming from
    the NSX plugin, then polls exit_event until the agent framework asks
    it to stop.
    """
    target = messaging.Target(topic=d_const.DRIVER_TO_OCTAVIA_TOPIC,
                              server=socket.gethostname(),
                              exchange="common", fanout=False)
    endpoints = [NSXOctaviaDriverEndpoint()]
    get_transport()
    octavia_server = get_rpc_server(
        target, endpoints, dispatcher.DefaultRPCAccessPolicy)
    octavia_server.start()
    LOG.info('VMware NSX Octavia provider agent has started.')
    while not exit_event.is_set():
        time.sleep(1)
    LOG.info('VMware NSX Octavia provider agent is exiting.')
@log_helpers.log_method_call
def __init__(self, loadbalancer=None, listener=None, pool=None,
             member=None, healthmonitor=None, l7policy=None, l7rule=None):
    """Wire up the RPC client toward Octavia and the RPC server from it.

    Each keyword argument is the NSX-side handler for the corresponding
    Octavia resource type.
    """
    self._init_rpc_messaging()
    self._init_rpc_listener(healthmonitor, l7policy, l7rule, listener,
                            loadbalancer, member, pool)


def _init_rpc_messaging(self):
    """Create the RPC client used to push status updates to Octavia.

    In api_replay_mode the migration topic is used instead of the
    regular driver topic.
    """
    topic = (constants.DRIVER_TO_OCTAVIA_MIGRATION_TOPIC
             if cfg.CONF.api_replay_mode
             else constants.DRIVER_TO_OCTAVIA_TOPIC)
    transport = messaging.get_rpc_transport(cfg.CONF)
    target = messaging.Target(topic=topic, exchange="common",
                              namespace='control', fanout=False,
                              version='1.0')
    self.client = messaging.RPCClient(transport, target)
def __init__(self, client=None, loadbalancer=None, listener=None,
             pool=None, member=None, healthmonitor=None, l7policy=None,
             l7rule=None):
    """Store the status RPC client and the per-resource NSX handlers."""
    self.client = client
    self.loadbalancer = loadbalancer
    self.listener = listener
    self.pool = pool
    self.member = member
    self.healthmonitor = healthmonitor
    self.l7policy = l7policy
    self.l7rule = l7rule
    self._subscribe_router_delete_callback()


def _subscribe_router_delete_callback(self):
    """Guard router/GW/interface deletion while LB services are attached.

    Note(asarfaty): these callbacks are used by Octavia as well even
    though they are bound only here.
    """
    registry.subscribe(self._check_lb_service_on_router,
                       resources.ROUTER, events.BEFORE_DELETE)
    registry.subscribe(self._check_lb_service_on_router,
                       resources.ROUTER_GATEWAY, events.BEFORE_DELETE)
    registry.subscribe(self._check_lb_service_on_router_interface,
                       resources.ROUTER_INTERFACE, events.BEFORE_DELETE)


def _unsubscribe_router_delete_callback(self):
    """Remove the router-deletion guards registered above."""
    registry.unsubscribe(self._check_lb_service_on_router,
                         resources.ROUTER, events.BEFORE_DELETE)
    registry.unsubscribe(self._check_lb_service_on_router,
                         resources.ROUTER_GATEWAY, events.BEFORE_DELETE)
    registry.unsubscribe(self._check_lb_service_on_router_interface,
                         resources.ROUTER_INTERFACE, events.BEFORE_DELETE)


def _get_core_plugin(self, context, project_id=None):
    """Return the core plugin, resolving the per-project plugin on TVD.

    With project_id=None the TVD default plugin is returned.
    """
    core_plugin = self.loadbalancer.core_plugin
    if core_plugin.is_tvd_plugin():
        core_plugin = core_plugin._get_plugin_from_project(
            context, project_id)
    return core_plugin
project_id=None) def _get_lb_ports(self, context, subnet_ids): dev_owner_v2 = n_consts.DEVICE_OWNER_LOADBALANCERV2 dev_owner_oct = constants.DEVICE_OWNER_OCTAVIA filters = {'device_owner': [dev_owner_v2, dev_owner_oct], 'fixed_ips': {'subnet_id': subnet_ids}} core_plugin = self._get_default_core_plugin(context) return core_plugin.get_ports(context, filters=filters) def _check_lb_service_on_router(self, resource, event, trigger, payload=None): """Prevent removing a router GW or deleting a router used by LB""" router_id = payload.resource_id core_plugin = self.loadbalancer.core_plugin if core_plugin.is_tvd_plugin(): # TVD support # get the default core plugin so we can get the router project default_core_plugin = self._get_default_core_plugin( payload.context) router = default_core_plugin.get_router( payload.context, router_id) # get the real core plugin core_plugin = self._get_core_plugin( payload.context, router['project_id']) if core_plugin.service_router_has_loadbalancers( payload.context, router_id): msg = _('Cannot delete a %s as it still has lb service ' 'attachment') % resource raise n_exc.BadRequest(resource='lbaas-lb', msg=msg) def _check_lb_service_on_router_interface( self, resource, event, trigger, payload=None): # Prevent removing the interface of an LB subnet from a router router_id = payload.resource_id subnet_id = payload.metadata.get('subnet_id') if not router_id or not subnet_id: return # get LB ports and check if any loadbalancer is using this subnet if self._get_lb_ports(payload.context.elevated(), [subnet_id]): msg = _('Cannot delete a router interface as it used by a ' 'loadbalancer') raise n_exc.BadRequest(resource='lbaas-lb', msg=msg) def get_completor_func(self, obj_type, obj, delete=False, cascade=False): # return a method that will be called on success/failure completion def completor_func(success=True): LOG.debug("Octavia transaction completed. 
# NOTE(review): interior of NSXOctaviaListenerEndpoint.get_completor_func /
# completor_func (tar-dump mangled; head and tail are on adjacent lines).
# On completion it builds a status_dict mapping object types to
# provisioning/operating statuses — ACTIVE/DELETED/ERROR for the object
# itself, parent LB/listener/pool/policy marked ACTIVE (or ERROR on
# failure), and on cascade-delete every child in the LB tree marked
# DELETED — then casts it to Octavia via update_loadbalancer_status.
# Left byte-identical: the parent-resolution fallbacks are order-sensitive.
delete %s, status %s", delete, 'success' if success else 'failure') # calculate the provisioning and operating statuses main_prov_status = constants.ACTIVE parent_prov_status = constants.ACTIVE if not success: main_prov_status = constants.ERROR parent_prov_status = constants.ERROR elif delete: main_prov_status = constants.DELETED op_status = constants.ONLINE if success else constants.ERROR # add the status of the created/deleted/updated object status_dict = { obj_type: [{ 'id': obj['id'], constants.PROVISIONING_STATUS: main_prov_status, constants.OPERATING_STATUS: op_status}]} # Get all its parents, and update their statuses as well loadbalancer_id = None listener_id = None pool_id = None policy_id = None if obj_type != constants.LOADBALANCERS: loadbalancer_id = None if obj.get('loadbalancer_id'): loadbalancer_id = obj.get('loadbalancer_id') if obj.get('pool'): pool_id = obj['pool']['id'] listener_id = obj['pool'].get('listener_id') if not loadbalancer_id: loadbalancer_id = obj['pool'].get('loadbalancer_id') elif obj.get('pool_id'): pool_id = obj['pool_id'] if obj.get('listener'): listener_id = obj['listener']['id'] if not loadbalancer_id: loadbalancer_id = obj['listener'].get( 'loadbalancer_id') elif obj.get('listener_id'): listener_id = obj['listener_id'] if obj.get('policy') and obj['policy'].get('listener'): policy_id = obj['policy']['id'] if not listener_id: listener_id = obj['policy']['listener']['id'] if not loadbalancer_id: loadbalancer_id = obj['policy']['listener'].get( 'loadbalancer_id') if (loadbalancer_id and not status_dict.get(constants.LOADBALANCERS)): status_dict[constants.LOADBALANCERS] = [{ 'id': loadbalancer_id, constants.PROVISIONING_STATUS: parent_prov_status, constants.OPERATING_STATUS: op_status}] if (listener_id and not status_dict.get(constants.LISTENERS)): status_dict[constants.LISTENERS] = [{ 'id': listener_id, constants.PROVISIONING_STATUS: parent_prov_status, constants.OPERATING_STATUS: op_status}] if (pool_id and not
status_dict.get(constants.POOLS)): status_dict[constants.POOLS] = [{ 'id': pool_id, constants.PROVISIONING_STATUS: parent_prov_status, constants.OPERATING_STATUS: op_status}] if (policy_id and not status_dict.get(constants.L7POLICIES)): status_dict[constants.L7POLICIES] = [{ 'id': policy_id, constants.PROVISIONING_STATUS: parent_prov_status, constants.OPERATING_STATUS: op_status}] elif delete and cascade: # add deleted status to all other objects status_dict[constants.LISTENERS] = [] status_dict[constants.POOLS] = [] status_dict[constants.MEMBERS] = [] status_dict[constants.L7POLICIES] = [] status_dict[constants.L7RULES] = [] status_dict[constants.HEALTHMONITORS] = [] for pool in obj.get('pools', []): for member in pool.get('members', []): status_dict[constants.MEMBERS].append( {'id': member['id'], constants.PROVISIONING_STATUS: constants.DELETED, constants.OPERATING_STATUS: op_status}) if pool.get('healthmonitor'): status_dict[constants.HEALTHMONITORS].append( {'id': pool['healthmonitor']['id'], constants.PROVISIONING_STATUS: constants.DELETED, constants.OPERATING_STATUS: op_status}) status_dict[constants.POOLS].append( {'id': pool['id'], constants.PROVISIONING_STATUS: constants.DELETED, constants.OPERATING_STATUS: op_status}) for listener in obj.get('listeners', []): status_dict[constants.LISTENERS].append( {'id': listener['id'], constants.PROVISIONING_STATUS: constants.DELETED, constants.OPERATING_STATUS: op_status}) for policy in listener.get('l7policies', []): status_dict[constants.L7POLICIES].append( {'id': policy['id'], constants.PROVISIONING_STATUS: constants.DELETED, constants.OPERATING_STATUS: op_status}) for rule in policy.get('rules', []): status_dict[constants.L7RULES].append( {'id': rule['id'], constants.PROVISIONING_STATUS: constants.DELETED, constants.OPERATING_STATUS: op_status}) LOG.debug("Octavia transaction completed with statuses %s", status_dict) kw = {'status': status_dict} self.client.cast({}, 'update_loadbalancer_status', **kw) return
def update_listener_statistics(self, statistics):
    """Forward listener statistics to the Octavia driver over RPC."""
    self.client.cast({}, 'update_listener_statistics',
                     statistics=statistics)


@log_helpers.log_method_call
def loadbalancer_create(self, ctxt, loadbalancer):
    """RPC endpoint: create a loadbalancer on the NSX backend."""
    ctx = neutron_context.Context(None, loadbalancer['project_id'])
    completor = self.get_completor_func(constants.LOADBALANCERS,
                                        loadbalancer)
    try:
        self.loadbalancer.create(ctx, loadbalancer, completor)
    except Exception as err:
        LOG.error('NSX driver loadbalancer_create failed %s', err)
        completor(success=False)
        return False
    return True


@log_helpers.log_method_call
def loadbalancer_delete_cascade(self, ctxt, loadbalancer):
    """RPC endpoint: delete a loadbalancer and its whole object tree.

    Children are removed leaf-first with a no-op completor; the final
    delete_cascade call reports the DELETED status for the entire tree
    in one update.
    """
    ctx = neutron_context.Context(None, loadbalancer['project_id'])

    def noop_completor(success=True):
        pass

    for listener in loadbalancer.get('listeners', []):
        for policy in listener.get('l7policies', []):
            for rule in policy.get('rules', []):
                self.l7rule.delete_cascade(ctx, rule, noop_completor)
            self.l7policy.delete_cascade(ctx, policy, noop_completor)
        self.listener.delete_cascade(ctx, listener, noop_completor)
    for pool in loadbalancer.get('pools', []):
        for member in pool.get('members', []):
            self.member.delete_cascade(ctx, member, noop_completor)
        if pool.get('healthmonitor'):
            self.healthmonitor.delete_cascade(
                ctx, pool['healthmonitor'], noop_completor)
        self.pool.delete_cascade(ctx, pool, noop_completor)

    # Delete the loadbalancer itself with the completor that marks all
    # objects in the tree as deleted.
    completor = self.get_completor_func(constants.LOADBALANCERS,
                                        loadbalancer, delete=True)
    try:
        self.loadbalancer.delete_cascade(
            ctx, loadbalancer,
            self.get_completor_func(constants.LOADBALANCERS,
                                    loadbalancer, delete=True,
                                    cascade=True))
    except Exception as err:
        LOG.error('NSX driver loadbalancer_delete_cascade failed %s',
                  err)
        completor(success=False)
        return False
    return True
@log_helpers.log_method_call
def loadbalancer_update(self, ctxt, old_loadbalancer, new_loadbalancer):
    """RPC endpoint: update a loadbalancer on the NSX backend."""
    ctx = neutron_context.Context(None, old_loadbalancer['project_id'])
    completor = self.get_completor_func(constants.LOADBALANCERS,
                                        new_loadbalancer)
    try:
        self.loadbalancer.update(ctx, old_loadbalancer,
                                 new_loadbalancer, completor)
    except Exception as err:
        LOG.error('NSX driver loadbalancer_update failed %s', err)
        completor(success=False)
        return False
    return True


# Listener

@log_helpers.log_method_call
def listener_create(self, ctxt, listener, cert):
    """RPC endpoint: create a listener, with its TLS certificate."""
    ctx = neutron_context.Context(None, listener['project_id'])
    completor = self.get_completor_func(constants.LISTENERS, listener)
    try:
        self.listener.create(ctx, listener, completor,
                             certificate=cert)
    except Exception as err:
        LOG.error('NSX driver listener_create failed %s', err)
        completor(success=False)
        return False
    return True


@log_helpers.log_method_call
def listener_delete(self, ctxt, listener):
    """RPC endpoint: delete a listener on the NSX backend."""
    ctx = neutron_context.Context(None, listener['project_id'])
    completor = self.get_completor_func(constants.LISTENERS, listener,
                                        delete=True)
    try:
        self.listener.delete(ctx, listener, completor)
    except Exception as err:
        LOG.error('NSX driver listener_delete failed %s', err)
        completor(success=False)
        return False
    return True
# Pool

@log_helpers.log_method_call
def pool_create(self, ctxt, pool):
    """RPC endpoint: create a pool on the NSX backend."""
    ctx = neutron_context.Context(None, pool['project_id'])
    completor = self.get_completor_func(constants.POOLS, pool)
    try:
        self.pool.create(ctx, pool, completor)
    except Exception as err:
        LOG.error('NSX driver pool_create failed %s', err)
        completor(success=False)
        return False
    return True


@log_helpers.log_method_call
def pool_delete(self, ctxt, pool):
    """RPC endpoint: delete a pool on the NSX backend."""
    ctx = neutron_context.Context(None, pool['project_id'])
    completor = self.get_completor_func(constants.POOLS, pool,
                                        delete=True)
    try:
        self.pool.delete(ctx, pool, completor)
    except Exception as err:
        LOG.error('NSX driver pool_delete failed %s', err)
        completor(success=False)
        return False
    return True


@log_helpers.log_method_call
def pool_update(self, ctxt, old_pool, new_pool):
    """RPC endpoint: update a pool on the NSX backend."""
    ctx = neutron_context.Context(None, old_pool['project_id'])
    completor = self.get_completor_func(constants.POOLS, new_pool)
    try:
        self.pool.update(ctx, old_pool, new_pool, completor)
    except Exception as err:
        LOG.error('NSX driver pool_update failed %s', err)
        completor(success=False)
        return False
    return True


# Member

@log_helpers.log_method_call
def member_create(self, ctxt, member):
    """RPC endpoint: create a pool member on the NSX backend."""
    ctx = neutron_context.Context(None, member['project_id'])
    completor = self.get_completor_func(constants.MEMBERS, member)
    try:
        self.member.create(ctx, member, completor)
    except Exception as err:
        LOG.error('NSX driver member_create failed %s', err)
        completor(success=False)
        return False
    return True


@log_helpers.log_method_call
def member_delete(self, ctxt, member):
    """RPC endpoint: delete a pool member on the NSX backend."""
    ctx = neutron_context.Context(None, member['project_id'])
    completor = self.get_completor_func(constants.MEMBERS, member,
                                        delete=True)
    try:
        self.member.delete(ctx, member, completor)
    except Exception as err:
        LOG.error('NSX driver member_delete failed %s', err)
        completor(success=False)
        return False
    return True
@log_helpers.log_method_call def member_update(self, ctxt, old_member, new_member): ctx = neutron_context.Context(None, old_member['project_id']) completor = self.get_completor_func(constants.MEMBERS, new_member) try: self.member.update(ctx, old_member, new_member, completor) except Exception as e: LOG.error('NSX driver member_update failed %s', e) completor(success=False) return False return True # Health Monitor @log_helpers.log_method_call def healthmonitor_create(self, ctxt, healthmonitor): ctx = neutron_context.Context(None, healthmonitor['project_id']) completor = self.get_completor_func(constants.HEALTHMONITORS, healthmonitor) try: self.healthmonitor.create(ctx, healthmonitor, completor) except Exception as e: LOG.error('NSX driver healthmonitor_create failed %s', e) completor(success=False) return False return True @log_helpers.log_method_call def healthmonitor_delete(self, ctxt, healthmonitor): ctx = neutron_context.Context(None, healthmonitor['project_id']) completor = self.get_completor_func(constants.HEALTHMONITORS, healthmonitor, delete=True) try: self.healthmonitor.delete(ctx, healthmonitor, completor) except Exception as e: LOG.error('NSX driver healthmonitor_delete failed %s', e) completor(success=False) return False return True @log_helpers.log_method_call def healthmonitor_update(self, ctxt, old_healthmonitor, new_healthmonitor): ctx = neutron_context.Context(None, old_healthmonitor['project_id']) completor = self.get_completor_func(constants.HEALTHMONITORS, new_healthmonitor) try: self.healthmonitor.update(ctx, old_healthmonitor, new_healthmonitor, completor) except Exception as e: LOG.error('NSX driver healthmonitor_update failed %s', e) completor(success=False) return False return True # L7 Policy @log_helpers.log_method_call def l7policy_create(self, ctxt, l7policy): ctx = neutron_context.Context(None, l7policy['project_id']) completor = self.get_completor_func(constants.L7POLICIES, l7policy) try: self.l7policy.create(ctx, l7policy, completor) 
except Exception as e: LOG.error('NSX driver l7policy_create failed %s', e) completor(success=False) return False return True @log_helpers.log_method_call def l7policy_delete(self, ctxt, l7policy): ctx = neutron_context.Context(None, l7policy['project_id']) completor = self.get_completor_func(constants.L7POLICIES, l7policy, delete=True) try: self.l7policy.delete(ctx, l7policy, completor) except Exception as e: LOG.error('NSX driver l7policy_delete failed %s', e) completor(success=False) return False return True @log_helpers.log_method_call def l7policy_update(self, ctxt, old_l7policy, new_l7policy): ctx = neutron_context.Context(None, old_l7policy['project_id']) completor = self.get_completor_func(constants.L7POLICIES, new_l7policy) try: self.l7policy.update(ctx, old_l7policy, new_l7policy, completor) except Exception as e: LOG.error('NSX driver l7policy_update failed %s', e) completor(success=False) return False return True # L7 Rule @log_helpers.log_method_call def l7rule_create(self, ctxt, l7rule): ctx = neutron_context.Context(None, l7rule['project_id']) completor = self.get_completor_func(constants.L7RULES, l7rule) try: self.l7rule.create(ctx, l7rule, completor) except Exception as e: LOG.error('NSX driver l7rule_create failed %s', e) completor(success=False) return False return True @log_helpers.log_method_call def l7rule_delete(self, ctxt, l7rule): ctx = neutron_context.Context(None, l7rule['project_id']) completor = self.get_completor_func(constants.L7RULES, l7rule, delete=True) try: self.l7rule.delete(ctx, l7rule, completor) except Exception as e: LOG.error('NSX driver l7rule_delete failed %s', e) completor(success=False) return False return True @log_helpers.log_method_call def l7rule_update(self, ctxt, old_l7rule, new_l7rule): ctx = neutron_context.Context(None, old_l7rule['project_id']) completor = self.get_completor_func(constants.L7RULES, new_l7rule) try: self.l7rule.update(ctx, old_l7rule, new_l7rule, completor) except Exception as e: LOG.error('NSX 
driver l7rule_update failed %s', e) completor(success=False) return False return True class NSXOctaviaStatisticsCollector(object): def __init__(self, core_plugin, listener_stats_getter): self.core_plugin = core_plugin self.listener_stats_getter = listener_stats_getter if cfg.CONF.octavia_stats_interval: eventlet.spawn_n(self.thread_runner, cfg.CONF.octavia_stats_interval) def thread_runner(self, interval): while True: time.sleep(interval) self.collect() def collect(self): if not self.core_plugin.octavia_listener: return endpoint = self.core_plugin.octavia_listener.endpoints[0] context = neutron_context.get_admin_context() listeners_stats = self.listener_stats_getter( context, self.core_plugin) if not listeners_stats: # Avoid sending empty stats return stats = {'listeners': listeners_stats} endpoint.update_listener_statistics(stats) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/lbaas/octavia/tvd_wrapper.py0000644000175000017500000000601300000000000026632 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from neutron_lib import exceptions as n_exc from neutron_lib.plugins import constants from neutron_lib.plugins import directory from vmware_nsx.extensions import projectpluginmap from vmware_nsx.plugins.nsx import utils as tvd_utils LOG = logging.getLogger(__name__) class OctaviaTVDWrapper(object): _core_plugin = None def __init__(self, v_manager, t_manager): self.managers = {} if v_manager: self.managers[projectpluginmap.NsxPlugins.NSX_V] = v_manager if t_manager: self.managers[projectpluginmap.NsxPlugins.NSX_T] = t_manager def _get_plugin(self, plugin_type): return directory.get_plugin(plugin_type) @property def core_plugin(self): if not self._core_plugin: self._core_plugin = ( self._get_plugin(constants.CORE)) return self._core_plugin def _get_manager_by_project(self, context, project_id): plugin_type = tvd_utils.get_tvd_plugin_type_for_project( project_id, context=context) if not self.managers.get(plugin_type): LOG.error("Project %(project)s with plugin %(plugin)s has no " "support for Octavia", {'project': project_id, 'plugin': plugin_type}) raise n_exc.ServiceUnavailable() return self.managers[plugin_type] def create(self, context, obj, completor, **args): manager = self._get_manager_by_project(context, obj['project_id']) return manager.create(context, obj, completor, **args) def update(self, context, old_obj, new_obj, completor, **args): manager = self._get_manager_by_project(context, old_obj['project_id']) return manager.update(context, old_obj, new_obj, completor, **args) def delete(self, context, obj, completor, **args): manager = self._get_manager_by_project(context, obj['project_id']) return manager.delete(context, obj, completor, **args) def stats_getter(context, core_plugin, ignore_list=None): """Call stats of both plugins""" for plugin_type in [projectpluginmap.NsxPlugins.NSX_V, projectpluginmap.NsxPlugins.NSX_T]: plugin = core_plugin.get_plugin_by_type(plugin_type) if plugin: stats_getter_func = 
plugin._get_octavia_stats_getter() return stats_getter_func(context, plugin) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2182543 vmware-nsx-15.0.1.dev143/vmware_nsx/services/qos/0000755000175000017500000000000000000000000022015 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/qos/__init__.py0000644000175000017500000000000000000000000024114 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2182543 vmware-nsx-15.0.1.dev143/vmware_nsx/services/qos/common/0000755000175000017500000000000000000000000023305 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/qos/common/__init__.py0000644000175000017500000000000000000000000025404 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/qos/common/utils.py0000644000175000017500000000662700000000000025032 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.exceptions import qos as qos_exc from neutron_lib.objects import registry as obj_reg from neutron_lib.services.qos import constants as qos_consts def validate_policy_accessable(context, policy_id): policy_obj = obj_reg.load_class('QosPolicy').get_object( context, id=policy_id) if not policy_obj: # This means that rbac decided the policy cannot be used with this # context raise qos_exc.QosPolicyNotFound(policy_id=policy_id) def update_network_policy_binding(context, net_id, new_policy_id): # detach the old policy (if exists) from the network old_policy = obj_reg.load_class('QosPolicy').get_network_policy( context, net_id) if old_policy: if old_policy.id == new_policy_id: return old_policy.detach_network(net_id) # attach the new policy (if exists) to the network if new_policy_id is not None: new_policy = obj_reg.load_class('QosPolicy').get_object( context, id=new_policy_id) if new_policy: new_policy.attach_network(net_id) def update_port_policy_binding(context, port_id, new_policy_id): # detach the old policy (if exists) from the port old_policy = obj_reg.load_class('QosPolicy').get_port_policy( context, port_id) if old_policy: if old_policy.id == new_policy_id: return old_policy.detach_port(port_id) # attach the new policy (if exists) to the port if new_policy_id is not None: new_policy = obj_reg.load_class('QosPolicy').get_object( context, id=new_policy_id) if new_policy: new_policy.attach_port(port_id) def get_port_policy_id(context, port_id): policy = obj_reg.load_class('QosPolicy').get_port_policy( context, port_id) if policy: return policy.id def get_network_policy_id(context, net_id): policy = obj_reg.load_class('QosPolicy').get_network_policy( context, net_id) if policy: return policy.id def set_qos_policy_on_new_net(context, net_data, created_net): """Update the network with the assigned or default QoS policy Update the network-qos binding table, and the new network structure """ qos_policy_id = net_data.get(qos_consts.QOS_POLICY_ID) if not 
qos_policy_id: # try and get the default one qos_obj = obj_reg.load_class('QosPolicyDefault').get_object( context, project_id=created_net['project_id']) if qos_obj: qos_policy_id = qos_obj.qos_policy_id if qos_policy_id: # attach the policy to the network in the neutron DB update_network_policy_binding( context, created_net['id'], qos_policy_id) created_net[qos_consts.QOS_POLICY_ID] = qos_policy_id return qos_policy_id ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2182543 vmware-nsx-15.0.1.dev143/vmware_nsx/services/qos/nsx_tvd/0000755000175000017500000000000000000000000023502 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/qos/nsx_tvd/__init__.py0000644000175000017500000000000000000000000025601 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/qos/nsx_tvd/plugin.py0000644000175000017500000000164100000000000025354 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.services.qos import qos_plugin from vmware_nsx.plugins.nsx import utils as tvd_utils @tvd_utils.filter_plugins class QoSPlugin(qos_plugin.QoSPlugin): """NSX-TV plugin for QoS. 
This plugin adds separation between T/V instances """ methods_to_separate = ['get_policies'] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2182543 vmware-nsx-15.0.1.dev143/vmware_nsx/services/qos/nsx_v/0000755000175000017500000000000000000000000023152 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/qos/nsx_v/__init__.py0000644000175000017500000000000000000000000025251 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/qos/nsx_v/driver.py0000644000175000017500000000573100000000000025025 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib import constants from neutron_lib.db import constants as db_constants from neutron_lib.services.qos import base from neutron_lib.services.qos import constants as qos_consts from oslo_log import log as logging from vmware_nsx.extensions import projectpluginmap LOG = logging.getLogger(__name__) DRIVER = None SUPPORTED_RULES = { qos_consts.RULE_TYPE_BANDWIDTH_LIMIT: { qos_consts.MAX_KBPS: { 'type:range': [0, db_constants.DB_INTEGER_MAX_VALUE]}, qos_consts.MAX_BURST: { 'type:range': [0, db_constants.DB_INTEGER_MAX_VALUE]}, qos_consts.DIRECTION: { 'type:values': [constants.EGRESS_DIRECTION, constants.INGRESS_DIRECTION]} }, qos_consts.RULE_TYPE_DSCP_MARKING: { qos_consts.DSCP_MARK: {'type:values': constants.VALID_DSCP_MARKS} } } class NSXvQosDriver(base.DriverBase): @staticmethod def create(core_plugin): return NSXvQosDriver( core_plugin, name='NSXvQosDriver', vif_types=None, vnic_types=None, supported_rules=SUPPORTED_RULES, requires_rpc_notifications=False) def __init__(self, core_plugin, **kwargs): super(NSXvQosDriver, self).__init__(**kwargs) self.core_plugin = core_plugin if self.core_plugin.is_tvd_plugin(): # get the plugin that match this driver self.core_plugin = self.core_plugin.get_plugin_by_type( projectpluginmap.NsxPlugins.NSX_V) self.requires_rpc_notifications = False def is_vif_type_compatible(self, vif_type): return True def is_vnic_compatible(self, vnic_type): return True def create_policy(self, context, policy): pass def update_policy(self, context, policy): # get all the bound networks of this policy networks = policy.get_bound_networks() for net_id in networks: # update the new bw limitations for this network self.core_plugin._update_qos_on_backend_network( context, net_id, policy.id) def delete_policy(self, context, policy): pass def register(core_plugin): """Register the NSX-V QoS driver.""" global DRIVER if not DRIVER: DRIVER = NSXvQosDriver.create(core_plugin) LOG.debug('NSXvQosDriver QoS driver registered') 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/qos/nsx_v/plugin.py0000644000175000017500000000264300000000000025027 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.services.qos import qos_plugin from neutron_lib.api.definitions import qos as qos_apidef from oslo_config import cfg from oslo_log import log as logging from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsx_exc LOG = logging.getLogger(__name__) class NsxVQosPlugin(qos_plugin.QoSPlugin): """Service plugin for VMware NSX-v to implement Neutron's Qos API.""" supported_extension_aliases = [qos_apidef.ALIAS] def __init__(self): LOG.info("Loading VMware NSX-V Qos Service Plugin") super(NsxVQosPlugin, self).__init__() if not cfg.CONF.nsxv.use_dvs_features: error = _("Cannot use the NSX-V QoS plugin without " "enabling the dvs features") raise nsx_exc.NsxPluginException(err_msg=error) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/qos/nsx_v/utils.py0000644000175000017500000000733300000000000024672 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. 
# # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants as n_consts from neutron_lib.plugins import constants as plugin_const from neutron_lib.plugins import directory from neutron_lib.services.qos import constants as qos_consts from oslo_config import cfg from oslo_log import log as logging LOG = logging.getLogger(__name__) class NsxVQosBWLimits(object): # Data structure to hold the NSX-V representation # of the neutron QoS Bandwidth rule bandwidthEnabled = False averageBandwidth = 0 peakBandwidth = 0 burstSize = 0 class NsxVQosRule(object): def __init__(self, context=None, qos_policy_id=None): super(NsxVQosRule, self).__init__() self._qos_plugin = None # Data structure to hold the NSX-V representation # of the neutron QoS Bandwidth rule for both directions self.egress = NsxVQosBWLimits() self.ingress = NsxVQosBWLimits() # And data for the DSCP marking rule self.dscpMarkEnabled = False self.dscpMarkValue = 0 if qos_policy_id is not None: self._init_from_policy_id(context, qos_policy_id) def _get_qos_plugin(self): if not self._qos_plugin: self._qos_plugin = directory.get_plugin(plugin_const.QOS) return self._qos_plugin # init the nsx_v qos data (outShapingPolicy) from a neutron qos policy def _init_from_policy_id(self, context, qos_policy_id): self.bandwidthEnabled = False self.dscpMarkEnabled = False # read the neutron policy restrictions if qos_policy_id is not None: plugin = self._get_qos_plugin() policy_obj = 
plugin.get_policy(context, qos_policy_id) if 'rules' in policy_obj and len(policy_obj['rules']) > 0: for rule_obj in policy_obj['rules']: if (rule_obj['type'] == qos_consts.RULE_TYPE_BANDWIDTH_LIMIT): # BW limit rule for one of the directions if rule_obj['direction'] == n_consts.EGRESS_DIRECTION: dir_obj = self.egress else: dir_obj = self.ingress dir_obj.bandwidthEnabled = True # averageBandwidth: kbps (neutron) -> bps (nsxv) dir_obj.averageBandwidth = rule_obj['max_kbps'] * 1024 # peakBandwidth: a Multiplying on the average BW # because the neutron qos configuration supports # only 1 value dir_obj.peakBandwidth = int(round( dir_obj.averageBandwidth * cfg.CONF.NSX.qos_peak_bw_multiplier)) # burstSize: kbps (neutron) -> Bytes (nsxv) dir_obj.burstSize = rule_obj['max_burst_kbps'] * 128 if rule_obj['type'] == qos_consts.RULE_TYPE_DSCP_MARKING: # DSCP marking rule self.dscpMarkEnabled = True self.dscpMarkValue = rule_obj['dscp_mark'] return self ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2222543 vmware-nsx-15.0.1.dev143/vmware_nsx/services/qos/nsx_v3/0000755000175000017500000000000000000000000023235 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/qos/nsx_v3/__init__.py0000644000175000017500000000000000000000000025334 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/qos/nsx_v3/driver.py0000644000175000017500000000572100000000000025107 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants from neutron_lib.db import constants as db_constants from neutron_lib.services.qos import base from neutron_lib.services.qos import constants as qos_consts from oslo_log import log as logging LOG = logging.getLogger(__name__) DRIVER = None SUPPORTED_RULES = { qos_consts.RULE_TYPE_BANDWIDTH_LIMIT: { qos_consts.MAX_KBPS: { 'type:range': [0, db_constants.DB_INTEGER_MAX_VALUE]}, qos_consts.MAX_BURST: { 'type:range': [0, db_constants.DB_INTEGER_MAX_VALUE]}, qos_consts.DIRECTION: { 'type:values': [constants.EGRESS_DIRECTION, constants.INGRESS_DIRECTION]} }, qos_consts.RULE_TYPE_DSCP_MARKING: { qos_consts.DSCP_MARK: {'type:values': constants.VALID_DSCP_MARKS} } } class NSXv3QosDriver(base.DriverBase): @staticmethod def create(handler): return NSXv3QosDriver( name='NSXv3QosDriver', vif_types=None, vnic_types=None, supported_rules=SUPPORTED_RULES, requires_rpc_notifications=False, handler=handler) def __init__(self, handler=None, **kwargs): self.handler = handler super(NSXv3QosDriver, self).__init__(**kwargs) def is_vif_type_compatible(self, vif_type): return True def is_vnic_compatible(self, vnic_type): return True def create_policy(self, context, policy): self.handler.create_policy(context, policy) def update_policy(self, context, policy): # Update the rules if (hasattr(policy, "rules")): self.handler.update_policy_rules( context, policy.id, policy["rules"]) # Update the entire policy self.handler.update_policy(context, policy.id, policy) def delete_policy(self, context, policy): self.handler.delete_policy(context, policy.id) def 
update_policy_precommit(self, context, policy): """Validate rules values, before creation""" if (hasattr(policy, "rules")): for rule in policy["rules"]: self.handler.validate_policy_rule(context, policy.id, rule) def register(handler): """Register the NSX-V3 QoS driver.""" global DRIVER if not DRIVER: DRIVER = NSXv3QosDriver.create(handler) LOG.debug('NSXv3QosDriver QoS driver registered') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/qos/nsx_v3/message_queue.py0000644000175000017500000000207600000000000026444 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.services.qos.notification_drivers import message_queue class NsxV3QosNotificationDriver( message_queue.RpcQosServiceNotificationDriver): """NSXv3 message queue service notification driver for QoS. Overriding the create_policy method in order to add a notification message in this case too. """ # The message queue is no longer needed in Pike. # Keeping this class for a while for existing configurations. pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/qos/nsx_v3/pol_utils.py0000644000175000017500000001450200000000000025623 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. 
# # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging from neutron_lib import constants as n_consts from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from neutron_lib.services.qos import constants as qos_consts from vmware_nsx._i18n import _ from vmware_nsx.common import utils LOG = logging.getLogger(__name__) MAX_KBPS_MIN_VALUE = 1024 # The max limit is calculated so that the value sent to the backed will # be smaller than 2**31 MAX_BURST_MAX_VALUE = int((2 ** 31 - 1) / 128) class PolicyQosNotificationsHandler(object): def __init__(self): super(PolicyQosNotificationsHandler, self).__init__() self._core_plugin = None @property def core_plugin(self): if not self._core_plugin: self._core_plugin = directory.get_plugin() return self._core_plugin @property def _nsxpolicy(self): return self.core_plugin.nsxpolicy def _get_tags(self, context, policy): policy_dict = {'id': policy.id, 'tenant_id': policy.tenant_id} return self._nsxpolicy.build_v3_tags_payload( policy_dict, resource_type='os-neutron-qos-id', project_name=context.tenant_name) def create_or_update_policy(self, context, policy): policy_id = policy.id tags = self._get_tags(context, policy) pol_name = utils.get_name_and_uuid(policy.name or 'policy', policy.id) shapers = [] dscp = None if (hasattr(policy, "rules")): for rule in policy["rules"]: if rule.rule_type == qos_consts.RULE_TYPE_BANDWIDTH_LIMIT: # the NSX 
direction is opposite to the neutron one is_ingress = rule.direction == n_consts.EGRESS_DIRECTION shapers.append(self._get_shaper_from_rule( rule, is_ingress=is_ingress)) elif rule.rule_type == qos_consts.RULE_TYPE_DSCP_MARKING: dscp = self._get_dscp_from_rule(rule) else: LOG.warning("The NSX-Policy plugin does not support QoS " "rule of type %s", rule.rule_type) self._nsxpolicy.qos_profile.create_or_overwrite( pol_name, profile_id=policy_id, description=policy.get('description'), dscp=dscp, shaper_configurations=shapers, tags=tags) def create_policy(self, context, policy): return self.create_or_update_policy(context, policy) def delete_policy(self, context, policy_id): self._nsxpolicy.qos_profile.delete(policy_id) def update_policy(self, context, policy_id, policy): return self.create_or_update_policy(context, policy) def _validate_bw_values(self, bw_rule): """Validate that the values are allowed by the NSX backend""" # Validate the max bandwidth value minimum value # (max value is above what neutron allows so no need to check it) if (bw_rule.max_kbps < MAX_KBPS_MIN_VALUE): msg = (_("Invalid input for max_kbps. " "The minimal legal value is %s") % MAX_KBPS_MIN_VALUE) LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) # validate the burst size value max value # (max value is 0, and neutron already validates this) if (bw_rule.max_burst_kbps > MAX_BURST_MAX_VALUE): msg = (_("Invalid input for burst_size. 
" "The maximal legal value is %s") % MAX_BURST_MAX_VALUE) LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) def _get_shaper_from_rule(self, bw_rule, is_ingress=True): """Translate the neutron bandwidth_limit_rule values into the NSX-lib Policy QoS shaper """ kwargs = {} if is_ingress: shaper = self._nsxpolicy.qos_profile.build_ingress_rate_limiter else: shaper = self._nsxpolicy.qos_profile.build_egress_rate_limiter if bw_rule: kwargs['enabled'] = True # translate kbps -> bytes kwargs['burst_size'] = int(bw_rule.max_burst_kbps) * 128 # value in kbps -> Mb/s kwargs['average_bandwidth'] = int( round(float(bw_rule.max_kbps) / 1024)) # peakBandwidth: a Multiplying on the average BW because the # neutron qos configuration supports only 1 value kwargs['peak_bandwidth'] = int( round(kwargs['average_bandwidth'] * cfg.CONF.NSX.qos_peak_bw_multiplier)) else: kwargs['enabled'] = False return shaper(**kwargs) def _get_dscp_from_rule(self, dscp_rule): """Translate the neutron DSCP marking rule values into NSX-lib Policy QoS Dscp object """ trusted = False if dscp_rule else True priority = dscp_rule.dscp_mark if dscp_rule else 0 return self._nsxpolicy.qos_profile.build_dscp( trusted=trusted, priority=priority) def update_policy_rules(self, context, policy_id, rules): """This handler will do all the updates through the create_or_update""" pass def validate_policy_rule(self, context, policy_id, rule): """Raise an exception if the rule values are not supported""" if rule.rule_type == qos_consts.RULE_TYPE_BANDWIDTH_LIMIT: self._validate_bw_values(rule) elif rule.rule_type == qos_consts.RULE_TYPE_DSCP_MARKING: pass else: msg = (_("The NSX-Policy plugin does not support QoS rule of type " "%s") % rule.rule_type) LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 
vmware-nsx-15.0.1.dev143/vmware_nsx/services/qos/nsx_v3/utils.py0000644000175000017500000002027600000000000024756 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging from neutron_lib.api import validators from neutron_lib import constants as n_consts from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from neutron_lib.services.qos import constants as qos_consts from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.db import db as nsx_db from vmware_nsx.extensions import projectpluginmap LOG = logging.getLogger(__name__) MAX_KBPS_MIN_VALUE = 1024 # The max limit is calculated so that the value sent to the backed will # be smaller than 2**31 MAX_BURST_MAX_VALUE = int((2 ** 31 - 1) / 128) class QosNotificationsHandler(object): def __init__(self): super(QosNotificationsHandler, self).__init__() self._core_plugin = None @property def core_plugin(self): if not self._core_plugin: self._core_plugin = directory.get_plugin() if self._core_plugin.is_tvd_plugin(): # get the plugin that match this driver self._core_plugin = self._core_plugin.get_plugin_by_type( projectpluginmap.NsxPlugins.NSX_T) return self._core_plugin @property def _nsxlib_qos(self): return self.core_plugin.nsxlib.qos_switching_profile def _get_tags(self, context, policy): policy_dict = {'id': policy.id, 
'tenant_id': policy.tenant_id} return self._nsxlib_qos.build_v3_tags_payload( policy_dict, resource_type='os-neutron-qos-id', project_name=context.tenant_name) def create_policy(self, context, policy): policy_id = policy.id tags = self._get_tags(context, policy) result = self._nsxlib_qos.create( tags=tags, name=policy.name, description=policy.description) if not result or not validators.is_attr_set(result.get('id')): msg = _("Unable to create QoS switching profile on the backend") raise nsx_exc.NsxPluginException(err_msg=msg) profile_id = result['id'] # Add the mapping entry of the policy_id <-> profile_id nsx_db.add_qos_policy_profile_mapping(context.session, policy_id, profile_id) def delete_policy(self, context, policy_id): profile_id = nsx_db.get_switch_profile_by_qos_policy( context.session, policy_id) # delete the profile id from the backend and the DB self._nsxlib_qos.delete(profile_id) nsx_db.delete_qos_policy_profile_mapping( context.session, policy_id) def update_policy(self, context, policy_id, policy): profile_id = nsx_db.get_switch_profile_by_qos_policy( context.session, policy_id) tags = self._get_tags(context, policy) self._nsxlib_qos.update( profile_id, tags=tags, name=policy.name, description=policy.description) def _validate_bw_values(self, bw_rule): """Validate that the values are allowed by the NSX backend""" # Validate the max bandwidth value minimum value # (max value is above what neutron allows so no need to check it) if (bw_rule.max_kbps < MAX_KBPS_MIN_VALUE): msg = (_("Invalid input for max_kbps. " "The minimal legal value is %s") % MAX_KBPS_MIN_VALUE) LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) # validate the burst size value max value # (max value is 0, and neutron already validates this) if (bw_rule.max_burst_kbps > MAX_BURST_MAX_VALUE): msg = (_("Invalid input for burst_size. 
" "The maximal legal value is %s") % MAX_BURST_MAX_VALUE) LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) def _get_bw_values_from_rule(self, bw_rule): """Translate the neutron bandwidth_limit_rule values, into the values expected by the NSX-v3 QoS switch profile, and validate that those are legal """ if bw_rule: shaping_enabled = True # translate kbps -> bytes burst_size = int(bw_rule.max_burst_kbps) * 128 # translate kbps -> Mbps average_bandwidth = int(round(float(bw_rule.max_kbps) / 1024)) # peakBandwidth: a Multiplying on the average BW # because the neutron qos configuration supports # only 1 value peak_bandwidth = int(round(average_bandwidth * cfg.CONF.NSX.qos_peak_bw_multiplier)) else: shaping_enabled = False burst_size = None peak_bandwidth = None average_bandwidth = None return shaping_enabled, burst_size, peak_bandwidth, average_bandwidth def _get_dscp_values_from_rule(self, dscp_rule): """Translate the neutron DSCP marking rule values, into the values expected by the NSX-v3 QoS switch profile """ if dscp_rule: qos_marking = 'untrusted' dscp = dscp_rule.dscp_mark else: qos_marking = 'trusted' dscp = 0 return qos_marking, dscp def update_policy_rules(self, context, policy_id, rules): """Update the QoS switch profile with the BW limitations and DSCP marking configuration """ profile_id = nsx_db.get_switch_profile_by_qos_policy( context.session, policy_id) ingress_bw_rule = None egress_bw_rule = None dscp_rule = None for rule in rules: if rule.rule_type == qos_consts.RULE_TYPE_BANDWIDTH_LIMIT: if rule.direction == n_consts.EGRESS_DIRECTION: egress_bw_rule = rule else: ingress_bw_rule = rule elif rule.rule_type == qos_consts.RULE_TYPE_DSCP_MARKING: dscp_rule = rule else: LOG.warning("The NSX-V3 plugin does not support QoS rule of " "type %s", rule.rule_type) # the NSX direction is opposite to the neutron direction (ingress_bw_enabled, ingress_burst_size, ingress_peak_bw, ingress_average_bw) = self._get_bw_values_from_rule(egress_bw_rule) 
(egress_bw_enabled, egress_burst_size, egress_peak_bw, egress_average_bw) = self._get_bw_values_from_rule(ingress_bw_rule) qos_marking, dscp = self._get_dscp_values_from_rule(dscp_rule) self._nsxlib_qos.set_profile_shaping( profile_id, ingress_bw_enabled=ingress_bw_enabled, ingress_burst_size=ingress_burst_size, ingress_peak_bandwidth=ingress_peak_bw, ingress_average_bandwidth=ingress_average_bw, egress_bw_enabled=egress_bw_enabled, egress_burst_size=egress_burst_size, egress_peak_bandwidth=egress_peak_bw, egress_average_bandwidth=egress_average_bw, qos_marking=qos_marking, dscp=dscp) def validate_policy_rule(self, context, policy_id, rule): """Raise an exception if the rule values are not supported""" if rule.rule_type == qos_consts.RULE_TYPE_BANDWIDTH_LIMIT: self._validate_bw_values(rule) elif rule.rule_type == qos_consts.RULE_TYPE_DSCP_MARKING: pass else: msg = (_("The NSX-V3 plugin does not support QoS rule of type " "%s") % rule.rule_type) LOG.error(msg) raise n_exc.InvalidInput(error_message=msg) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2222543 vmware-nsx-15.0.1.dev143/vmware_nsx/services/trunk/0000755000175000017500000000000000000000000022356 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/trunk/__init__.py0000644000175000017500000000000000000000000024455 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2222543 vmware-nsx-15.0.1.dev143/vmware_nsx/services/trunk/nsx_p/0000755000175000017500000000000000000000000023505 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/trunk/nsx_p/__init__.py0000644000175000017500000000000000000000000025604 
0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/trunk/nsx_p/driver.py0000644000175000017500000002375500000000000025366 0ustar00coreycorey00000000000000# Copyright 2019 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from neutron.services.trunk.drivers import base from neutron_lib.api.definitions import portbindings from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants from neutron_lib.services.trunk import constants as trunk_consts from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import utils as nsx_utils from vmware_nsx.extensions import projectpluginmap from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3.policy import constants as p_constants from vmware_nsxlib.v3 import utils as nsxlib_utils LOG = logging.getLogger(__name__) SUPPORTED_INTERFACES = ( portbindings.VIF_TYPE_OVS, ) SUPPORTED_SEGMENTATION_TYPES = ( trunk_consts.SEGMENTATION_TYPE_VLAN, ) DRIVER_NAME = 'vmware_nsxp_trunk' TRUNK_ID_TAG_NAME = 'os-neutron-trunk-id' class NsxpTrunkHandler(object): def __init__(self, plugin_driver): self.plugin_driver = plugin_driver def 
_get_port_tags_and_network(self, context, port_id): _, tags, net = self._get_port_compute_tags_and_net(context, port_id) return tags, net def _get_port_compute_tags_and_net(self, context, port_id): port = self.plugin_driver.get_port(context, port_id) segment_id = self.plugin_driver._get_network_nsx_segment_id( context, port['network_id']) lport = self.plugin_driver.nsxpolicy.segment_port.get( segment_id, port_id) is_compute = port.get('device_owner', '').startswith( constants.DEVICE_OWNER_COMPUTE_PREFIX) return is_compute, segment_id, lport.get('tags', []) def _update_tags(self, port_id, tags, tags_update, is_delete=False): if is_delete: tags = [tag for tag in tags if tag not in tags_update] else: for tag in tags: for tag_u in tags_update: if tag_u['scope'] == tag['scope']: tag['tag'] = tag_u['tag'] tags_update.remove(tag_u) break tags.extend( [tag for tag in tags_update if tag not in tags]) if len(tags) > nsxlib_utils.MAX_TAGS: LOG.warning("Cannot add external tags to port %s: " "too many tags", port_id) return tags def _set_subports(self, context, parent_port_id, subports): for subport in subports: # Update port with parent port for backend. 
# Set properties for VLAN trunking if subport.segmentation_type == nsx_utils.NsxV3NetworkTypes.VLAN: seg_id = subport.segmentation_id else: msg = (_("Cannot create a subport %s with no segmentation" " id") % subport.port_id) LOG.error(msg) raise nsx_exc.NsxPluginException(err_msg=msg) tags_update = [{'scope': TRUNK_ID_TAG_NAME, 'tag': subport.trunk_id}] segment_id, tags = self._get_port_tags_and_network( context, subport.port_id) tags = self._update_tags( subport.port_id, tags, tags_update, is_delete=False) # Update logical port in the backend to set/unset parent port try: self.plugin_driver.nsxpolicy.segment_port.attach( segment_id, subport.port_id, p_constants.ATTACHMENT_CHILD, subport.port_id, context_id=parent_port_id, traffic_tag=seg_id, tags=tags) except nsxlib_exc.ManagerError as e: with excutils.save_and_reraise_exception(): LOG.error("Unable to update subport for attachment " "type. Exception is %s", e) def _unset_subports(self, context, subports): for subport in subports: # Update port and remove parent port attachment in the backend # Unset the parent port properties from child port tags_update = [{'scope': TRUNK_ID_TAG_NAME, 'tag': subport.trunk_id}] is_compute, segment_id, tags = self._get_port_compute_tags_and_net( context, subport.port_id) tags = self._update_tags( subport.port_id, tags, tags_update, is_delete=True) # Update logical port in the backend to set/unset parent port vif_id = None if is_compute: vif_id = subport.port_id try: self.plugin_driver.nsxpolicy.segment_port.detach( segment_id, subport.port_id, vif_id=vif_id, tags=tags) except nsxlib_exc.ManagerError as e: with excutils.save_and_reraise_exception(): LOG.error("Unable to update subport for attachment " "type. 
Exception is %s", e) def trunk_created(self, context, trunk): tags_update = [{'scope': TRUNK_ID_TAG_NAME, 'tag': trunk.id}] segment_id, tags = self._get_port_tags_and_network( context, trunk.port_id) tags = self._update_tags( trunk.port_id, tags, tags_update, is_delete=False) try: self.plugin_driver.nsxpolicy.segment_port.attach( segment_id, trunk.port_id, vif_id=trunk.port_id, attachment_type=p_constants.ATTACHMENT_PARENT, tags=tags) except Exception as e: with excutils.save_and_reraise_exception(): LOG.error("Parent port attachment for trunk %(trunk)s failed " "with error %(e)s", {'trunk': trunk.id, 'e': e}) if trunk.sub_ports: self.subports_added(context, trunk, trunk.sub_ports) def trunk_deleted(self, context, trunk): tags_update = [{'scope': TRUNK_ID_TAG_NAME, 'tag': trunk.id}] is_compute, segment_id, tags = self._get_port_compute_tags_and_net( context, trunk.port_id) tags = self._update_tags( trunk.port_id, tags, tags_update, is_delete=True) try: vif_id = None if is_compute: vif_id = trunk.port_id self.plugin_driver.nsxpolicy.segment_port.detach( segment_id, trunk.port_id, vif_id=vif_id, tags=tags) except Exception as e: with excutils.save_and_reraise_exception(): LOG.error("Parent port detachment for trunk %(trunk)s failed " "with error %(e)s", {'trunk': trunk.id, 'e': e}) self.subports_deleted(context, trunk, trunk.sub_ports) def subports_added(self, context, trunk, subports): try: self._set_subports(context, trunk.port_id, subports) trunk.update(status=trunk_consts.TRUNK_ACTIVE_STATUS) except (nsxlib_exc.ManagerError, nsxlib_exc.ResourceNotFound): trunk.update(status=trunk_consts.TRUNK_ERROR_STATUS) def subports_deleted(self, context, trunk, subports): try: self._unset_subports(context, subports) except (nsxlib_exc.ManagerError, nsxlib_exc.ResourceNotFound): trunk.update(status=trunk_consts.TRUNK_ERROR_STATUS) def trunk_event(self, resource, event, trunk_plugin, payload): if event == events.AFTER_CREATE: self.trunk_created(payload.context, 
payload.current_trunk) elif event == events.AFTER_DELETE: self.trunk_deleted(payload.context, payload.original_trunk) def subport_event(self, resource, event, trunk_plugin, payload): if event == events.AFTER_CREATE: self.subports_added( payload.context, payload.original_trunk, payload.subports) elif event == events.AFTER_DELETE: self.subports_deleted( payload.context, payload.original_trunk, payload.subports) class NsxpTrunkDriver(base.DriverBase): """Driver to implement neutron's trunk extensions.""" @property def is_loaded(self): try: plugin_type = self.plugin_driver.plugin_type() return plugin_type == projectpluginmap.NsxPlugins.NSX_P except cfg.NoSuchOptError: return False @classmethod def create(cls, plugin_driver): cls.plugin_driver = plugin_driver return cls(DRIVER_NAME, SUPPORTED_INTERFACES, SUPPORTED_SEGMENTATION_TYPES, agent_type=None, can_trunk_bound_port=True) @registry.receives(resources.TRUNK_PLUGIN, [events.AFTER_INIT]) def register(self, resource, event, trigger, payload=None): super(NsxpTrunkDriver, self).register( resource, event, trigger, payload=payload) self._handler = NsxpTrunkHandler(self.plugin_driver) for event in (events.AFTER_CREATE, events.AFTER_DELETE): registry.subscribe(self._handler.trunk_event, resources.TRUNK, event) registry.subscribe(self._handler.subport_event, resources.SUBPORTS, event) LOG.debug("VMware NSXP trunk driver initialized.") ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2222543 vmware-nsx-15.0.1.dev143/vmware_nsx/services/trunk/nsx_v3/0000755000175000017500000000000000000000000023576 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/trunk/nsx_v3/__init__.py0000644000175000017500000000000000000000000025675 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/trunk/nsx_v3/driver.py0000644000175000017500000002213400000000000025445 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from neutron.services.trunk.drivers import base from neutron_lib.api.definitions import portbindings from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib.services.trunk import constants as trunk_consts from vmware_nsx.common import nsx_constants as nsx_consts from vmware_nsx.common import utils as nsx_utils from vmware_nsx.db import db as nsx_db from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3 import nsx_constants LOG = logging.getLogger(__name__) SUPPORTED_INTERFACES = ( portbindings.VIF_TYPE_OVS, ) SUPPORTED_SEGMENTATION_TYPES = ( trunk_consts.SEGMENTATION_TYPE_VLAN, ) class NsxV3TrunkHandler(object): """Class to handle trunk events.""" def __init__(self, plugin_driver): self.plugin_driver = plugin_driver @property def _nsxlib(self): return self.plugin_driver.nsxlib def _build_switching_profile_ids(self, profiles): switching_profile = self._nsxlib.switching_profile return switching_profile.build_switch_profile_ids( switching_profile.client, *profiles) def _update_port_at_backend(self, context, 
parent_port_id, subport): # Retrieve the child port details child_port = self.plugin_driver.get_port(context, subport.port_id) # Retrieve the logical port ID based on the child port's neutron ID nsx_child_port_id = nsx_db.get_nsx_switch_and_port_id( session=context.session, neutron_id=subport.port_id)[1] # Retrieve child logical port from the backend try: nsx_child_port = self._nsxlib.logical_port.get( nsx_child_port_id) except nsxlib_exc.ResourceNotFound: with excutils.save_and_reraise_exception(): LOG.error("Child port %s not found on the backend. " "Setting trunk status to ERROR.", nsx_child_port_id) # Build address bindings and switch profiles otherwise backend will # clear that information during port update address_bindings = self.plugin_driver._build_address_bindings( child_port) switching_profile_ids = self._build_switching_profile_ids( nsx_child_port.get('switching_profile_ids', [])) seg_id = None tags_update = [] attachment_type = nsx_constants.ATTACHMENT_VIF if parent_port_id: # Set properties for VLAN trunking if subport.segmentation_type == nsx_utils.NsxV3NetworkTypes.VLAN: seg_id = subport.segmentation_id tags_update.append({'scope': 'os-neutron-trunk-id', 'tag': subport.trunk_id}) vif_type = nsx_constants.VIF_TYPE_CHILD else: # Unset the parent port properties from child port seg_id = None vif_type = None tags_update.append({'scope': 'os-neutron-trunk-id', 'tag': None}) # Update logical port in the backend to set/unset parent port try: self._nsxlib.logical_port.update( lport_id=nsx_child_port.get('id'), vif_uuid=subport.port_id, name=nsx_child_port.get('display_name'), admin_state=nsx_child_port.get('admin_state'), address_bindings=address_bindings, switch_profile_ids=switching_profile_ids, attachment_type=attachment_type, parent_vif_id=parent_port_id, vif_type=vif_type, traffic_tag=seg_id, tags_update=tags_update) except nsxlib_exc.ManagerError as e: with excutils.save_and_reraise_exception(): LOG.error("Unable to update subport for attachment " 
"type. Setting trunk status to ERROR. " "Exception is %s", e) def _set_subports(self, context, parent_port_id, subports): for subport in subports: # Update port with parent port for backend. self._update_port_at_backend(context, parent_port_id, subport) def _unset_subports(self, context, subports): for subport in subports: # Update port and remove parent port attachment in the backend self._update_port_at_backend( context=context, parent_port_id=None, subport=subport) def trunk_created(self, context, trunk): # Retrieve the logical port ID based on the parent port's neutron ID nsx_parent_port_id = nsx_db.get_nsx_switch_and_port_id( session=context.session, neutron_id=trunk.port_id)[1] tags_update = [{'scope': 'os-neutron-trunk-id', 'tag': trunk.id}] self.plugin_driver.nsxlib.logical_port.update( nsx_parent_port_id, vif_uuid=trunk.port_id, vif_type=nsx_constants.VIF_TYPE_PARENT, tags_update=tags_update) try: if trunk.sub_ports: self._set_subports(context, trunk.port_id, trunk.sub_ports) trunk.update(status=trunk_consts.TRUNK_ACTIVE_STATUS) except (nsxlib_exc.ManagerError, nsxlib_exc.ResourceNotFound): trunk.update(status=trunk_consts.TRUNK_ERROR_STATUS) def trunk_deleted(self, context, trunk): # Retrieve the logical port ID based on the parent port's neutron ID nsx_parent_port_id = nsx_db.get_nsx_switch_and_port_id( session=context.session, neutron_id=trunk.port_id)[1] tags_update = [{'scope': 'os-neutron-trunk-id', 'tag': None}] self.plugin_driver.nsxlib.logical_port.update( nsx_parent_port_id, vif_uuid=trunk.port_id, vif_type=None, tags_update=tags_update) self._unset_subports(context, trunk.sub_ports) def subports_added(self, context, trunk, subports): try: self._set_subports(context, trunk.port_id, subports) trunk.update(status=trunk_consts.TRUNK_ACTIVE_STATUS) except (nsxlib_exc.ManagerError, nsxlib_exc.ResourceNotFound): trunk.update(status=trunk_consts.TRUNK_ERROR_STATUS) def subports_deleted(self, context, trunk, subports): try: self._unset_subports(context, 
subports) except (nsxlib_exc.ManagerError, nsxlib_exc.ResourceNotFound): trunk.update(status=trunk_consts.TRUNK_ERROR_STATUS) def trunk_event(self, resource, event, trunk_plugin, payload): if event == events.AFTER_CREATE: self.trunk_created(payload.context, payload.current_trunk) elif event == events.AFTER_DELETE: self.trunk_deleted(payload.context, payload.original_trunk) def subport_event(self, resource, event, trunk_plugin, payload): if event == events.AFTER_CREATE: self.subports_added( payload.context, payload.original_trunk, payload.subports) elif event == events.AFTER_DELETE: self.subports_deleted( payload.context, payload.original_trunk, payload.subports) class NsxV3TrunkDriver(base.DriverBase): """Driver to implement neutron's trunk extensions.""" @property def is_loaded(self): try: return nsx_consts.VMWARE_NSX_V3_PLUGIN_NAME == cfg.CONF.core_plugin except cfg.NoSuchOptError: return False @classmethod def create(cls, plugin_driver): cls.plugin_driver = plugin_driver return cls(nsx_consts.VMWARE_NSX_V3_PLUGIN_NAME, SUPPORTED_INTERFACES, SUPPORTED_SEGMENTATION_TYPES, agent_type=None, can_trunk_bound_port=True) @registry.receives(resources.TRUNK_PLUGIN, [events.AFTER_INIT]) def register(self, resource, event, trigger, payload=None): super(NsxV3TrunkDriver, self).register( resource, event, trigger, payload=payload) self._handler = NsxV3TrunkHandler(self.plugin_driver) for event in (events.AFTER_CREATE, events.AFTER_DELETE): registry.subscribe(self._handler.trunk_event, resources.TRUNK, event) registry.subscribe(self._handler.subport_event, resources.SUBPORTS, event) LOG.debug("VMware NSXv3 trunk driver initialized.") ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2222543 vmware-nsx-15.0.1.dev143/vmware_nsx/services/vpnaas/0000755000175000017500000000000000000000000022503 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/vpnaas/__init__.py0000644000175000017500000000000000000000000024602 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2222543 vmware-nsx-15.0.1.dev143/vmware_nsx/services/vpnaas/common_v3/0000755000175000017500000000000000000000000024403 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/vpnaas/common_v3/__init__.py0000644000175000017500000000000000000000000026502 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/vpnaas/common_v3/ipsec_driver.py0000644000175000017500000001556600000000000027450 0ustar00coreycorey00000000000000# Copyright 2019 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import netaddr from oslo_log import log as logging from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants from neutron_lib import context as n_context from neutron_lib import exceptions as nexception from neutron_lib.plugins import directory from neutron_vpnaas.services.vpn import service_drivers from vmware_nsx.extensions import projectpluginmap from vmware_nsx.services.vpnaas.common_v3 import ipsec_utils LOG = logging.getLogger(__name__) IPSEC = 'ipsec' class RouterWithSNAT(nexception.BadRequest): message = _("Router %(router_id)s has a VPN service and cannot enable " "SNAT") class RouterWithOverlapNoSnat(nexception.BadRequest): message = _("Router %(router_id)s has a subnet overlapping with a VPN " "local subnet, and cannot disable SNAT") class RouterOverlapping(nexception.BadRequest): message = _("Router %(router_id)s interface is overlapping with a VPN " "local subnet and cannot be added") class NSXcommonIPsecVpnDriver(service_drivers.VpnDriver): def __init__(self, service_plugin, validator): self.vpn_plugin = service_plugin self._core_plugin = directory.get_plugin() if self._core_plugin.is_tvd_plugin(): # TVD only supports nsx-T, and not nsx-P self._core_plugin = self._core_plugin.get_plugin_by_type( projectpluginmap.NsxPlugins.NSX_T) super(NSXcommonIPsecVpnDriver, self).__init__( service_plugin, validator) registry.subscribe( self._verify_overlap_subnet, resources.ROUTER_INTERFACE, events.BEFORE_CREATE) @property def l3_plugin(self): return self._core_plugin @property def service_type(self): return IPSEC def _get_dpd_profile_name(self, connection): return (connection['name'] or connection['id'])[:240] + '-dpd-profile' def _find_vpn_service_port(self, context, router_id): """Look for the neutron port created for the vpnservice of a router""" filters = {'device_id': ['router-' + router_id], 'device_owner': [ipsec_utils.VPN_PORT_OWNER]} ports = 
self.l3_plugin.get_ports(context, filters=filters) if ports: return ports[0] def _get_service_local_address(self, context, vpnservice): """Find/Allocate a port on the external network to allocate the ip to be used as the local ip of this service """ router_id = vpnservice['router_id'] # check if this router already have an IP port = self._find_vpn_service_port(context, router_id) if not port: # create a new port, on the external network of the router # Note(asarfaty): using a unique device owner and device id to # make sure tis port will be ignored in certain queries ext_net = vpnservice['router']['gw_port']['network_id'] port_data = { 'port': { 'network_id': ext_net, 'name': 'VPN local address port', 'admin_state_up': True, 'device_id': 'router-' + router_id, 'device_owner': ipsec_utils.VPN_PORT_OWNER, 'fixed_ips': constants.ATTR_NOT_SPECIFIED, 'mac_address': constants.ATTR_NOT_SPECIFIED, 'port_security_enabled': False, 'tenant_id': vpnservice['tenant_id']}} port = self.l3_plugin.base_create_port(context, port_data) # return the port ip(v4) as the local address for fixed_ip in port['fixed_ips']: if (len(port['fixed_ips']) == 1 or netaddr.IPNetwork(fixed_ip['ip_address']).version == 4): return fixed_ip['ip_address'] def _update_status(self, context, vpn_service_id, ipsec_site_conn_id, status, updated_pending_status=True): vpn_status = {'id': vpn_service_id, 'updated_pending_status': updated_pending_status, 'status': status, 'ipsec_site_connections': {}} if ipsec_site_conn_id: ipsec_site_conn = { 'status': status, 'updated_pending_status': updated_pending_status} vpn_status['ipsec_site_connections'] = { ipsec_site_conn_id: ipsec_site_conn} status_list = [vpn_status] self.service_plugin.update_status_by_agent(context, status_list) def _check_subnets_overlap_with_all_conns(self, context, subnets): # find all vpn services with connections filters = {'status': [constants.ACTIVE, constants.DOWN]} connections = self.vpn_plugin.get_ipsec_site_connections( context, 
filters=filters) # Check if any of the connections overlap with the given subnets for conn in connections: local_cidrs = self.validator._get_local_cidrs(context, conn) if netaddr.IPSet(subnets) & netaddr.IPSet(local_cidrs): return False return True def _verify_overlap_subnet(self, resource, event, trigger, **kwargs): """Upon router interface creation validation overlapping with vpn""" router_db = kwargs.get('router_db') port = kwargs.get('port') if not port or not router_db: LOG.warning("NSX V3 VPNaaS ROUTER_INTERFACE BEFORE_CREATE " "callback didn't get all the relevant information") return if router_db.enable_snat: # checking only no-snat routers return admin_con = n_context.get_admin_context() # Get the (ipv4) subnet of the interface subnet_id = None for fixed_ip in port['fixed_ips']: if netaddr.IPNetwork(fixed_ip['ip_address']).version == 4: subnet_id = fixed_ip.get('subnet_id') break if subnet_id: subnet = self._core_plugin.get_subnet(admin_con, subnet_id) # find all vpn services with connections if not self._check_subnets_overlap_with_all_conns( admin_con, [subnet['cidr']]): raise RouterOverlapping(router_id=kwargs.get('router_id')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/vpnaas/common_v3/ipsec_utils.py0000644000175000017500000000440300000000000027301 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from vmware_nsxlib.v3 import vpn_ipsec VPN_PORT_OWNER = 'vpnservice' ENCRYPTION_ALGORITHM_MAP = { 'aes-128': vpn_ipsec.EncryptionAlgorithmTypes.ENCRYPTION_ALGORITHM_128, 'aes-256': vpn_ipsec.EncryptionAlgorithmTypes.ENCRYPTION_ALGORITHM_256, } AUTH_ALGORITHM_MAP = { 'sha1': vpn_ipsec.DigestAlgorithmTypes.DIGEST_ALGORITHM_SHA1, 'sha256': vpn_ipsec.DigestAlgorithmTypes.DIGEST_ALGORITHM_SHA256, } AUTH_ALGORITHM_MAP_P = { 'sha1': vpn_ipsec.DigestAlgorithmTypes.DIGEST_ALGORITHM_SHA1, 'sha256': vpn_ipsec.DigestAlgorithmTypes.DIGEST_ALGORITHM_SHA256, 'sha384': vpn_ipsec.DigestAlgorithmTypes.DIGEST_ALGORITHM_SHA2_384, 'sha512': vpn_ipsec.DigestAlgorithmTypes.DIGEST_ALGORITHM_SHA2_512, } PFS_MAP = { 'group14': vpn_ipsec.DHGroupTypes.DH_GROUP_14 } PFS_MAP_P = { 'group2': vpn_ipsec.DHGroupTypes.DH_GROUP_2, 'group5': vpn_ipsec.DHGroupTypes.DH_GROUP_5, 'group14': vpn_ipsec.DHGroupTypes.DH_GROUP_14, } IKE_VERSION_MAP = { 'v1': vpn_ipsec.IkeVersionTypes.IKE_VERSION_V1, 'v2': vpn_ipsec.IkeVersionTypes.IKE_VERSION_V2, } ENCAPSULATION_MODE_MAP = { 'tunnel': vpn_ipsec.EncapsulationModeTypes.ENCAPSULATION_MODE_TUNNEL } TRANSFORM_PROTOCOL_MAP = { 'esp': vpn_ipsec.TransformProtocolTypes.TRANSFORM_PROTOCOL_ESP } DPD_ACTION_MAP = { 'hold': vpn_ipsec.DpdProfileActionTypes.DPD_PROFILE_ACTION_HOLD, 'disabled': None } INITIATION_MODE_MAP = { 'bi-directional': (vpn_ipsec.ConnectionInitiationModeTypes. INITIATION_MODE_INITIATOR), 'response-only': (vpn_ipsec.ConnectionInitiationModeTypes. INITIATION_MODE_RESPOND_ONLY) } DEFAULT_LOG_LEVEL = vpn_ipsec.IkeLogLevelTypes.LOG_LEVEL_ERROR ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/vpnaas/common_v3/ipsec_validator.py0000644000175000017500000004327600000000000030141 0ustar00coreycorey00000000000000# Copyright 2019 VMware, Inc. 
# All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from oslo_config import cfg from oslo_log import log as logging from neutron_lib import constants from neutron_vpnaas.db.vpn import vpn_validator from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.extensions import projectpluginmap from vmware_nsx.services.vpnaas.common_v3 import ipsec_utils from vmware_nsxlib.v3 import vpn_ipsec LOG = logging.getLogger(__name__) class IPsecCommonValidator(vpn_validator.VpnReferenceValidator): """Validator methods for Vmware NSX-V3 & Policy VPN support""" def __init__(self, service_plugin): super(IPsecCommonValidator, self).__init__() self.vpn_plugin = service_plugin self._core_plugin = self.core_plugin if self._core_plugin.is_tvd_plugin(): # TVD currently supports only NSX-T and not NSX-P self._core_plugin = self._core_plugin.get_plugin_by_type( projectpluginmap.NsxPlugins.NSX_T) def _validate_policy_lifetime(self, policy_info, policy_type): """NSX supports only units=seconds""" lifetime = policy_info.get('lifetime') if not lifetime: return if lifetime.get('units') != 'seconds': msg = _("Unsupported policy lifetime %(val)s in %(pol)s policy. 
" "Only seconds lifetime is supported.") % { 'val': lifetime, 'pol': policy_type} raise nsx_exc.NsxVpnValidationError(details=msg) value = lifetime.get('value') if policy_type == 'IKE': limits = vpn_ipsec.IkeSALifetimeLimits else: limits = vpn_ipsec.IPsecSALifetimeLimits if (value and (value < limits.SA_LIFETIME_MIN or value > limits.SA_LIFETIME_MAX)): msg = _("Unsupported policy lifetime %(value)s in %(pol)s policy. " "Value range is [%(min)s-%(max)s].") % { 'value': value, 'pol': policy_type, 'min': limits.SA_LIFETIME_MIN, 'max': limits.SA_LIFETIME_MAX} raise nsx_exc.NsxVpnValidationError(details=msg) @property def auth_algorithm_map(self): pass @property def pfs_map(self): pass def _validate_policy_auth_algorithm(self, policy_info, policy_type): """NSX supports only SHA1 and SHA256""" auth = policy_info.get('auth_algorithm') if auth and auth not in self.auth_algorithm_map: msg = _("Unsupported auth_algorithm: %(algo)s in %(pol)s policy. " "Please select one of the following supported algorithms: " "%(supported_algos)s") % { 'pol': policy_type, 'algo': auth, 'supported_algos': self.auth_algorithm_map.keys()} raise nsx_exc.NsxVpnValidationError(details=msg) def _validate_policy_encryption_algorithm(self, policy_info, policy_type): encryption = policy_info.get('encryption_algorithm') if (encryption and encryption not in ipsec_utils.ENCRYPTION_ALGORITHM_MAP): msg = _("Unsupported encryption_algorithm: %(algo)s in %(pol)s " "policy. Please select one of the following supported " "algorithms: %(supported_algos)s") % { 'algo': encryption, 'pol': policy_type, 'supported_algos': ipsec_utils.ENCRYPTION_ALGORITHM_MAP.keys()} raise nsx_exc.NsxVpnValidationError(details=msg) def _validate_policy_pfs(self, policy_info, policy_type): pfs = policy_info.get('pfs') if pfs and pfs not in self.pfs_map: msg = _("Unsupported pfs: %(pfs)s in %(pol)s policy. 
Please " "select one of the following pfs: " "%(supported_pfs)s") % { 'pfs': pfs, 'pol': policy_type, 'supported_pfs': self.pfs_map.keys()} raise nsx_exc.NsxVpnValidationError(details=msg) def _validate_dpd(self, connection): dpd_info = connection.get('dpd') if not dpd_info: return action = dpd_info.get('action') if action not in ipsec_utils.DPD_ACTION_MAP.keys(): msg = _("Unsupported DPD action: %(action)s! Currently only " "%(supported)s is supported.") % { 'action': action, 'supported': ipsec_utils.DPD_ACTION_MAP.keys()} raise nsx_exc.NsxVpnValidationError(details=msg) timeout = dpd_info.get('timeout') if (timeout < vpn_ipsec.DpdProfileTimeoutLimits.DPD_TIMEOUT_MIN or timeout > vpn_ipsec.DpdProfileTimeoutLimits.DPD_TIMEOUT_MAX): msg = _("Unsupported DPD timeout: %(timeout)s. Value range is " "[%(min)s-%(max)s].") % { 'timeout': timeout, 'min': vpn_ipsec.DpdProfileTimeoutLimits.DPD_TIMEOUT_MIN, 'max': vpn_ipsec.DpdProfileTimeoutLimits.DPD_TIMEOUT_MAX} raise nsx_exc.NsxVpnValidationError(details=msg) def _validate_psk(self, connection): if 'psk' in connection and not connection['psk']: msg = _("'psk' cannot be empty or null when authentication " "mode is psk") raise nsx_exc.NsxVpnValidationError(details=msg) def _get_local_cidrs(self, context, ipsec_site_conn): vpnservice_id = ipsec_site_conn.get('vpnservice_id') vpnservice = self.vpn_plugin._get_vpnservice(context, vpnservice_id) if vpnservice['subnet']: local_cidrs = [vpnservice['subnet']['cidr']] else: # local endpoint group local_cidrs = [] self.vpn_plugin.get_endpoint_info(context, ipsec_site_conn) subnets_ids = ipsec_site_conn['local_epg_subnets']['endpoints'] for sub in subnets_ids: subnet = self.l3_plugin.get_subnet(context, sub) local_cidrs.append(subnet['cidr']) return local_cidrs def _get_peer_cidrs(self, context, ipsec_site_conn): if ipsec_site_conn['peer_cidrs']: return ipsec_site_conn['peer_cidrs'] else: # peer endpoint group self.vpn_plugin.get_endpoint_info(context, ipsec_site_conn) return 
ipsec_site_conn['peer_epg_cidrs']['endpoints'] def _check_policy_rules_overlap(self, context, ipsec_site_conn): """validate no overlapping policy rules The nsx does not support overlapping policy rules cross all tenants, and tier0 routers """ connections = self.vpn_plugin.get_ipsec_site_connections( context.elevated()) if not connections: return local_cidrs = self._get_local_cidrs(context, ipsec_site_conn) peer_cidrs = self._get_peer_cidrs(context, ipsec_site_conn) for conn in connections: # skip this connection and connections in ERROR state if (conn['id'] == ipsec_site_conn.get('id') or conn['status'] == constants.ERROR): continue conn_peer_cidrs = self._get_peer_cidrs(context.elevated(), conn) if netaddr.IPSet(conn_peer_cidrs) & netaddr.IPSet(peer_cidrs): # check if the local cidr also overlaps conn_local_cidr = self._get_local_cidrs( context.elevated(), conn) if netaddr.IPSet(conn_local_cidr) & netaddr.IPSet(local_cidrs): msg = (_("Cannot create a connection with overlapping " "local and peer cidrs (%(local)s and %(peer)s) " "as connection %(id)s") % {'local': local_cidrs, 'peer': peer_cidrs, 'id': conn['id']}) raise nsx_exc.NsxVpnValidationError(details=msg) def _check_unique_addresses(self, context, ipsec_site_conn): """Validate no repeating local & peer addresses (of all tenants) The nsx does not support it cross all tenants, and tier0 routers """ vpnservice_id = ipsec_site_conn.get('vpnservice_id') local_addr = self._get_service_local_address(context, vpnservice_id) peer_address = ipsec_site_conn.get('peer_address') filters = {'peer_address': [peer_address]} connections = self.vpn_plugin.get_ipsec_site_connections( context.elevated(), filters=filters) for conn in connections: # skip this connection and connections in ERROR state if (conn['id'] == ipsec_site_conn.get('id') or conn['status'] == constants.ERROR): continue # this connection has the same peer addr as ours. 
# check the service local address srv_id = conn.get('vpnservice_id') srv_local = self._get_service_local_address( context.elevated(), srv_id) if srv_local == local_addr: msg = (_("Cannot create another connection with the same " "local address %(local)s and peer address %(peer)s " "as connection %(id)s") % {'local': local_addr, 'peer': peer_address, 'id': conn['id']}) raise nsx_exc.NsxVpnValidationError(details=msg) def _check_advertisment_overlap(self, context, ipsec_site_conn): """Validate there is no overlapping advertisement of networks The plugin advertise all no-snat routers networks + vpn local networks. The NSX does not allow different Tier1 router to advertise the same subnets. """ admin_con = context.elevated() srv_id = ipsec_site_conn.get('vpnservice_id') srv = self.vpn_plugin._get_vpnservice(admin_con, srv_id) this_router = srv['router_id'] local_cidrs = self._get_local_cidrs(context, ipsec_site_conn) # get all subnets of no-snat routers all_routers = self._core_plugin.get_routers(admin_con) nosnat_routers = [rtr for rtr in all_routers if (rtr['id'] != this_router and rtr.get('external_gateway_info') and not rtr['external_gateway_info'].get( 'enable_snat', cfg.CONF.enable_snat_by_default))] for rtr in nosnat_routers: if rtr['id'] == this_router: continue # go over the subnets of this router subnets = self._core_plugin._find_router_subnets_cidrs( admin_con, rtr['id']) if subnets and netaddr.IPSet(subnets) & netaddr.IPSet(local_cidrs): msg = (_("Cannot create connection with overlapping local " "cidrs %(local)s which was already advertised by " "no-snat router %(rtr)s") % {'local': subnets, 'rtr': rtr['id']}) raise nsx_exc.NsxVpnValidationError(details=msg) # add all vpn local subnets connections = self.vpn_plugin.get_ipsec_site_connections(admin_con) for conn in connections: # skip this connection and connections in ERROR state if (conn['id'] == ipsec_site_conn.get('id') or conn['status'] == constants.ERROR): continue # check the service local address 
conn_srv_id = conn.get('vpnservice_id') conn_srv = self.vpn_plugin._get_vpnservice(admin_con, conn_srv_id) if conn_srv['router_id'] == this_router: continue conn_cidrs = self._get_local_cidrs(context, conn) if netaddr.IPSet(conn_cidrs) & netaddr.IPSet(local_cidrs): msg = (_("Cannot create connection with overlapping local " "cidr %(local)s which was already advertised by " "router %(rtr)s and connection %(conn)s") % { 'local': conn_cidrs, 'rtr': conn_srv['router_id'], 'conn': conn['id']}) raise nsx_exc.NsxVpnValidationError(details=msg) def validate_ipsec_site_connection(self, context, ipsec_site_conn): """Called upon create/update of a connection""" self._validate_dpd(ipsec_site_conn) self._validate_psk(ipsec_site_conn) ike_policy_id = ipsec_site_conn.get('ikepolicy_id') if ike_policy_id: ikepolicy = self.vpn_plugin.get_ikepolicy(context, ike_policy_id) self.validate_ike_policy(context, ikepolicy) ipsec_policy_id = ipsec_site_conn.get('ipsecpolicy_id') if ipsec_policy_id: ipsecpolicy = self.vpn_plugin.get_ipsecpolicy(context, ipsec_policy_id) self.validate_ipsec_policy(context, ipsecpolicy) if ipsec_site_conn.get('vpnservice_id'): self._check_advertisment_overlap(context, ipsec_site_conn) self._check_unique_addresses(context, ipsec_site_conn) self._check_policy_rules_overlap(context, ipsec_site_conn) #TODO(asarfaty): IPv6 is not yet supported. 
add validation def _get_service_local_address(self, context, vpnservice_id): """The local address of the service is assigned upon creation From the attached external network pool """ vpnservice = self.vpn_plugin._get_vpnservice(context, vpnservice_id) return vpnservice['external_v4_ip'] def _validate_t0_ha_mode(self, tier0_uuid): pass def _validate_router(self, context, router_id): # Verify that the router gw network is connected to an active-standby # Tier0 router router_db = self._core_plugin._get_router(context, router_id) tier0_uuid = self._core_plugin._get_tier0_uuid_by_router(context, router_db) self._validate_t0_ha_mode(tier0_uuid) def _support_endpoint_groups(self): """Can be implemented by each plugin""" return False def validate_vpnservice(self, context, vpnservice): """Called upon create/update of a service""" # Call general validations super(IPsecCommonValidator, self).validate_vpnservice( context, vpnservice) # Call specific NSX validations self._validate_router(context, vpnservice['router_id']) if not self._support_endpoint_groups() and not vpnservice['subnet_id']: # we currently do not support multiple subnets so a subnet must # be defined msg = _("Subnet must be defined in a service") raise nsx_exc.NsxVpnValidationError(details=msg) #TODO(asarfaty): IPv6 is not yet supported. add validation def validate_ipsec_policy(self, context, ipsec_policy): # Call general validations super(IPsecCommonValidator, self).validate_ipsec_policy( context, ipsec_policy) # Call specific NSX validations self._validate_policy_lifetime(ipsec_policy, "IPSec") self._validate_policy_auth_algorithm(ipsec_policy, "IPSec") self._validate_policy_encryption_algorithm(ipsec_policy, "IPSec") self._validate_policy_pfs(ipsec_policy, "IPSec") # Ensure IPSec policy encap mode is tunnel mode = ipsec_policy.get('encapsulation_mode') if mode and mode not in ipsec_utils.ENCAPSULATION_MODE_MAP.keys(): msg = _("Unsupported encapsulation mode: %s. 
Only 'tunnel' mode " "is supported.") % mode raise nsx_exc.NsxVpnValidationError(details=msg) # Ensure IPSec policy transform protocol is esp prot = ipsec_policy.get('transform_protocol') if prot and prot not in ipsec_utils.TRANSFORM_PROTOCOL_MAP.keys(): msg = _("Unsupported transform protocol: %s. Only 'esp' protocol " "is supported.") % prot raise nsx_exc.NsxVpnValidationError(details=msg) def validate_ike_policy(self, context, ike_policy): # Call general validations super(IPsecCommonValidator, self).validate_ike_policy( context, ike_policy) # Call specific NSX validations self._validate_policy_lifetime(ike_policy, "IKE") self._validate_policy_auth_algorithm(ike_policy, "IKE") self._validate_policy_encryption_algorithm(ike_policy, "IKE") self._validate_policy_pfs(ike_policy, "IKE") # 'aggressive' phase1-negotiation-mode is not supported if ike_policy.get('phase1-negotiation-mode', 'main') != 'main': msg = _("Unsupported phase1-negotiation-mode: %s! Only 'main' is " "supported.") % ike_policy['phase1-negotiation-mode'] raise nsx_exc.NsxVpnValidationError(details=msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/vpnaas/nsx_plugin.py0000644000175000017500000000511000000000000025240 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from neutron_vpnaas.services.vpn import plugin LOG = logging.getLogger(__name__) class NsxVPNPlugin(plugin.VPNDriverPlugin): """NSX plugin for VPNaaS. This plugin overrides get connection/s calls to issue a status update before them, and make sure the connections status is up to date """ def _update_nsx_connection_status(self, context, ipsec_site_conn_id): driver = self.drivers[self.default_provider] if hasattr(driver, 'get_ipsec_site_connection_status'): status = driver.get_ipsec_site_connection_status( context, ipsec_site_conn_id) if status: self._update_connection_status(context, ipsec_site_conn_id, status, False) def update_all_connection_status(self, context): connections = super(NsxVPNPlugin, self).get_ipsec_site_connections( context) if not connections: return # TODO(asarfaty): This will not scale well. Should use a bulk action # instead for the NSX api for connection in connections: self._update_nsx_connection_status(context, connection['id']) def get_ipsec_site_connection(self, context, ipsec_site_conn_id, fields=None): # update connection status if not fields or 'status' in fields: self._update_nsx_connection_status(context, ipsec_site_conn_id) # call super return super(NsxVPNPlugin, self).get_ipsec_site_connection( context, ipsec_site_conn_id, fields=fields) def get_ipsec_site_connections(self, context, filters=None, fields=None): # update connection status if not fields or 'status' in fields: self.update_all_connection_status(context) # call super return super(NsxVPNPlugin, self).get_ipsec_site_connections( context, filters=filters, fields=fields) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2222543 vmware-nsx-15.0.1.dev143/vmware_nsx/services/vpnaas/nsx_tvd/0000755000175000017500000000000000000000000024170 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 
vmware-nsx-15.0.1.dev143/vmware_nsx/services/vpnaas/nsx_tvd/__init__.py0000644000175000017500000000000000000000000026267 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/vpnaas/nsx_tvd/ipsec_driver.py0000644000175000017500000001177600000000000027234 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from neutron_lib.plugins import directory from neutron_vpnaas.services.vpn import service_drivers from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.extensions import projectpluginmap from vmware_nsx.plugins.nsx import utils as tvd_utils from vmware_nsx.services.vpnaas.nsx_tvd import ipsec_validator from vmware_nsx.services.vpnaas.nsxv import ipsec_driver as v_driver from vmware_nsx.services.vpnaas.nsxv3 import ipsec_driver as t_driver LOG = logging.getLogger(__name__) IPSEC = 'ipsec' class NSXIPsecVpnDriver(service_drivers.VpnDriver): """Wrapper driver to select the relevant driver for each VPNaaS request""" def __init__(self, service_plugin): self.vpn_plugin = service_plugin self._core_plugin = directory.get_plugin() validator = ipsec_validator.IPsecValidator(service_plugin) super(NSXIPsecVpnDriver, self).__init__(service_plugin, validator) # supported drivers: self.drivers = {} try: self.drivers[projectpluginmap.NsxPlugins.NSX_T] = ( t_driver.NSXv3IPsecVpnDriver(service_plugin)) except Exception as e: LOG.error("NSXIPsecVpnDriver failed to initialize the NSX-T " "driver: %s", e) self.drivers[projectpluginmap.NsxPlugins.NSX_T] = None try: self.drivers[projectpluginmap.NsxPlugins.NSX_V] = ( v_driver.NSXvIPsecVpnDriver(service_plugin)) except Exception as e: LOG.error("NSXIPsecVpnDriver failed to initialize the NSX-V " "driver: %s", e) self.drivers[projectpluginmap.NsxPlugins.NSX_V] = None @property def service_type(self): return IPSEC def _get_driver_for_project(self, project): plugin_type = tvd_utils.get_tvd_plugin_type_for_project(project) if not self.drivers.get(plugin_type): msg = (_("Project %(project)s with plugin %(plugin)s has no " "support for VPNaaS"), {'project': project, 'plugin': plugin_type}) raise nsx_exc.NsxPluginException(err_msg=msg) return self.drivers[plugin_type] def create_ipsec_site_connection(self, context, ipsec_site_conn): d = self._get_driver_for_project(ipsec_site_conn['tenant_id']) 
return d.create_ipsec_site_connection(context, ipsec_site_conn) def delete_ipsec_site_connection(self, context, ipsec_site_conn): d = self._get_driver_for_project(ipsec_site_conn['tenant_id']) return d.delete_ipsec_site_connection(context, ipsec_site_conn) def update_ipsec_site_connection(self, context, old_ipsec_conn, ipsec_site_conn): d = self._get_driver_for_project(old_ipsec_conn['tenant_id']) return d.update_ipsec_site_connection(context, old_ipsec_conn, ipsec_site_conn) def create_vpnservice(self, context, vpnservice): d = self._get_driver_for_project(vpnservice['tenant_id']) return d.create_vpnservice(context, vpnservice) def update_vpnservice(self, context, old_vpnservice, vpnservice): pass def delete_vpnservice(self, context, vpnservice): pass def _generate_ipsecvpn_firewall_rules(self, plugin_type, context, **kargs): d = self.drivers.get(plugin_type) if d: return d._generate_ipsecvpn_firewall_rules( plugin_type, context, **kargs) return [] def get_ipsec_site_connection_status(self, context, ipsec_site_conn_id): # Currently only NSX-T supports it. In the future we will need to # decide on the driver by the tenant driver = self.drivers.get(projectpluginmap.NsxPlugins.NSX_T) if driver and hasattr(driver, 'get_ipsec_site_connection_status'): return driver.get_ipsec_site_connection_status( context, ipsec_site_conn_id) def validate_router_gw_info(self, context, router_id, gw_info): # Currently only NSX-T supports it. In the future we will need to # decide on the driver by the tenant driver = self.drivers.get(projectpluginmap.NsxPlugins.NSX_T) if driver and hasattr(driver, 'validate_router_gw_info'): return driver.validate_router_gw_info( context, router_id, gw_info) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/vpnaas/nsx_tvd/ipsec_validator.py0000644000175000017500000001132600000000000027715 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. 
# All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from neutron_vpnaas.db.vpn import vpn_validator from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.extensions import projectpluginmap from vmware_nsx.plugins.nsx import utils as tvd_utils from vmware_nsx.services.vpnaas.nsxv import ipsec_validator as v_validator from vmware_nsx.services.vpnaas.nsxv3 import ipsec_validator as t_validator LOG = logging.getLogger(__name__) class IPsecValidator(vpn_validator.VpnReferenceValidator): """Wrapper validator for selecting the V/T validator to use""" def __init__(self, service_plugin): super(IPsecValidator, self).__init__() self.vpn_plugin = service_plugin # supported validatorss: self.validators = {} try: self.validators[projectpluginmap.NsxPlugins.NSX_T] = ( t_validator.IPsecV3Validator(service_plugin)) except Exception as e: LOG.error("IPsecValidator failed to initialize the NSX-T " "validator: %s", e) self.validators[projectpluginmap.NsxPlugins.NSX_T] = None try: self.validators[projectpluginmap.NsxPlugins.NSX_V] = ( v_validator.IPsecValidator(service_plugin)) except Exception as e: LOG.error("IPsecValidator failed to initialize the NSX-V " "validator: %s", e) self.validators[projectpluginmap.NsxPlugins.NSX_V] = None def _get_validator_for_project(self, project): plugin_type = tvd_utils.get_tvd_plugin_type_for_project(project) if not self.validators.get(plugin_type): msg = (_("Project 
%(project)s with plugin %(plugin)s has no " "support for VPNaaS"), {'project': project, 'plugin': plugin_type}) raise nsx_exc.NsxPluginException(err_msg=msg) return self.validators[plugin_type] def validate_ipsec_site_connection(self, context, ipsec_site_conn): if not ipsec_site_conn.get('tenant_id'): # nothing we can do here. return v = self._get_validator_for_project(ipsec_site_conn['tenant_id']) # first make sure the plugin is the same as the one of the vpnservice srv_id = ipsec_site_conn.get('vpnservice_id') srv = self.vpn_plugin._get_vpnservice(context, srv_id) srv_validator = self._get_validator_for_project(srv['tenant_id']) if v != srv_validator: err_msg = _('VPN service should belong to the same plugin ' 'as the connection') raise nsx_exc.NsxVpnValidationError(details=err_msg) return v.validate_ipsec_site_connection(context, ipsec_site_conn) def validate_vpnservice(self, context, vpnservice): if not vpnservice.get('tenant_id'): # This will happen during update. # nothing significant like router or subnet can be changes # so we can skip it. 
return v = self._get_validator_for_project(vpnservice['tenant_id']) # first make sure the router&subnet plugin matches the vpnservice router_id = vpnservice['router_id'] p = self.core_plugin._get_plugin_from_router_id(context, router_id) if self.validators.get(p.plugin_type()) != v: err_msg = _('Router & subnet should belong to the same plugin ' 'as the VPN service') raise nsx_exc.NsxVpnValidationError(details=err_msg) return v.validate_vpnservice(context, vpnservice) def validate_ipsec_policy(self, context, ipsec_policy): if not ipsec_policy.get('tenant_id'): # nothing we can do here return v = self._get_validator_for_project(ipsec_policy['tenant_id']) return v.validate_ipsec_policy(context, ipsec_policy) def validate_ike_policy(self, context, ike_policy): if not ike_policy.get('tenant_id'): # nothing we can do here return v = self._get_validator_for_project(ike_policy['tenant_id']) return v.validate_ike_policy(context, ike_policy) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/vpnaas/nsx_tvd/plugin.py0000644000175000017500000000217000000000000026040 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from vmware_nsx.plugins.nsx import utils as tvd_utils from vmware_nsx.services.vpnaas import nsx_plugin @tvd_utils.filter_plugins class VPNPlugin(nsx_plugin.NsxVPNPlugin): """NSX-TV plugin for VPNaaS. This plugin adds separation between T/V instances """ methods_to_separate = ['get_ipsec_site_connections', 'get_ikepolicies', 'get_ipsecpolicies', 'get_vpnservices', 'get_endpoint_groups'] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2222543 vmware-nsx-15.0.1.dev143/vmware_nsx/services/vpnaas/nsxp/0000755000175000017500000000000000000000000023473 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/vpnaas/nsxp/__init__.py0000644000175000017500000000000000000000000025572 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/vpnaas/nsxp/ipsec_driver.py0000644000175000017500000010167400000000000026534 0ustar00coreycorey00000000000000# Copyright 2019 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from neutron_lib import constants from neutron_lib import context as n_context from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.services.vpnaas.common_v3 import ipsec_driver as common_driver from vmware_nsx.services.vpnaas.common_v3 import ipsec_utils from vmware_nsx.services.vpnaas.nsxp import ipsec_validator from vmware_nsxlib.v3 import exceptions as nsx_lib_exc from vmware_nsxlib.v3 import nsx_constants as consts from vmware_nsxlib.v3.policy import constants as policy_constants LOG = logging.getLogger(__name__) IPSEC = 'ipsec' class NSXpIPsecVpnDriver(common_driver.NSXcommonIPsecVpnDriver): def __init__(self, service_plugin): validator = ipsec_validator.IPsecNsxPValidator(service_plugin) super(NSXpIPsecVpnDriver, self).__init__(service_plugin, validator) self._nsxpolicy = self._core_plugin.nsxpolicy self._nsx_vpn = self._nsxpolicy.ipsec_vpn def _get_service_local_cidr_group(self, context, vpnservice, cidrs): """Create/Override the group for the local cidrs of a vpnservice used for the edge firewall rules allowing the vpn traffic. Return the group id, which is the same as the service id. 
""" group_id = vpnservice['id'] expr = self._nsxpolicy.group.build_ip_address_expression(cidrs) tags = self._nsxpolicy.build_v3_tags_payload( vpnservice, resource_type='os-vpn-service-id', project_name=context.tenant_name) self._nsxpolicy.group.create_or_overwrite_with_conditions( "Local group for VPN service %s" % vpnservice['id'], policy_constants.DEFAULT_DOMAIN, group_id=group_id, conditions=[expr], tags=tags) return group_id def _delete_service_local_cidr_group(self, vpnservice): try: self._nsxpolicy.group.delete( policy_constants.DEFAULT_DOMAIN, group_id=vpnservice['id']) except nsx_lib_exc.ResourceNotFound: # If there is no FWaaS on the router it may not have been created LOG.debug("Cannot delete local CIDR group for vpnservice %s as " "it was not found", vpnservice['id']) def _get_connection_local_cidr_group_name(self, connection): return 'local_%s' % connection['id'] def _get_connection_local_cidr_group(self, context, connection, cidrs): """Create/Override the group for the local cidrs of a connection used for the edge firewall rules allowing the vpn traffic. Return the group id, which is the same as the connection id. 
""" group_id = self._get_connection_local_cidr_group_name(connection) expr = self._nsxpolicy.group.build_ip_address_expression(cidrs) tags = self._nsxpolicy.build_v3_tags_payload( connection, resource_type='os-vpn-connection-id', project_name=context.tenant_name) self._nsxpolicy.group.create_or_overwrite_with_conditions( "Local group for VPN connection %s" % connection['id'], policy_constants.DEFAULT_DOMAIN, group_id=group_id, conditions=[expr], tags=tags) return group_id def _delete_connection_local_cidr_group(self, connection): try: group_id = self._get_connection_local_cidr_group_name(connection) self._nsxpolicy.group.delete( policy_constants.DEFAULT_DOMAIN, group_id=group_id) except nsx_lib_exc.ResourceNotFound: # If there is no FWaaS on the router it may not have been created LOG.debug("Cannot delete local CIDR group for connection %s as " "it was not found", connection['id']) def _get_connection_peer_cidr_group_name(self, connection): return 'peer_%s' % connection['id'] def _get_peer_cidr_group(self, context, conn): """Create/Override the group for the peer cidrs of a connection used for the edge firewall rules allowing the vpn traffic. Return the group id, which is the same as the connection id. 
""" group_ips = self.validator._get_peer_cidrs(context, conn) group_id = self._get_connection_peer_cidr_group_name(conn) expr = self._nsxpolicy.group.build_ip_address_expression(group_ips) tags = self._nsxpolicy.build_v3_tags_payload( conn, resource_type='os-vpn-connection-id', project_name=context.tenant_name) self._nsxpolicy.group.create_or_overwrite_with_conditions( "Peer group for VPN connection %s" % conn['id'], policy_constants.DEFAULT_DOMAIN, group_id=group_id, conditions=[expr], tags=tags) return group_id def _delete_peer_cidr_group(self, conn): try: group_id = self._get_connection_peer_cidr_group_name(conn) self._nsxpolicy.group.delete( policy_constants.DEFAULT_DOMAIN, group_id=group_id) except nsx_lib_exc.ResourceNotFound: # If there is no FWaaS on the router it may not have been created LOG.debug("Cannot delete peer CIDR group for connection %s as " "it was not found", conn['id']) def _generate_ipsecvpn_firewall_rules(self, plugin_type, context, router_id=None): """Return the firewall rules needed to allow vpn traffic""" fw_rules = [] # get all the active services of this router filters = {'router_id': [router_id], 'status': [constants.ACTIVE]} services = self.vpn_plugin.get_vpnservices( context.elevated(), filters=filters) if not services: return fw_rules for srv in services: subnet_id = None if srv['subnet_id']: subnet_id = srv['subnet_id'] subnet = self.l3_plugin.get_subnet( context.elevated(), subnet_id) local_cidrs = [subnet['cidr']] local_group = self._get_service_local_cidr_group( context, srv, local_cidrs) # get all the non-errored connections of this service filters = {'vpnservice_id': [srv['id']], 'status': [constants.ACTIVE, constants.DOWN]} connections = self.vpn_plugin.get_ipsec_site_connections( context.elevated(), filters=filters) for conn in connections: if not subnet_id: # Get local endpoint from group local_cidrs = self.validator._get_local_cidrs( context.elevated(), conn) local_group = self._get_connection_local_cidr_group( context, 
conn, local_cidrs) peer_group = self._get_peer_cidr_group( context.elevated(), conn) fw_rules.append(self._nsxpolicy.gateway_policy.build_entry( 'VPN connection ' + conn['id'], policy_constants.DEFAULT_DOMAIN, router_id, action=consts.FW_ACTION_ALLOW, dest_groups=[peer_group], source_groups=[local_group], scope=[self._nsxpolicy.tier1.get_path(router_id)], direction=consts.IN_OUT)) return fw_rules def _update_firewall_rules(self, context, vpnservice, conn, delete=False): LOG.debug("Updating vpn firewall rules for router %s", vpnservice['router_id']) self._core_plugin.update_router_firewall( context, vpnservice['router_id']) # if it is during delete - try to delete the group of this connection if delete: self._delete_peer_cidr_group(conn) self._delete_connection_local_cidr_group(conn) def update_router_advertisement(self, context, router_id): """Advertise the local subnets of all the services on the router""" # Do nothing in case of a router with no GW or no-snat router # (as it is already advertised) rtr = self.l3_plugin.get_router(context, router_id) if (not rtr.get('external_gateway_info') or not rtr['external_gateway_info'].get('enable_snat', True)): return LOG.debug("Updating router advertisement rules for router %s", router_id) rules = [] # get all the active services of this router filters = {'router_id': [router_id], 'status': [constants.ACTIVE]} services = self.vpn_plugin.get_vpnservices( context.elevated(), filters=filters) rule_name_pref = 'VPN advertisement service' has_connections = False for srv in services: # use only services with non-errored connections filters = {'vpnservice_id': [srv['id']], 'status': [constants.ACTIVE, constants.DOWN]} connections = self.vpn_plugin.get_ipsec_site_connections( context.elevated(), filters=filters) if not connections: continue has_connections = True if srv['subnet_id']: subnet = self.l3_plugin.get_subnet( context.elevated(), srv['subnet_id']) local_cidrs = [subnet['cidr']] else: # get all connections local endpoints 
cidrs local_cidrs = [] for conn in connections: local_cidrs.extend( self.validator._get_local_cidrs( context.elevated(), conn)) rules.append(self._nsxpolicy.tier1.build_advertisement_rule( "%s %s" % (rule_name_pref, srv['id']), policy_constants.ADV_RULE_PERMIT, policy_constants.ADV_RULE_OPERATOR_GE, [policy_constants.ADV_RULE_TIER1_IPSEC_LOCAL_ENDPOINT], local_cidrs)) self._nsxpolicy.tier1.update_advertisement_rules( router_id, rules, name_prefix=rule_name_pref) # Also update the ipsec endpoints advertisement self._nsxpolicy.tier1.update_route_advertisement( router_id, ipsec_endpoints=has_connections) def _nsx_tags(self, context, object): return self._nsxpolicy.build_v3_tags_payload( object, resource_type='os-vpn-connection-id', project_name=context.tenant_name) def _create_ike_profile(self, context, connection): """Create an ike profile for a connection Creating/overwriting IKE profile based on the openstack ike policy upon connection creation. There is no driver callback for profiles creation so it has to be done on connection creation. 
""" ike_policy_id = connection['ikepolicy_id'] ikepolicy = self.vpn_plugin.get_ikepolicy(context, ike_policy_id) tags = self._nsxpolicy.build_v3_tags_payload( ikepolicy, resource_type='os-vpn-ikepol-id', project_name=context.tenant_name) try: profile_id = self._nsx_vpn.ike_profile.create_or_overwrite( ikepolicy['name'] or ikepolicy['id'], profile_id=ikepolicy['id'], description=ikepolicy['description'], encryption_algorithms=[ipsec_utils.ENCRYPTION_ALGORITHM_MAP[ ikepolicy['encryption_algorithm']]], digest_algorithms=[ipsec_utils.AUTH_ALGORITHM_MAP_P[ ikepolicy['auth_algorithm']]], ike_version=ipsec_utils.IKE_VERSION_MAP[ ikepolicy['ike_version']], dh_groups=[ipsec_utils.PFS_MAP_P[ikepolicy['pfs']]], sa_life_time=ikepolicy['lifetime']['value'], tags=tags) except nsx_lib_exc.ManagerError as e: msg = _("Failed to create an ike profile: %s") % e raise nsx_exc.NsxPluginException(err_msg=msg) return profile_id def _delete_ike_profile(self, ikeprofile_id): try: self._nsx_vpn.ike_profile.delete(ikeprofile_id) except nsx_lib_exc.ResourceInUse: # Still in use by another connection LOG.info("IKE profile %s cannot be deleted yet, because " "another connection still uses it", ikeprofile_id) def _create_ipsec_profile(self, context, connection): """Create a tunnel profile for a connection Creating/overwriting tunnel profile based on the openstack ipsec policy upon connection creation. There is no driver callback for profiles creation so it has to be done on connection creation. 
""" ipsec_policy_id = connection['ipsecpolicy_id'] ipsecpolicy = self.vpn_plugin.get_ipsecpolicy( context, ipsec_policy_id) tags = self._nsxpolicy.build_v3_tags_payload( ipsecpolicy, resource_type='os-vpn-ipsecpol-id', project_name=context.tenant_name) try: profile_id = self._nsx_vpn.tunnel_profile.create_or_overwrite( ipsecpolicy['name'] or ipsecpolicy['id'], profile_id=ipsecpolicy['id'], description=ipsecpolicy['description'], encryption_algorithms=[ipsec_utils.ENCRYPTION_ALGORITHM_MAP[ ipsecpolicy['encryption_algorithm']]], digest_algorithms=[ipsec_utils.AUTH_ALGORITHM_MAP_P[ ipsecpolicy['auth_algorithm']]], dh_groups=[ipsec_utils.PFS_MAP_P[ipsecpolicy['pfs']]], sa_life_time=ipsecpolicy['lifetime']['value'], tags=tags) except nsx_lib_exc.ManagerError as e: msg = _("Failed to create a tunnel profile: %s") % e raise nsx_exc.NsxPluginException(err_msg=msg) return profile_id def _delete_ipsec_profile(self, ipsecprofile_id): try: self._nsx_vpn.tunnel_profile.delete(ipsecprofile_id) except nsx_lib_exc.ResourceInUse: # Still in use by another connection LOG.info("Tunnel profile %s cannot be deleted yet, because " "another connection still uses it", ipsecprofile_id) def _create_dpd_profile(self, context, connection): """Create a DPD profile for a connection Creating/overwriting DPD profile based on the openstack ipsec connection configuration upon connection creation. There is no driver callback for profiles creation so it has to be done on connection creation. 
""" # TODO(asarfaty) consider reusing profiles based on values dpd_info = connection['dpd'] try: profile_id = self._nsx_vpn.dpd_profile.create_or_overwrite( self._get_dpd_profile_name(connection), profile_id=connection['id'], description='neutron dpd profile %s' % connection['id'], dpd_probe_interval=dpd_info.get('timeout'), enabled=True if dpd_info.get('action') == 'hold' else False, tags=self._nsx_tags(context, connection)) except nsx_lib_exc.ManagerError as e: msg = _("Failed to create a DPD profile: %s") % e raise nsx_exc.NsxPluginException(err_msg=msg) return profile_id def _delete_dpd_profile(self, dpdprofile_id): self._nsx_vpn.dpd_profile.delete(dpdprofile_id) def _update_dpd_profile(self, connection): dpd_info = connection['dpd'] self._nsx_vpn.dpd_profile.update( connection['id'], name=self._get_dpd_profile_name(connection), dpd_probe_interval=dpd_info.get('timeout'), enabled=True if dpd_info.get('action') == 'hold' else False) def _create_local_endpoint(self, context, connection, vpnservice): """Creating/overwrite an NSX local endpoint for a logical router This endpoint can be reused by other connections, and will be deleted when the router vpn service is deleted. 
""" # use the router GW as the local ip router_id = vpnservice['router']['id'] local_addr = vpnservice['external_v4_ip'] # Add the neutron router-id to the tags to help search later tags = self._nsxpolicy.build_v3_tags_payload( {'id': router_id, 'project_id': vpnservice['project_id']}, resource_type='os-neutron-router-id', project_name=context.tenant_name) try: ep_client = self._nsx_vpn.local_endpoint local_endpoint_id = ep_client.create_or_overwrite( 'Local endpoint for OS VPNaaS on router %s' % router_id, router_id, router_id, endpoint_id=router_id, local_address=local_addr, tags=tags) except nsx_lib_exc.ManagerError as e: msg = _("Failed to create a local endpoint: %s") % e raise nsx_exc.NsxPluginException(err_msg=msg) return local_endpoint_id def _delete_local_endpoint(self, vpnservice): router_id = vpnservice['router']['id'] ctx = n_context.get_admin_context() port = self._find_vpn_service_port(ctx, router_id) if port: self._nsx_vpn.local_endpoint.delete( router_id, router_id, router_id) self.l3_plugin.delete_port(ctx, port['id'], force_delete_vpn=True) def _get_session_rules(self, context, connection): peer_cidrs = self.validator._get_peer_cidrs(context, connection) local_cidrs = self.validator._get_local_cidrs(context, connection) rule = self._nsx_vpn.session.build_rule( connection['name'] or connection['id'], connection['id'], source_cidrs=local_cidrs, destination_cidrs=peer_cidrs) return [rule] def _create_session(self, context, connection, vpnservice, local_ep_id, ikeprofile_id, ipsecprofile_id, dpdprofile_id, rules, enabled=True): try: router_id = vpnservice['router_id'] session_id = self._nsx_vpn.session.create_or_overwrite( connection['name'] or connection['id'], tier1_id=router_id, vpn_service_id=router_id, session_id=connection['id'], description=connection['description'], peer_address=connection['peer_address'], peer_id=connection['peer_id'], psk=connection['psk'], rules=rules, dpd_profile_id=dpdprofile_id, ike_profile_id=ikeprofile_id, 
tunnel_profile_id=ipsecprofile_id, local_endpoint_id=local_ep_id, enabled=enabled, tags=self._nsx_tags(context, connection)) except nsx_lib_exc.ManagerError as e: msg = _("Failed to create a session: %s") % e raise nsx_exc.NsxPluginException(err_msg=msg) return session_id def _update_session(self, connection, vpnservice, rules=None, enabled=True): router_id = vpnservice['router_id'] args = {'enabled': enabled} if rules is not None: args['rules'] = rules self._nsx_vpn.session.update( router_id, router_id, connection['id'], name=connection['name'] or connection['id'], description=connection['description'], peer_address=connection['peer_address'], peer_id=connection['peer_id'], psk=connection['psk'], **args) def get_ipsec_site_connection_status(self, context, ipsec_site_conn_id): # find out the router-id of this connection conn = self.vpn_plugin._get_ipsec_site_connection( context, ipsec_site_conn_id) vpnservice_id = conn.vpnservice_id vpnservice = self.service_plugin._get_vpnservice( context, vpnservice_id) router_id = vpnservice['router_id'] # Get the NSX detailed status try: status_result = self._nsx_vpn.session.get_status( router_id, router_id, ipsec_site_conn_id) if status_result and 'results' in status_result: status = status_result['results'][0].get('runtime_status', '') # NSX statuses are UP, DOWN, DEGRADE # VPNaaS connection status should be ACTIVE or DOWN if status == 'UP': return 'ACTIVE' elif status == 'DOWN' or status == 'DEGRADED': return 'DOWN' except nsx_lib_exc.ResourceNotFound: LOG.debug("Status for VPN session %s was not found", ipsec_site_conn_id) def _delete_session(self, vpnservice, session_id): router_id = vpnservice['router_id'] self._nsx_vpn.session.delete(router_id, router_id, session_id) def create_ipsec_site_connection(self, context, ipsec_site_conn): LOG.debug('Creating ipsec site connection %(conn_info)s.', {"conn_info": ipsec_site_conn}) # Note(asarfaty) the plugin already calls the validator # which also validated the policies and 
service ikeprofile_id = None ipsecprofile_id = None dpdprofile_id = None session_id = None vpnservice_id = ipsec_site_conn['vpnservice_id'] vpnservice = self.service_plugin._get_vpnservice( context, vpnservice_id) ipsec_id = ipsec_site_conn["id"] try: # create the ike profile ikeprofile_id = self._create_ike_profile( context, ipsec_site_conn) LOG.debug("Created NSX ike profile %s", ikeprofile_id) # create the ipsec profile ipsecprofile_id = self._create_ipsec_profile( context, ipsec_site_conn) LOG.debug("Created NSX ipsec profile %s", ipsecprofile_id) # create the dpd profile dpdprofile_id = self._create_dpd_profile( context, ipsec_site_conn) LOG.debug("Created NSX dpd profile %s", dpdprofile_id) # create or reuse a local endpoint using the vpn service local_ep_id = self._create_local_endpoint( context, ipsec_site_conn, vpnservice) # Finally: create the session with policy rules rules = self._get_session_rules(context, ipsec_site_conn) connection_enabled = (vpnservice['admin_state_up'] and ipsec_site_conn['admin_state_up']) self._create_session( context, ipsec_site_conn, vpnservice, local_ep_id, ikeprofile_id, ipsecprofile_id, dpdprofile_id, rules, enabled=connection_enabled) self._update_status(context, vpnservice_id, ipsec_id, constants.ACTIVE) except nsx_exc.NsxPluginException: with excutils.save_and_reraise_exception(): self._update_status(context, vpnservice_id, ipsec_id, constants.ERROR) # delete the NSX objects that were already created # Do not delete reused objects: service, local endpoint if session_id: self._delete_session(vpnservice, session_id) if dpdprofile_id: self._delete_dpd_profile(dpdprofile_id) if ipsecprofile_id: self._delete_ipsec_profile(ipsecprofile_id) if ikeprofile_id: self._delete_ike_profile(ikeprofile_id) # update router firewall rules self._update_firewall_rules(context, vpnservice, ipsec_site_conn) # update router advertisement rules self.update_router_advertisement(context, vpnservice['router_id']) def 
delete_ipsec_site_connection(self, context, ipsec_site_conn): LOG.debug('Deleting ipsec site connection %(site)s.', {"site": ipsec_site_conn}) vpnservice_id = ipsec_site_conn['vpnservice_id'] vpnservice = self.service_plugin._get_vpnservice( context, vpnservice_id) self._delete_session(vpnservice, ipsec_site_conn['id']) self._delete_dpd_profile(ipsec_site_conn['id']) self._delete_ipsec_profile(ipsec_site_conn['ipsecpolicy_id']) self._delete_ike_profile(ipsec_site_conn['ikepolicy_id']) # update router firewall rules self._update_firewall_rules(context, vpnservice, ipsec_site_conn, delete=True) # update router advertisement rules self.update_router_advertisement(context, vpnservice['router_id']) def update_ipsec_site_connection(self, context, old_ipsec_conn, ipsec_site_conn): LOG.debug('Updating ipsec site connection new %(site)s.', {"site": ipsec_site_conn}) LOG.debug('Updating ipsec site connection old %(site)s.', {"site": old_ipsec_conn}) # Note(asarfaty) the plugin already calls the validator # which also validated the policies and service # Note(asarfaty): the VPN plugin does not allow changing ike/tunnel # policy or the service of a connection during update. 
vpnservice_id = old_ipsec_conn['vpnservice_id'] vpnservice = self.service_plugin._get_vpnservice( context, vpnservice_id) # check if the dpd configuration changed old_dpd = old_ipsec_conn['dpd'] new_dpd = ipsec_site_conn['dpd'] if (old_dpd['action'] != new_dpd['action'] or old_dpd['timeout'] != new_dpd['timeout'] or old_ipsec_conn['name'] != ipsec_site_conn['name']): self._update_dpd_profile(ipsec_site_conn) rules = self._get_session_rules(context, ipsec_site_conn) connection_enabled = (vpnservice['admin_state_up'] and ipsec_site_conn['admin_state_up']) try: self._update_session(ipsec_site_conn, vpnservice, rules, enabled=connection_enabled) except nsx_lib_exc.ManagerError as e: self._update_status(context, vpnservice_id, ipsec_site_conn['id'], constants.ERROR) msg = _("Failed to update VPN session %(id)s: %(error)s") % { "id": ipsec_site_conn['id'], "error": e} raise nsx_exc.NsxPluginException(err_msg=msg) if (ipsec_site_conn['peer_cidrs'] != old_ipsec_conn['peer_cidrs'] or ipsec_site_conn['peer_ep_group_id'] != old_ipsec_conn['peer_ep_group_id']): # Update firewall self._update_firewall_rules(context, vpnservice, ipsec_site_conn) # No service updates. 
No need to update router advertisement rules def _create_vpn_service(self, context, vpnservice): """Create or overwrite tier1 vpn service The service is created on the TIER1 router attached to the service The NSX can keep only one service per tier1 router so we reuse it """ router_id = vpnservice['router_id'] tags = self._nsxpolicy.build_v3_tags_payload( {'id': router_id, 'project_id': vpnservice['project_id']}, resource_type='os-neutron-router-id', project_name=context.tenant_name) self._nsx_vpn.service.create_or_overwrite( 'Neutron VPN service for T1 router ' + router_id, router_id, vpn_service_id=router_id, enabled=True, ike_log_level=ipsec_utils.DEFAULT_LOG_LEVEL, tags=tags) def _should_delete_nsx_service(self, context, vpnservice): # Check that no neutron vpn-service is configured for the same router router_id = vpnservice['router_id'] filters = {'router_id': [router_id]} services = self.vpn_plugin.get_vpnservices( context.elevated(), filters=filters) if not services: return True def _delete_vpn_service(self, context, vpnservice): router_id = vpnservice['router_id'] try: self._nsx_vpn.service.delete(router_id, router_id) except Exception as e: LOG.error("Failed to delete VPN service %s: %s", router_id, e) # check if service router should be deleted if not self._core_plugin.service_router_has_services( context.elevated(), router_id): self._core_plugin.delete_service_router(router_id) def create_vpnservice(self, context, new_vpnservice): LOG.info('Creating VPN service %(vpn)s', {'vpn': new_vpnservice}) vpnservice_id = new_vpnservice['id'] vpnservice = self.service_plugin._get_vpnservice(context, vpnservice_id) try: self.validator.validate_vpnservice(context, vpnservice) local_address = self._get_service_local_address( context.elevated(), vpnservice) except Exception: with excutils.save_and_reraise_exception(): # Rolling back change on the neutron self.service_plugin.delete_vpnservice(context, vpnservice_id) vpnservice['external_v4_ip'] = local_address 
self.service_plugin.set_external_tunnel_ips(context, vpnservice_id, v4_ip=local_address) # Make sure this tier1 has service router router_id = vpnservice['router_id'] if not self._core_plugin.verify_sr_at_backend(router_id): self._core_plugin.create_service_router(context, router_id) # create the NSX vpn service try: self._create_vpn_service(context, vpnservice) except nsx_lib_exc.ManagerError as e: self._update_status(context, vpnservice_id, None, constants.ERROR) msg = _("Failed to create vpn service: %s") % e raise nsx_exc.NsxPluginException(err_msg=msg) # update neutron vpnservice status to active self._update_status(context, vpnservice_id, None, constants.ACTIVE) def update_vpnservice(self, context, old_vpnservice, vpnservice): # Only handle the case of admin-state-up changes if old_vpnservice['admin_state_up'] != vpnservice['admin_state_up']: # update all relevant connections filters = {'vpnservice_id': [vpnservice['id']]} connections = self.vpn_plugin.get_ipsec_site_connections( context, filters=filters) for conn in connections: connection_enabled = (vpnservice['admin_state_up'] and conn['admin_state_up']) self._update_session(conn, vpnservice, enabled=connection_enabled) def delete_vpnservice(self, context, vpnservice): if self._should_delete_nsx_service(context, vpnservice): self._delete_local_endpoint(vpnservice) self._delete_vpn_service(context, vpnservice) self._delete_service_local_cidr_group(vpnservice) def validate_router_gw_info(self, context, router_id, gw_info): """Upon router gw update verify no overlapping subnets to advertise""" # check if this router has a vpn service admin_con = context.elevated() # get all relevant services, except those waiting to be deleted or in # ERROR state filters = {'router_id': [router_id], 'status': [constants.ACTIVE, constants.PENDING_CREATE, constants.INACTIVE, constants.PENDING_UPDATE]} services = self.vpn_plugin.get_vpnservices(admin_con, filters=filters) if not services: # This is a non-vpn router. 
if snat was disabled, should check # there is no overlapping with vpn connections advertised if (gw_info and not gw_info.get('enable_snat', cfg.CONF.enable_snat_by_default)): # get router subnets subnets = self._core_plugin._find_router_subnets_cidrs( context, router_id) # find all vpn services with connections if not self._check_subnets_overlap_with_all_conns( admin_con, subnets): raise common_driver.RouterWithOverlapNoSnat( router_id=router_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/vpnaas/nsxp/ipsec_validator.py0000644000175000017500000000342400000000000027220 0ustar00coreycorey00000000000000# Copyright 2019 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.services.vpnaas.common_v3 import ipsec_utils from vmware_nsx.services.vpnaas.common_v3 import ipsec_validator LOG = logging.getLogger(__name__) class IPsecNsxPValidator(ipsec_validator.IPsecCommonValidator): """Validator methods for Vmware NSX-Policy VPN support""" def __init__(self, service_plugin): super(IPsecNsxPValidator, self).__init__(service_plugin) self.nsxpolicy = self._core_plugin.nsxpolicy @property def auth_algorithm_map(self): return ipsec_utils.AUTH_ALGORITHM_MAP_P @property def pfs_map(self): return ipsec_utils.PFS_MAP_P def _validate_t0_ha_mode(self, tier0_uuid): tier0_router = self.nsxpolicy.tier0.get(tier0_uuid) if (not tier0_router or tier0_router.get('ha_mode') != 'ACTIVE_STANDBY'): msg = _("The router GW should be connected to a TIER-0 router " "with ACTIVE_STANDBY HA mode") raise nsx_exc.NsxVpnValidationError(details=msg) def _support_endpoint_groups(self): return True ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2222543 vmware-nsx-15.0.1.dev143/vmware_nsx/services/vpnaas/nsxv/0000755000175000017500000000000000000000000023501 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/vpnaas/nsxv/__init__.py0000644000175000017500000000000000000000000025600 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/vpnaas/nsxv/ipsec_driver.py0000644000175000017500000004034200000000000026534 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from neutron_lib.plugins import directory from neutron_vpnaas.services.vpn import service_drivers from oslo_log import log as logging from oslo_utils import excutils from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsxv_exc from vmware_nsx.common import locking from vmware_nsx.common import nsxv_constants from vmware_nsx.db import nsxv_db from vmware_nsx.extensions import projectpluginmap from vmware_nsx.plugins.nsx_v.vshield.common import exceptions as vcns_exc from vmware_nsx.services.vpnaas.nsxv import ipsec_validator LOG = logging.getLogger(__name__) IPSEC = 'ipsec' class NSXvIPsecVpnDriver(service_drivers.VpnDriver): def __init__(self, service_plugin): self._core_plugin = directory.get_plugin() if self._core_plugin.is_tvd_plugin(): self._core_plugin = self._core_plugin.get_plugin_by_type( projectpluginmap.NsxPlugins.NSX_V) self._vcns = self._core_plugin.nsx_v.vcns validator = ipsec_validator.IPsecValidator(service_plugin) super(NSXvIPsecVpnDriver, self).__init__(service_plugin, validator) @property def l3_plugin(self): return self._core_plugin @property def service_type(self): return IPSEC def _get_router_edge_id(self, context, vpnservice_id): vpnservice = self.service_plugin._get_vpnservice(context, vpnservice_id) router_id = vpnservice['router_id'] edge_binding = nsxv_db.get_nsxv_router_binding(context.session, router_id) if not edge_binding: msg = _("Couldn't find edge binding for router %s") % router_id raise nsxv_exc.NsxPluginException(err_msg=msg) if edge_binding['edge_type'] == nsxv_constants.VDR_EDGE: 
edge_manager = self._core_plugin.edge_manager router_id = edge_manager.get_plr_by_tlr_id(context, router_id) binding = nsxv_db.get_nsxv_router_binding(context.session, router_id) edge_id = binding['edge_id'] else: # Get exclusive edge id edge_id = edge_binding['edge_id'] return router_id, edge_id def _convert_ipsec_conn(self, context, ipsec_site_connection): ipsec_id = ipsec_site_connection['ipsecpolicy_id'] vpnservice_id = ipsec_site_connection['vpnservice_id'] ipsecpolicy = self.service_plugin.get_ipsecpolicy(context, ipsec_id) vpnservice = self.service_plugin._get_vpnservice(context, vpnservice_id) local_cidr = vpnservice['subnet']['cidr'] router_id = vpnservice['router_id'] router = self._core_plugin.get_router(context, router_id) local_addr = (router['external_gateway_info']['external_fixed_ips'] [0]["ip_address"]) encrypt = nsxv_constants.ENCRYPTION_ALGORITHM_MAP.get( ipsecpolicy.get('encryption_algorithm')) site = { 'enabled': True, 'enablePfs': True, 'dhGroup': nsxv_constants.PFS_MAP.get(ipsecpolicy.get('pfs')), 'name': ipsec_site_connection.get('name'), 'description': ipsec_site_connection.get('description'), 'localId': local_addr, 'localIp': local_addr, 'peerId': ipsec_site_connection['peer_id'], 'peerIp': ipsec_site_connection.get('peer_address'), 'localSubnets': { 'subnets': [local_cidr]}, 'peerSubnets': { 'subnets': ipsec_site_connection.get('peer_cidrs')}, 'authenticationMode': ipsec_site_connection.get('auth_mode'), 'psk': ipsec_site_connection.get('psk'), 'encryptionAlgorithm': encrypt } return site def _generate_new_sites(self, edge_id, ipsec_site_conn): # Fetch the previous ipsec vpn configuration ipsecvpn_configs = self._get_ipsec_config(edge_id) vse_sites = [] if ipsecvpn_configs[1]['enabled']: vse_sites = ([site for site in ipsecvpn_configs[1]['sites']['sites']]) vse_sites.append(ipsec_site_conn) return vse_sites def _generate_ipsecvpn_firewall_rules(self, plugin_type, context, edge_id=None): ipsecvpn_configs = self._get_ipsec_config(edge_id) 
ipsec_vpn_fw_rules = [] if ipsecvpn_configs[1]['enabled']: for site in ipsecvpn_configs[1]['sites']['sites']: peer_subnets = site['peerSubnets']['subnets'] local_subnets = site['localSubnets']['subnets'] ipsec_vpn_fw_rules.append({ 'name': 'VPN ' + site.get('name', 'rule'), 'action': 'allow', 'enabled': True, 'source_ip_address': peer_subnets, 'destination_ip_address': local_subnets}) return ipsec_vpn_fw_rules def _update_firewall_rules(self, context, vpnservice_id): vpnservice = self.service_plugin._get_vpnservice(context, vpnservice_id) router_db = ( self._core_plugin._get_router(context, vpnservice['router_id'])) self._core_plugin._update_subnets_and_dnat_firewall(context, router_db) def _update_status(self, context, vpn_service_id, ipsec_site_conn_id, status, updated_pending_status=True): status_list = [] vpn_status = {} ipsec_site_conn = {} vpn_status['id'] = vpn_service_id vpn_status['updated_pending_status'] = updated_pending_status vpn_status['status'] = status ipsec_site_conn['status'] = status ipsec_site_conn['updated_pending_status'] = updated_pending_status vpn_status['ipsec_site_connections'] = {ipsec_site_conn_id: ipsec_site_conn} status_list.append(vpn_status) self.service_plugin.update_status_by_agent(context, status_list) def create_ipsec_site_connection(self, context, ipsec_site_connection): LOG.debug('Creating ipsec site connection %(conn_info)s.', {"conn_info": ipsec_site_connection}) new_ipsec = self._convert_ipsec_conn(context, ipsec_site_connection) vpnservice_id = ipsec_site_connection['vpnservice_id'] edge_id = self._get_router_edge_id(context, vpnservice_id)[1] with locking.LockManager.get_lock(edge_id): vse_sites = self._generate_new_sites(edge_id, new_ipsec) ipsec_id = ipsec_site_connection["id"] try: LOG.debug('Updating ipsec vpn configuration %(vse_sites)s.', {'vse_sites': vse_sites}) self._update_ipsec_config(edge_id, vse_sites, enabled=True) except vcns_exc.VcnsApiException: self._update_status(context, vpnservice_id, ipsec_id, 
"ERROR") msg = (_("Failed to create ipsec site connection " "configuration with %(edge_id)s.") % {'edge_id': edge_id}) raise nsxv_exc.NsxPluginException(err_msg=msg) LOG.debug('Updating ipsec vpn firewall') try: self._update_firewall_rules(context, vpnservice_id) except vcns_exc.VcnsApiException: self._update_status(context, vpnservice_id, ipsec_id, "ERROR") msg = (_("Failed to update firewall rule for ipsec vpn " "with %(edge_id)s.") % {'edge_id': edge_id}) raise nsxv_exc.NsxPluginException(err_msg=msg) self._update_status(context, vpnservice_id, ipsec_id, "ACTIVE") def _get_ipsec_config(self, edge_id): return self._vcns.get_ipsec_config(edge_id) def delete_ipsec_site_connection(self, context, ipsec_site_conn): LOG.debug('Deleting ipsec site connection %(site)s.', {"site": ipsec_site_conn}) ipsec_id = ipsec_site_conn['id'] edge_id = self._get_router_edge_id(context, ipsec_site_conn['vpnservice_id'])[1] with locking.LockManager.get_lock(edge_id): del_site, vse_sites = self._find_vse_site(context, edge_id, ipsec_site_conn) if not del_site: LOG.error("Failed to find ipsec_site_connection " "%(ipsec_site_conn)s with %(edge_id)s.", {'ipsec_site_conn': ipsec_site_conn, 'edge_id': edge_id}) raise nsxv_exc.NsxIPsecVpnMappingNotFound(conn=ipsec_id) vse_sites.remove(del_site) enabled = True if vse_sites else False try: self._update_ipsec_config(edge_id, vse_sites, enabled) except vcns_exc.VcnsApiException: msg = (_("Failed to delete ipsec site connection " "configuration with edge_id: %(edge_id)s.") % {'egde_id': edge_id}) raise nsxv_exc.NsxPluginException(err_msg=msg) try: self._update_firewall_rules(context, ipsec_site_conn['vpnservice_id']) except vcns_exc.VcnsApiException: msg = _("Failed to update firewall rule for ipsec vpn with " "%(edge_id)s.") % {'edge_id': edge_id} raise nsxv_exc.NsxPluginException(err_msg=msg) def _find_vse_site(self, context, edge_id, site): # Fetch the previous ipsec vpn configuration ipsecvpn_configs = self._get_ipsec_config(edge_id)[1] 
vpnservice = self.service_plugin._get_vpnservice(context, site['vpnservice_id']) local_cidr = vpnservice['subnet']['cidr'] old_site = None vse_sites = None if ipsecvpn_configs['enabled']: vse_sites = ipsecvpn_configs['sites'].get('sites') for s in vse_sites: if ((s['peerSubnets'].get('subnets') == site['peer_cidrs']) and (s['localSubnets'].get('subnets')[0] == local_cidr)): old_site = s break return old_site, vse_sites def _update_site_dict(self, context, edge_id, site, ipsec_site_connection): # Fetch the previous ipsec vpn configuration old_site, vse_sites = self._find_vse_site(context, edge_id, site) if old_site: vse_sites.remove(old_site) if 'peer_addresses' in ipsec_site_connection: old_site['peerIp'] = ipsec_site_connection['peer_address'] if 'peer_cidrs' in ipsec_site_connection: old_site['peerSubnets']['subnets'] = (ipsec_site_connection ['peer_cidrs']) vse_sites.append(old_site) return vse_sites def update_ipsec_site_connection(self, context, old_ipsec_conn, ipsec_site_connection): LOG.debug('Updating ipsec site connection %(site)s.', {"site": ipsec_site_connection}) vpnservice_id = old_ipsec_conn['vpnservice_id'] ipsec_id = old_ipsec_conn['id'] edge_id = self._get_router_edge_id(context, vpnservice_id)[1] with locking.LockManager.get_lock(edge_id): vse_sites = self._update_site_dict(context, edge_id, old_ipsec_conn, ipsec_site_connection) if not vse_sites: self._update_status(context, vpnservice_id, ipsec_id, "ERROR") LOG.error("Failed to find ipsec_site_connection " "%(ipsec_site_conn)s with %(edge_id)s.", {'ipsec_site_conn': ipsec_site_connection, 'edge_id': edge_id}) raise nsxv_exc.NsxIPsecVpnMappingNotFound(conn=ipsec_id) try: LOG.debug('Updating ipsec vpn configuration %(vse_sites)s.', {'vse_sites': vse_sites}) self._update_ipsec_config(edge_id, vse_sites) except vcns_exc.VcnsApiException: self._update_status(context, vpnservice_id, ipsec_id, "ERROR") msg = (_("Failed to create ipsec site connection " "configuration with %(edge_id)s.") % {'edge_id': 
edge_id}) raise nsxv_exc.NsxPluginException(err_msg=msg) if 'peer_cidrs' in ipsec_site_connection: # Update firewall old_ipsec_conn['peer_cidrs'] = ( ipsec_site_connection['peer_cidrs']) try: self._update_firewall_rules(context, vpnservice_id) except vcns_exc.VcnsApiException: self._update_status(context, vpnservice_id, ipsec_id, "ERROR") msg = (_("Failed to update firewall rule for ipsec " "vpn with %(edge_id)s.") % {'edge_id': edge_id}) raise nsxv_exc.NsxPluginException(err_msg=msg) def _get_gateway_ips(self, router): """Obtain the IPv4 and/or IPv6 GW IP for the router. If there are multiples, (arbitrarily) use the first one. """ v4_ip = v6_ip = None for fixed_ip in router.gw_port['fixed_ips']: addr = fixed_ip['ip_address'] vers = netaddr.IPAddress(addr).version if vers == 4: if v4_ip is None: v4_ip = addr elif v6_ip is None: v6_ip = addr return v4_ip, v6_ip def create_vpnservice(self, context, vpnservice): LOG.debug('Creating VPN service %(vpn)s', {'vpn': vpnservice}) vpnservice_id = vpnservice['id'] try: self.validator.validate_vpnservice(context, vpnservice) except Exception: with excutils.save_and_reraise_exception(): # Rolling back change on the neutron self.service_plugin.delete_vpnservice(context, vpnservice_id) vpnservice = self.service_plugin._get_vpnservice(context, vpnservice_id) v4_ip, v6_ip = self._get_gateway_ips(vpnservice.router) if v4_ip: vpnservice['external_v4_ip'] = v4_ip if v6_ip: vpnservice['external_v6_ip'] = v6_ip self.service_plugin.set_external_tunnel_ips(context, vpnservice_id, v4_ip=v4_ip, v6_ip=v6_ip) def update_vpnservice(self, context, old_vpnservice, vpnservice): pass def delete_vpnservice(self, context, vpnservice): pass def _update_ipsec_config(self, edge_id, sites, enabled=True): ipsec_config = {'featureType': "ipsec_4.0", 'enabled': enabled} ipsec_config['sites'] = {'sites': sites} try: self._vcns.update_ipsec_config(edge_id, ipsec_config) except vcns_exc.VcnsApiException: msg = _("Failed to update ipsec vpn configuration with 
" "edge_id: %s") % edge_id raise nsxv_exc.NsxPluginException(err_msg=msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/vpnaas/nsxv/ipsec_validator.py0000644000175000017500000001304400000000000027225 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib import constants from neutron_vpnaas.db.vpn import vpn_validator from oslo_log import log as logging from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsxv_exc from vmware_nsx.common import nsxv_constants LOG = logging.getLogger(__name__) class IPsecValidator(vpn_validator.VpnReferenceValidator): """Validator methods for Vmware VPN support""" def __init__(self, service_plugin): super(IPsecValidator, self).__init__() self.vpn_plugin = service_plugin def validate_ikepolicy_version(self, policy_info): """NSX Edge provides IKEv1""" version = policy_info.get('ike_version') if version != 'v1': msg = _("Unsupported ike policy %s! only v1 " "is supported right now.") % version raise nsxv_exc.NsxVpnValidationError(details=msg) def validate_ikepolicy_pfs(self, policy_info): # Check whether pfs is allowed. if not nsxv_constants.PFS_MAP.get(policy_info['pfs']): msg = _("Unsupported pfs: %(pfs)s! 
currently only " "the following pfs are supported on VSE: %s") % { 'pfs': policy_info['pfs'], 'supported': nsxv_constants.PFS_MAP} raise nsxv_exc.NsxVpnValidationError(details=msg) def validate_encryption_algorithm(self, policy_info): encryption = policy_info['encryption_algorithm'] if encryption not in nsxv_constants.ENCRYPTION_ALGORITHM_MAP: msg = _("Unsupported encryption_algorithm: %(algo)s! please " "select one of the following supported algorithms: " "%(supported_algos)s") % { 'algo': encryption, 'supported_algos': nsxv_constants.ENCRYPTION_ALGORITHM_MAP} raise nsxv_exc.NsxVpnValidationError(details=msg) def validate_ipsec_policy(self, context, policy_info): """Ensure IPSec policy encap mode is tunnel for current REST API.""" mode = policy_info['encapsulation_mode'] if mode not in nsxv_constants.ENCAPSULATION_MODE_ALLOWED: msg = _("Unsupported encapsulation mode: %s! currently only" "'tunnel' mode is supported.") % mode raise nsxv_exc.NsxVpnValidationError(details=msg) def validate_policies_matching_algorithms(self, ikepolicy, ipsecpolicy): # In VSE, Phase 1 and Phase 2 share the same encryption_algorithm # and authentication algorithms setting. At present, just record the # discrepancy error in log and take ipsecpolicy to do configuration. 
keys = ('auth_algorithm', 'encryption_algorithm', 'pfs') for key in keys: if ikepolicy[key] != ipsecpolicy[key]: LOG.warning("IKEPolicy and IPsecPolicy should have consistent " "auth_algorithm, encryption_algorithm and pfs for " "VSE!") break def _is_shared_router(self, router): return router.get('router_type') == constants.SHARED def _validate_router(self, context, router_id): # Only support distributed and exclusive router type router = self.core_plugin.get_router(context, router_id) if self._is_shared_router(router): msg = _("Router type is not supported for VPN service, only " "support distributed and exclusive router") raise nsxv_exc.NsxVpnValidationError(details=msg) def validate_vpnservice(self, context, vpnservice): """Called upon create/update of a service""" # Call general validations super(IPsecValidator, self).validate_vpnservice( context, vpnservice) # Call specific NSX validations self._validate_router(context, vpnservice['router_id']) if not vpnservice['subnet_id']: # we currently do not support multiple subnets so a subnet must # be defined msg = _("Subnet must be defined in a service") raise nsxv_exc.NsxVpnValidationError(details=msg) def validate_ipsec_site_connection(self, context, ipsec_site_conn): ike_policy_id = ipsec_site_conn.get('ikepolicy_id') if ike_policy_id: ikepolicy = self.vpn_plugin.get_ikepolicy(context, ike_policy_id) self.validate_ikepolicy_version(ikepolicy) self.validate_ikepolicy_pfs(ikepolicy) self.validate_encryption_algorithm(ikepolicy) ipsec_policy_id = ipsec_site_conn.get('ipsecpolicy_id') if ipsec_policy_id: ipsecpolicy = self.vpn_plugin.get_ipsecpolicy(context, ipsec_policy_id) self.validate_ipsec_policy(context, ipsecpolicy) if ike_policy_id and ipsec_policy_id: self.validate_policies_matching_algorithms(ikepolicy, ipsecpolicy) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2222543 
vmware-nsx-15.0.1.dev143/vmware_nsx/services/vpnaas/nsxv3/0000755000175000017500000000000000000000000023564 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/vpnaas/nsxv3/__init__.py0000644000175000017500000000000000000000000025663 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/vpnaas/nsxv3/ipsec_driver.py0000644000175000017500000007707600000000000026635 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import netaddr from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants from neutron_lib import context as n_context from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.db import db from vmware_nsx.services.vpnaas.common_v3 import ipsec_driver as common_driver from vmware_nsx.services.vpnaas.common_v3 import ipsec_utils from vmware_nsx.services.vpnaas.nsxv3 import ipsec_validator from vmware_nsxlib.v3 import exceptions as nsx_lib_exc from vmware_nsxlib.v3 import nsx_constants as consts from vmware_nsxlib.v3 import vpn_ipsec LOG = logging.getLogger(__name__) IPSEC = 'ipsec' class NSXv3IPsecVpnDriver(common_driver.NSXcommonIPsecVpnDriver): def __init__(self, service_plugin): validator = ipsec_validator.IPsecV3Validator(service_plugin) super(NSXv3IPsecVpnDriver, self).__init__(service_plugin, validator) self._nsxlib = self._core_plugin.nsxlib self._nsx_vpn = self._nsxlib.vpn_ipsec registry.subscribe( self._delete_local_endpoint, resources.ROUTER_GATEWAY, events.AFTER_DELETE) def _translate_cidr(self, cidr): return self._nsxlib.firewall_section.get_ip_cidr_reference( cidr, consts.IPV6 if netaddr.valid_ipv6(cidr) else consts.IPV4) def _translate_addresses_to_target(self, cidrs): return [self._translate_cidr(ip) for ip in cidrs] def _generate_ipsecvpn_firewall_rules(self, plugin_type, context, router_id=None): """Return the firewall rules needed to allow vpn traffic""" fw_rules = [] # get all the active services of this router filters = {'router_id': [router_id], 'status': [constants.ACTIVE]} services = self.vpn_plugin.get_vpnservices( context.elevated(), filters=filters) if not services: return fw_rules for srv in services: subnet = self.l3_plugin.get_subnet( context.elevated(), srv['subnet_id']) local_cidrs = [subnet['cidr']] # get all the active 
connections of this service filters = {'vpnservice_id': [srv['id']], 'status': [constants.ACTIVE]} connections = self.vpn_plugin.get_ipsec_site_connections( context.elevated(), filters=filters) for conn in connections: peer_cidrs = conn['peer_cidrs'] fw_rules.append({ 'display_name': 'VPN connection ' + conn['id'], 'action': consts.FW_ACTION_ALLOW, 'destinations': self._translate_addresses_to_target( peer_cidrs), 'sources': self._translate_addresses_to_target( local_cidrs)}) return fw_rules def _update_firewall_rules(self, context, vpnservice): LOG.debug("Updating vpn firewall rules for router %s", vpnservice['router_id']) self._core_plugin.update_router_firewall( context, vpnservice['router_id']) def _update_router_advertisement(self, context, vpnservice): LOG.debug("Updating router advertisement rules for router %s", vpnservice['router_id']) router_id = vpnservice['router_id'] # skip no-snat router as it is already advertised, # and router with no gw rtr = self.l3_plugin.get_router(context, router_id) if (not rtr.get('external_gateway_info') or not rtr['external_gateway_info'].get('enable_snat', True)): return rules = [] # get all the active services of this router filters = {'router_id': [router_id], 'status': [constants.ACTIVE]} services = self.vpn_plugin.get_vpnservices( context.elevated(), filters=filters) rule_name_pref = 'VPN advertisement service' for srv in services: # use only services with active connections filters = {'vpnservice_id': [srv['id']], 'status': [constants.ACTIVE]} connections = self.vpn_plugin.get_ipsec_site_connections( context.elevated(), filters=filters) if not connections: continue subnet = self.l3_plugin.get_subnet( context.elevated(), srv['subnet_id']) rules.append({ 'display_name': "%s %s" % (rule_name_pref, srv['id']), 'action': consts.FW_ACTION_ALLOW, 'networks': [subnet['cidr']]}) if rules: logical_router_id = db.get_nsx_router_id(context.session, router_id) self._nsxlib.logical_router.update_advertisement_rules( 
logical_router_id, rules, name_prefix=rule_name_pref) def _nsx_tags(self, context, connection): return self._nsxlib.build_v3_tags_payload( connection, resource_type='os-vpn-connection-id', project_name=context.tenant_name) def _nsx_tags_for_reused(self): # Service & Local endpoint can be reused cross tenants, # so we do not add the tenant/object id. return self._nsxlib.build_v3_api_version_tag() def _create_ike_profile(self, context, connection): """Create an ike profile for a connection""" # Note(asarfaty) the NSX profile can be reused, so we can consider # creating it only once in the future, and keeping a use-count for it. # There is no driver callback for profiles creation so it has to be # done on connection creation. ike_policy_id = connection['ikepolicy_id'] ikepolicy = self.vpn_plugin.get_ikepolicy(context, ike_policy_id) try: profile = self._nsx_vpn.ike_profile.create( ikepolicy['name'] or ikepolicy['id'], description=ikepolicy['description'], encryption_algorithm=ipsec_utils.ENCRYPTION_ALGORITHM_MAP[ ikepolicy['encryption_algorithm']], digest_algorithm=ipsec_utils.AUTH_ALGORITHM_MAP[ ikepolicy['auth_algorithm']], ike_version=ipsec_utils.IKE_VERSION_MAP[ ikepolicy['ike_version']], dh_group=ipsec_utils.PFS_MAP[ikepolicy['pfs']], sa_life_time=ikepolicy['lifetime']['value'], tags=self._nsx_tags(context, connection)) except nsx_lib_exc.ManagerError as e: msg = _("Failed to create an ike profile: %s") % e raise nsx_exc.NsxPluginException(err_msg=msg) return profile['id'] def _delete_ike_profile(self, ikeprofile_id): self._nsx_vpn.ike_profile.delete(ikeprofile_id) def _create_ipsec_profile(self, context, connection): """Create an ipsec profile for a connection""" # Note(asarfaty) the NSX profile can be reused, so we can consider # creating it only once in the future, and keeping a use-count for it. # There is no driver callback for profiles creation so it has to be # done on connection creation. 
ipsec_policy_id = connection['ipsecpolicy_id'] ipsecpolicy = self.vpn_plugin.get_ipsecpolicy( context, ipsec_policy_id) try: profile = self._nsx_vpn.tunnel_profile.create( ipsecpolicy['name'] or ipsecpolicy['id'], description=ipsecpolicy['description'], encryption_algorithm=ipsec_utils.ENCRYPTION_ALGORITHM_MAP[ ipsecpolicy['encryption_algorithm']], digest_algorithm=ipsec_utils.AUTH_ALGORITHM_MAP[ ipsecpolicy['auth_algorithm']], dh_group=ipsec_utils.PFS_MAP[ipsecpolicy['pfs']], pfs=True, sa_life_time=ipsecpolicy['lifetime']['value'], tags=self._nsx_tags(context, connection)) except nsx_lib_exc.ManagerError as e: msg = _("Failed to create a tunnel profile: %s") % e raise nsx_exc.NsxPluginException(err_msg=msg) return profile['id'] def _delete_ipsec_profile(self, ipsecprofile_id): self._nsx_vpn.tunnel_profile.delete(ipsecprofile_id) def _create_dpd_profile(self, context, connection): dpd_info = connection['dpd'] try: profile = self._nsx_vpn.dpd_profile.create( self._get_dpd_profile_name(connection), description='neutron dpd profile', timeout=dpd_info.get('timeout'), enabled=True if dpd_info.get('action') == 'hold' else False, tags=self._nsx_tags(context, connection)) except nsx_lib_exc.ManagerError as e: msg = _("Failed to create a DPD profile: %s") % e raise nsx_exc.NsxPluginException(err_msg=msg) return profile['id'] def _delete_dpd_profile(self, dpdprofile_id): self._nsx_vpn.dpd_profile.delete(dpdprofile_id) def _update_dpd_profile(self, connection, dpdprofile_id): dpd_info = connection['dpd'] self._nsx_vpn.dpd_profile.update(dpdprofile_id, name=self._get_dpd_profile_name(connection), timeout=dpd_info.get('timeout'), enabled=True if dpd_info.get('action') == 'hold' else False) def _create_peer_endpoint(self, context, connection, ikeprofile_id, ipsecprofile_id, dpdprofile_id): default_auth = vpn_ipsec.AuthenticationModeTypes.AUTH_MODE_PSK try: peer_endpoint = self._nsx_vpn.peer_endpoint.create( connection['name'] or connection['id'], connection['peer_address'], 
connection['peer_id'], description=connection['description'], authentication_mode=default_auth, dpd_profile_id=dpdprofile_id, ike_profile_id=ikeprofile_id, ipsec_tunnel_profile_id=ipsecprofile_id, connection_initiation_mode=ipsec_utils.INITIATION_MODE_MAP[ connection['initiator']], psk=connection['psk'], tags=self._nsx_tags(context, connection)) except nsx_lib_exc.ManagerError as e: msg = _("Failed to create a peer endpoint: %s") % e raise nsx_exc.NsxPluginException(err_msg=msg) return peer_endpoint['id'] def _update_peer_endpoint(self, peer_ep_id, connection): self._nsx_vpn.peer_endpoint.update( peer_ep_id, name=connection['name'] or connection['id'], peer_address=connection['peer_address'], peer_id=connection['peer_id'], description=connection['description'], connection_initiation_mode=ipsec_utils.INITIATION_MODE_MAP[ connection['initiator']], psk=connection['psk']) def _delete_peer_endpoint(self, peer_ep_id): self._nsx_vpn.peer_endpoint.delete(peer_ep_id) def _get_profiles_from_peer_endpoint(self, peer_ep_id): peer_ep = self._nsx_vpn.peer_endpoint.get(peer_ep_id) return ( peer_ep['ike_profile_id'], peer_ep['ipsec_tunnel_profile_id'], peer_ep['dpd_profile_id']) def _create_local_endpoint(self, context, local_addr, nsx_service_id, router_id, project_id): """Creating an NSX local endpoint for a logical router This endpoint can be reused by other connections, and will be deleted when the router is deleted or gateway is removed """ # Add the neutron router-id to the tags to help search later tags = self._nsxlib.build_v3_tags_payload( {'id': router_id, 'project_id': project_id}, resource_type='os-neutron-router-id', project_name=context.tenant_name) try: local_endpoint = self._nsx_vpn.local_endpoint.create( 'Local endpoint for OS VPNaaS', local_addr, nsx_service_id, tags=tags) except nsx_lib_exc.ManagerError as e: msg = _("Failed to create a local endpoint: %s") % e raise nsx_exc.NsxPluginException(err_msg=msg) return local_endpoint['id'] def 
_search_local_endpint(self, router_id): tags = [{'scope': 'os-neutron-router-id', 'tag': router_id}] ep_list = self._nsxlib.search_by_tags( tags=tags, resource_type=self._nsx_vpn.local_endpoint.resource_type) if ep_list['results']: return ep_list['results'][0]['id'] def _get_local_endpoint(self, context, vpnservice): """Get the id of the local endpoint for a service The NSX allows only one local endpoint per local address This method will create it if there is not matching endpoint """ # use the router GW as the local ip router_id = vpnservice['router']['id'] # check if we already have this endpoint on the NSX local_ep_id = self._search_local_endpint(router_id) if local_ep_id: return local_ep_id # create a new one local_addr = vpnservice['external_v4_ip'] nsx_service_id = self._get_nsx_vpn_service(context, vpnservice) local_ep_id = self._create_local_endpoint( context, local_addr, nsx_service_id, router_id, vpnservice['project_id']) return local_ep_id def _delete_local_endpoint_by_router(self, context, router_id): # delete the local endpoint from the NSX local_ep_id = self._search_local_endpint(router_id) if local_ep_id: self._nsx_vpn.local_endpoint.delete(local_ep_id) # delete the neutron port with this IP port = self._find_vpn_service_port(context, router_id) if port: self.l3_plugin.delete_port(context, port['id'], force_delete_vpn=True) def _delete_local_endpoint(self, resource, event, trigger, payload=None): """Upon router deletion / gw removal delete the matching endpoint""" router_id = payload.resource_id ctx = n_context.get_admin_context() self._delete_local_endpoint_by_router(ctx, router_id) def validate_router_gw_info(self, context, router_id, gw_info): """Upon router gw update - verify no-snat""" # check if this router has a vpn service admin_con = context.elevated() # get all relevant services, except those waiting to be deleted or in # ERROR state filters = {'router_id': [router_id], 'status': [constants.ACTIVE, constants.PENDING_CREATE, 
constants.INACTIVE, constants.PENDING_UPDATE]} services = self.vpn_plugin.get_vpnservices(admin_con, filters=filters) if services: # do not allow enable-snat if (gw_info and gw_info.get('enable_snat', cfg.CONF.enable_snat_by_default)): raise common_driver.RouterWithSNAT(router_id=router_id) else: # if this is a non-vpn router. if snat was disabled, should check # there is no overlapping with vpn connections if (gw_info and not gw_info.get('enable_snat', cfg.CONF.enable_snat_by_default)): # get router subnets subnets = self._core_plugin._find_router_subnets_cidrs( context, router_id) # find all vpn services with connections if not self._check_subnets_overlap_with_all_conns( admin_con, subnets): raise common_driver.RouterWithOverlapNoSnat( router_id=router_id) def _get_session_rules(self, context, connection, vpnservice): # TODO(asarfaty): support vpn-endpoint-groups too peer_cidrs = connection['peer_cidrs'] local_cidrs = [vpnservice['subnet']['cidr']] rule = self._nsx_vpn.session.get_rule_obj(local_cidrs, peer_cidrs) return [rule] def _create_session(self, context, connection, local_ep_id, peer_ep_id, rules, enabled=True): try: session = self._nsx_vpn.session.create( connection['name'] or connection['id'], local_ep_id, peer_ep_id, rules, description=connection['description'], tags=self._nsx_tags(context, connection), enabled=enabled) except nsx_lib_exc.ManagerError as e: msg = _("Failed to create a session: %s") % e raise nsx_exc.NsxPluginException(err_msg=msg) return session['id'] def _update_session(self, session_id, connection, rules=None, enabled=True): self._nsx_vpn.session.update( session_id, name=connection['name'] or connection['id'], description=connection['description'], policy_rules=rules, enabled=enabled) def get_ipsec_site_connection_status(self, context, ipsec_site_conn_id): mapping = db.get_nsx_vpn_connection_mapping( context.session, ipsec_site_conn_id) if not mapping or not mapping['session_id']: LOG.info("Couldn't find NSX session for VPN 
connection %s", ipsec_site_conn_id) return status_result = self._nsx_vpn.session.get_status(mapping['session_id']) if status_result and 'session_status' in status_result: status = status_result['session_status'] # NSX statuses are UP, DOWN, DEGRADE # VPNaaS connection status should be ACTIVE or DOWN if status == 'UP': return 'ACTIVE' elif status == 'DOWN' or status == 'DEGRADED': return 'DOWN' def _delete_session(self, session_id): self._nsx_vpn.session.delete(session_id) def create_ipsec_site_connection(self, context, ipsec_site_conn): LOG.debug('Creating ipsec site connection %(conn_info)s.', {"conn_info": ipsec_site_conn}) # Note(asarfaty) the plugin already calls the validator # which also validated the policies and service ikeprofile_id = None ipsecprofile_id = None dpdprofile_id = None peer_ep_id = None session_id = None vpnservice_id = ipsec_site_conn['vpnservice_id'] vpnservice = self.service_plugin._get_vpnservice( context, vpnservice_id) ipsec_id = ipsec_site_conn["id"] try: # create the ike profile ikeprofile_id = self._create_ike_profile( context, ipsec_site_conn) LOG.debug("Created NSX ike profile %s", ikeprofile_id) # create the ipsec profile ipsecprofile_id = self._create_ipsec_profile( context, ipsec_site_conn) LOG.debug("Created NSX ipsec profile %s", ipsecprofile_id) # create the dpd profile dpdprofile_id = self._create_dpd_profile( context, ipsec_site_conn) LOG.debug("Created NSX dpd profile %s", dpdprofile_id) # create the peer endpoint and add to the DB peer_ep_id = self._create_peer_endpoint( context, ipsec_site_conn, ikeprofile_id, ipsecprofile_id, dpdprofile_id) LOG.debug("Created NSX peer endpoint %s", peer_ep_id) # create or reuse a local endpoint using the vpn service local_ep_id = self._get_local_endpoint(context, vpnservice) # Finally: create the session with policy rules rules = self._get_session_rules( context, ipsec_site_conn, vpnservice) connection_enabled = (vpnservice['admin_state_up'] and ipsec_site_conn['admin_state_up']) 
session_id = self._create_session( context, ipsec_site_conn, local_ep_id, peer_ep_id, rules, enabled=connection_enabled) # update the DB with the session id db.add_nsx_vpn_connection_mapping( context.session, ipsec_site_conn['id'], session_id, dpdprofile_id, ikeprofile_id, ipsecprofile_id, peer_ep_id) self._update_status(context, vpnservice_id, ipsec_id, constants.ACTIVE) except nsx_exc.NsxPluginException: with excutils.save_and_reraise_exception(): self._update_status(context, vpnservice_id, ipsec_id, constants.ERROR) # delete the NSX objects that were already created # Do not delete reused objects: service, local endpoint if session_id: self._delete_session(session_id) if peer_ep_id: self._delete_peer_endpoint(peer_ep_id) if dpdprofile_id: self._delete_dpd_profile(dpdprofile_id) if ipsecprofile_id: self._delete_ipsec_profile(ipsecprofile_id) if ikeprofile_id: self._delete_ike_profile(ikeprofile_id) # update router firewall rules self._update_firewall_rules(context, vpnservice) # update router advertisement rules self._update_router_advertisement(context, vpnservice) def delete_ipsec_site_connection(self, context, ipsec_site_conn): LOG.debug('Deleting ipsec site connection %(site)s.', {"site": ipsec_site_conn}) vpnservice_id = ipsec_site_conn['vpnservice_id'] vpnservice = self.service_plugin._get_vpnservice( context, vpnservice_id) # get all data from the nsx based on the connection id in the DB mapping = db.get_nsx_vpn_connection_mapping( context.session, ipsec_site_conn['id']) if not mapping: LOG.warning("Couldn't find nsx ids for VPN connection %s", ipsec_site_conn['id']) # Do not fail the deletion return if mapping['session_id']: self._delete_session(mapping['session_id']) if mapping['peer_ep_id']: self._delete_peer_endpoint(mapping['peer_ep_id']) if mapping['dpd_profile_id']: self._delete_dpd_profile(mapping['dpd_profile_id']) if mapping['ipsec_profile_id']: self._delete_ipsec_profile(mapping['ipsec_profile_id']) if mapping['ike_profile_id']: 
self._delete_ike_profile(mapping['ike_profile_id']) # Do not delete the local endpoint and service as they are reused db.delete_nsx_vpn_connection_mapping(context.session, ipsec_site_conn['id']) # update router firewall rules self._update_firewall_rules(context, vpnservice) # update router advertisement rules self._update_router_advertisement(context, vpnservice) def update_ipsec_site_connection(self, context, old_ipsec_conn, ipsec_site_conn): LOG.debug('Updating ipsec site connection new %(site)s.', {"site": ipsec_site_conn}) LOG.debug('Updating ipsec site connection old %(site)s.', {"site": old_ipsec_conn}) # Note(asarfaty) the plugin already calls the validator # which also validated the policies and service ipsec_id = old_ipsec_conn['id'] vpnservice_id = old_ipsec_conn['vpnservice_id'] vpnservice = self.service_plugin._get_vpnservice( context, vpnservice_id) mapping = db.get_nsx_vpn_connection_mapping( context.session, ipsec_site_conn['id']) if not mapping: LOG.error("Couldn't find nsx ids for VPN connection %s", ipsec_site_conn['id']) self._update_status(context, vpnservice_id, ipsec_id, "ERROR") raise nsx_exc.NsxIPsecVpnMappingNotFound(conn=ipsec_id) # check if the dpd configuration changed old_dpd = old_ipsec_conn['dpd'] new_dpd = ipsec_site_conn['dpd'] if (old_dpd['action'] != new_dpd['action'] or old_dpd['timeout'] != new_dpd['timeout'] or old_ipsec_conn['name'] != ipsec_site_conn['name']): self._update_dpd_profile(ipsec_site_conn, mapping['dpd_profile_id']) # update peer endpoint with all the parameters that could be modified # Note(asarfaty): local endpoints are reusable and will not be updated self._update_peer_endpoint(mapping['peer_ep_id'], ipsec_site_conn) rules = self._get_session_rules( context, ipsec_site_conn, vpnservice) connection_enabled = (vpnservice['admin_state_up'] and ipsec_site_conn['admin_state_up']) self._update_session(mapping['session_id'], ipsec_site_conn, rules, enabled=connection_enabled) if ipsec_site_conn['peer_cidrs'] != 
old_ipsec_conn['peer_cidrs']: # Update firewall self._update_firewall_rules(context, vpnservice) # No service updates. No need to update router advertisement rules def _create_vpn_service(self, tier0_uuid): try: service = self._nsx_vpn.service.create( 'Neutron VPN service for T0 router ' + tier0_uuid, tier0_uuid, enabled=True, ike_log_level=ipsec_utils.DEFAULT_LOG_LEVEL, tags=self._nsx_tags_for_reused()) except nsx_lib_exc.ManagerError as e: msg = _("Failed to create vpn service: %s") % e raise nsx_exc.NsxPluginException(err_msg=msg) return service['id'] def _find_vpn_service(self, tier0_uuid, validate=True): # find the service for the tier0 router in the NSX. # Note(asarfaty) we expect only a small number of services services = self._nsx_vpn.service.list()['results'] for srv in services: if srv['logical_router_id'] == tier0_uuid: # if it exists but disabled: issue an error if validate and not srv.get('enabled', True): msg = _("NSX vpn service %s must be enabled") % srv['id'] raise nsx_exc.NsxPluginException(err_msg=msg) return srv['id'] def _get_service_tier0_uuid(self, context, vpnservice): router_id = vpnservice['router_id'] router_db = self._core_plugin._get_router(context, router_id) return self._core_plugin._get_tier0_uuid_by_router(context, router_db) def _create_vpn_service_if_needed(self, context, vpnservice): # The service is created on the TIER0 router attached to the router GW # The NSX can keep only one service per tier0 router so we reuse it tier0_uuid = self._get_service_tier0_uuid(context, vpnservice) if self._find_vpn_service(tier0_uuid): return # create a new one self._create_vpn_service(tier0_uuid) def _delete_vpn_service_if_needed(self, context, vpnservice): # Delete the VPN service on the NSX if no other service connected # to the same tier0 use it elev_context = context.elevated() tier0_uuid = self._get_service_tier0_uuid(elev_context, vpnservice) all_services = self.vpn_plugin.get_vpnservices(elev_context) for srv in all_services: if 
(srv['id'] != vpnservice['id'] and self._get_service_tier0_uuid(elev_context, srv) == tier0_uuid): LOG.info("Not deleting vpn service from the NSX as other " "neutron vpn services still use it.") return # Find the NSX-ID srv_id = self._get_nsx_vpn_service(elev_context, vpnservice) if not srv_id: LOG.error("Not deleting vpn service from the NSX as the " "service was not found on the NSX.") return try: self._nsx_vpn.service.delete(srv_id) except Exception as e: LOG.error("Failed to delete VPN service %s: %s", srv_id, e) def _delete_local_endpoints_if_needed(self, context, vpnservice): """When deleting the last service of a logical router delete its local endpoint """ router_id = vpnservice['router_id'] elev_context = context.elevated() filters = {'router_id': [router_id]} services = self.vpn_plugin.get_vpnservices( elev_context, filters=filters) if not services: self._delete_local_endpoint_by_router(elev_context, router_id) def _get_nsx_vpn_service(self, context, vpnservice): tier0_uuid = self._get_service_tier0_uuid(context, vpnservice) return self._find_vpn_service(tier0_uuid, validate=False) def create_vpnservice(self, context, vpnservice): #TODO(asarfaty) support vpn-endpoint-group-create for local & peer # cidrs too LOG.debug('Creating VPN service %(vpn)s', {'vpn': vpnservice}) vpnservice_id = vpnservice['id'] vpnservice = self.service_plugin._get_vpnservice(context, vpnservice_id) try: self.validator.validate_vpnservice(context, vpnservice) local_address = self._get_service_local_address( context.elevated(), vpnservice) except Exception: with excutils.save_and_reraise_exception(): # Rolling back change on the neutron self.service_plugin.delete_vpnservice(context, vpnservice_id) vpnservice['external_v4_ip'] = local_address self.service_plugin.set_external_tunnel_ips(context, vpnservice_id, v4_ip=local_address) self._create_vpn_service_if_needed(context, vpnservice) def update_vpnservice(self, context, old_vpnservice, vpnservice): # Only handle the case of 
admin-state-up changes if old_vpnservice['admin_state_up'] != vpnservice['admin_state_up']: # update all relevant connections filters = {'vpnservice_id': [vpnservice['id']]} connections = self.vpn_plugin.get_ipsec_site_connections( context, filters=filters) for conn in connections: mapping = db.get_nsx_vpn_connection_mapping( context.session, conn['id']) if mapping: connection_enabled = (vpnservice['admin_state_up'] and conn['admin_state_up']) self._update_session(mapping['session_id'], conn, enabled=connection_enabled) def delete_vpnservice(self, context, vpnservice): self._delete_local_endpoints_if_needed(context, vpnservice) self._delete_vpn_service_if_needed(context, vpnservice) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/services/vpnaas/nsxv3/ipsec_validator.py0000644000175000017500000000430500000000000027310 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from vmware_nsx._i18n import _ from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.services.vpnaas.common_v3 import ipsec_utils from vmware_nsx.services.vpnaas.common_v3 import ipsec_validator LOG = logging.getLogger(__name__) class IPsecV3Validator(ipsec_validator.IPsecCommonValidator): """Validator methods for Vmware NSX-V3 VPN support""" def __init__(self, service_plugin): super(IPsecV3Validator, self).__init__(service_plugin) @property def nsxlib(self): return self._core_plugin.nsxlib @property def auth_algorithm_map(self): return ipsec_utils.AUTH_ALGORITHM_MAP @property def pfs_map(self): return ipsec_utils.PFS_MAP def _validate_t0_ha_mode(self, tier0_uuid): # TODO(asarfaty): cache this result tier0_router = self.nsxlib.logical_router.get(tier0_uuid) if (not tier0_router or tier0_router.get('high_availability_mode') != 'ACTIVE_STANDBY'): msg = _("The router GW should be connected to a TIER-0 router " "with ACTIVE_STANDBY HA mode") raise nsx_exc.NsxVpnValidationError(details=msg) def _validate_router(self, context, router_id): super(IPsecV3Validator, self)._validate_router(context, router_id) # Verify that this is a no-snat router router_db = self._core_plugin._get_router(context, router_id) if router_db.enable_snat: msg = _("VPN is supported only for routers with disabled SNAT") raise nsx_exc.NsxVpnValidationError(details=msg) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2222543 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/0000755000175000017500000000000000000000000020477 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/__init__.py0000644000175000017500000000357500000000000022622 0ustar00coreycorey00000000000000# Copyright 2014 VMware, Inc. 
# # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from neutronclient import shell from oslo_config import cfg from oslo_log import log as logging from vmware_nsx.shell import commands as cmd # Oslo Logging uses INFO as default # Use a simple format for the output logging_format_string = '%(message)s' logging.register_options(cfg.CONF) logging.setup(cfg.CONF, "vmware-nsx") cfg.CONF.set_override('logging_context_format_string', logging_format_string) cfg.CONF.set_override('logging_default_format_string', logging_format_string) cfg.CONF.set_override('logging_exception_prefix', '') class NsxManage(shell.NeutronShell): def __init__(self, api_version): super(NsxManage, self).__init__(api_version) self.command_manager.add_command('net-migrate', cmd.NetworkMigrate) self.command_manager.add_command('net-report', cmd.NetworkReport) def build_option_parser(self, description, version): parser = super(NsxManage, self).build_option_parser( description, version) return parser def initialize_app(self, argv): super(NsxManage, self).initialize_app(argv) self.client = self.client_manager.neutron def main(): return NsxManage(shell.NEUTRON_API_VERSION).run(sys.argv[1:]) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2222543 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/0000755000175000017500000000000000000000000021567 
5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/README.rst0000644000175000017500000001503700000000000023264 0ustar00coreycorey00000000000000Admin Utility ============= Introduction ------------ Purpose of this script is to build a framework which can be leveraged to build utilities to help the on-field ops in system debugging. Adding custom functions ----------------------- Refer to the security groups example for reference implementation under, admin/plugins/nsx_v3/resources/securitygroups.py Adding new functions is fairly straightforward: * Define the function under appropriate package. We use neutron callbacks to provide hooks. So your function definition should be like, :: def function(resource, event, trigger, **kwargs) * Add the Resources and Operations enums if they don't exist. :: class Operations(object): NEUTRON_CLEAN = 'neutron_clean' :: nsxv3_resources = { constants.SECURITY_GROUPS: Resource(constants.SECURITY_GROUPS, ops) } * In resource.py, add the function to the callback registry. :: registry.subscribe(neutron_clean_security_groups, Resources.SECURITY_GROUPS.value, Operations.NEUTRON_CLEAN.value) * To test, do :: cd vmware-nsx/shell sudo pip install -e . nsxadmin -r -o TODO ---- * Use Cliff * Auto complete command line args. Directory Structure ------------------- admin/ plugins/ common/ Contains code specific to different plugin versions. nsx_v3/ resources/ Contains modules for various resources supported by the admin utility. These modules contains methods to perform operations on these resources. Installation ------------ :: sudo pip install -e . 
Usage ----- :: nsxadmin -r -o Example ------- :: $ nsxadmin -r security-groups -o list ==== [NSX] List Security Groups ==== Firewall Sections +------------------------------------------------+--------------------------------------+ | display_name | id | |------------------------------------------------+--------------------------------------| | default - 261343f8-4f35-4e57-9cc7-6c4fc7723b72 | 91a05fbd-054a-48b6-8e60-3b5d445be8c7 | | default - 823247b6-bdb3-47be-8bac-0d1114fc1ad7 | 78116d4a-de77-4a8f-b3e5-e76f458840ea | | OS default section for security-groups | 10a2fc6c-29c9-4d8d-ac2c-b24aafa15c79 | | Default Layer3 Section | e479e404-e712-4adb-879c-e432d510c056 | +------------------------------------------------+--------------------------------------+ Firewall NS Groups +------------------------------------------------+--------------------------------------+ | display_name | id | |------------------------------------------------+--------------------------------------| | NSGroup Container | c0b26e82-d49b-49f0-b68e-7449a59366e9 | | default - 261343f8-4f35-4e57-9cc7-6c4fc7723b72 | 2e5b5ca1-f687-4556-8130-9524b313474b | | default - 823247b6-bdb3-47be-8bac-0d1114fc1ad7 | b5cd9ae4-42b5-47a7-a1bf-9767ac62466e | +------------------------------------------------+--------------------------------------+ ==== [NEUTRON] List Security Groups Mappings ==== security-groups +---------+--------------------------------------+-----------------------------------------------------------+----------------------+ | name | id | section-uri | nsx-securitygroup-id | +---------+--------------------------------------+-----------------------------------------------------------+----------------------+ | default | f785c82a-5b28-42ac-aa0a-ad56720ccbbc | /api/4.0/firewall/globalroot-0/config/layer3sections/1006 | securitygroup-12 | +---------+--------------------------------------+-----------------------------------------------------------+----------------------+ $ nsxadmin -r security-groups -o 
list -f json ==== [NSX] List Security Groups ==== { "Firewall Sections": [ { "display_name": "default - 261343f8-4f35-4e57-9cc7-6c4fc7723b72", "id": "91a05fbd-054a-48b6-8e60-3b5d445be8c7" }, { "display_name": "default - 823247b6-bdb3-47be-8bac-0d1114fc1ad7", "id": "78116d4a-de77-4a8f-b3e5-e76f458840ea" }, { "display_name": "OS default section for security-groups", "id": "10a2fc6c-29c9-4d8d-ac2c-b24aafa15c79" }, { "display_name": "Default Layer3 Section", "id": "e479e404-e712-4adb-879c-e432d510c056" } ] } { "Firewall NS Groups": [ { "display_name": "NSGroup Container", "id": "c0b26e82-d49b-49f0-b68e-7449a59366e9" }, { "display_name": "default - 261343f8-4f35-4e57-9cc7-6c4fc7723b72", "id": "2e5b5ca1-f687-4556-8130-9524b313474b" }, { "display_name": "default - 823247b6-bdb3-47be-8bac-0d1114fc1ad7", "id": "b5cd9ae4-42b5-47a7-a1bf-9767ac62466e" } ] } ==== [NEUTRON] List Security Groups Mappings ==== security-groups { "security-groups": [ { "id": "f785c82a-5b28-42ac-aa0a-ad56720ccbbc", "name": "default", "nsx-securitygroup-id": "securitygroup-12", "section-uri": "/api/4.0/firewall/globalroot-0/config/layer3sections/1006" } } Upgrade Steps (Version 1.0.0 to Version 1.1.0) ---------------------------------------------- 1. Upgrade NSX backend from version 1.0.0 to version 1.1.0 2. Create a DHCP-Profile and a Metadata-Proxy in NSX backend 3. Stop Neutron 4. Install version 1.1.0 Neutron plugin 5. Run admin tools to migrate version 1.0.0 objects to version 1.1.0 objects * nsxadmin -r metadata-proxy -o nsx-update --property metadata_proxy_uuid= * nsxadmin -r dhcp-binding -o nsx-update --property dhcp_profile_uuid= 6. Start Neutron 7. Make sure /etc/nova/nova.conf has metadata_proxy_shared_secret = 8. 
Restart VMs or ifdown/ifup their network interface to get new DHCP options Help ---- :: $ nsxadmin --help ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/__init__.py0000644000175000017500000000000000000000000023666 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2222543 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/0000755000175000017500000000000000000000000023250 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/__init__.py0000644000175000017500000000000000000000000025347 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2262545 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/common/0000755000175000017500000000000000000000000024540 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/common/__init__.py0000644000175000017500000000000000000000000026637 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/common/constants.py0000644000175000017500000000457100000000000027135 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Default conf file locations NEUTRON_CONF = '/etc/neutron/neutron.conf' NSX_INI = '/etc/neutron/plugins/vmware/nsx.ini' # NSX Plugin Constants NSXP_PLUGIN = 'vmware_nsx.plugin.NsxPolicyPlugin' NSXV3_PLUGIN = 'vmware_nsx.plugin.NsxV3Plugin' NSXV_PLUGIN = 'vmware_nsx.plugin.NsxVPlugin' NSXTVD_PLUGIN = 'vmware_nsx.plugin.NsxTVDPlugin' VMWARE_NSXV = 'vmware_nsxv' VMWARE_NSXV3 = 'vmware_nsxv3' VMWARE_NSXP = 'vmware_nsxp' VMWARE_NSXTVD = 'vmware_nsxtvd' # Common Resource Constants NETWORKS = 'networks' ROUTERS = 'routers' DHCP_BINDING = 'dhcp-binding' FIREWALL_SECTIONS = 'firewall-sections' FIREWALL_NSX_GROUPS = 'nsx-security-groups' SECURITY_GROUPS = 'security-groups' CONFIG = 'config' ORPHANED_NETWORKS = 'orphaned-networks' ORPHANED_ROUTERS = 'orphaned-routers' SYSTEM = 'system' # NSXV3 only Resource Constants PORTS = 'ports' METADATA_PROXY = 'metadata-proxy' ORPHANED_DHCP_SERVERS = 'orphaned-dhcp-servers' CERTIFICATE = 'certificate' LB_SERVICES = 'lb-services' LB_VIRTUAL_SERVERS = 'lb-virtual-servers' LB_POOLS = 'lb-pools' LB_MONITORS = 'lb-monitors' LB_ADVERTISEMENT = 'lb-advertisement' RATE_LIMIT = 'rate-limit' CLUSTER = 'cluster' ORPHANED_FIREWALL_SECTIONS = 'orphaned-firewall-sections' # NSXV only Resource Constants EDGES = 'edges' SPOOFGUARD_POLICY = 'spoofguard-policy' BACKUP_EDGES = 'backup-edges' ORPHANED_EDGES = 'orphaned-edges' ORPHANED_BINDINGS = 'orphaned-bindings' ORPHANED_RULES = 'orphaned-rules' ORPHANED_VNICS = 'orphaned-vnics' MISSING_EDGES = 'missing-edges' METADATA = 'metadata' MISSING_NETWORKS = 'missing-networks' BGP_GW_EDGE = 'bgp-gw-edge' 
ROUTING_REDIS_RULE = 'routing-redistribution-rule' BGP_NEIGHBOUR = 'bgp-neighbour' NSX_PORTGROUPS = 'nsx-portgroups' NSX_MIGRATE_V_T = 'nsx-migrate-v2t' # NSXTV only Resource Constants PROJECTS = 'projects' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/common/formatters.py0000644000175000017500000000434700000000000027310 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils import prettytable LOG = logging.getLogger(__name__) def output_formatter(resource_name, resources_list, attrs): """Method to format the output response from NSX/Neutron. Depending on the --fmt cli option we format the output as JSON or as a table. 
""" LOG.info('%(resource_name)s', {'resource_name': resource_name}) if not resources_list: LOG.info('No resources found') return '' fmt = cfg.CONF.fmt if fmt == 'psql': tableout = prettytable.PrettyTable(attrs) tableout.padding_width = 1 tableout.align = "l" for resource in resources_list: resource_list = [] for attr in attrs: resource_list.append(resource.get(attr)) tableout.add_row(resource_list) return tableout elif fmt == 'json': js_output = {} js_output[resource_name] = [] for resource in resources_list: result = {} for attr in attrs: result[attr] = resource[attr] js_output[resource_name].append(result) return jsonutils.dumps(js_output, sort_keys=True, indent=4) def tabulate_results(data): """Method to format the data in a tabular format. Expects a list of tuple with the first tuple in the list; being treated as column headers. """ columns = data.pop(0) table = prettytable.PrettyTable(["%s" % col for col in columns]) for contents in data: table.add_row(["%s" % col for col in contents]) return table ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/common/utils.py0000644000175000017500000001024700000000000026256 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import sys import six from vmware_nsx._i18n import _ from vmware_nsx.db import db from vmware_nsx.shell import resources as nsxadmin from neutron.common import profiler # noqa from neutron_lib.callbacks import registry from oslo_log import log as logging LOG = logging.getLogger(__name__) def output_header(func): """Decorator to demarcate the output of various hooks. Based on the callback function name we add a header to the cli output. Callback name's should follow the convention of component_operation_it_does to leverage the decorator """ def func_desc(*args, **kwargs): component = '[%s]' % func.__name__.split('_')[0].upper() op_desc = [n.capitalize() for n in func.__name__.split('_')[1:]] LOG.info('==== %(component)s %(operation)s ====', {'component': component, 'operation': ' '.join(op_desc)}) return func(*args, **kwargs) func_desc.__name__ = func.__name__ return func_desc def parse_multi_keyval_opt(opt_list): """Converts a MutliStrOpt to a key-value dict""" result = dict() opt_list = opt_list if opt_list else [] for opt_value in opt_list: try: key, value = opt_value.split('=') result[key] = value except ValueError: raise ValueError(_("Illegal argument [%s]: input should have the " "format of '--property key=value'") % opt_value) return result def query_yes_no(question, default="yes"): """Ask a yes/no question via raw_input() and return their answer. "question" is a string that is presented to the user. "default" is the presumed answer if the user just hits . It must be "yes" (the default), "no" or None (meaning an answer is required of the user). The "answer" return value is True for "yes" or False for "no". 
""" valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False} if default is None: prompt = " [y/n] " elif default == "yes": prompt = " [Y/n] " elif default == "no": prompt = " [y/N] " else: raise ValueError(_("invalid default answer: '%s'") % default) while True: sys.stdout.write(question + prompt) choice = six.moves.input().lower() if default is not None and choice == '': return valid[default] elif choice in valid: return valid[choice] else: sys.stdout.write("Please respond with 'yes' or 'no' " "(or 'y' or 'n').\n") def list_handler(resource): def wrap(func): registry.subscribe(func, resource, nsxadmin.Operations.LIST.value) return func return wrap def list_mismatches_handler(resource): def wrap(func): registry.subscribe(func, resource, nsxadmin.Operations.LIST_MISMATCHES.value) return func return wrap def fix_mismatches_handler(resource): def wrap(func): registry.subscribe(func, resource, nsxadmin.Operations.FIX_MISMATCH.value) return func return wrap def get_plugin_filters(context, plugin): # Return filters for the neutron list apis so that only resources from # a specific plugin will be returned. filters = {} core_plugin = nsxadmin.get_plugin() if core_plugin == 'nsxtvd': maps = db.get_project_plugin_mappings_by_plugin( context.session, plugin) if maps: filters['project_id'] = [m.project for m in maps] return filters ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/common/v3_common_cert.py0000644000175000017500000002171500000000000030035 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging from neutron_lib import context from vmware_nsx.plugins.nsx_v3 import cert_utils from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils from vmware_nsxlib.v3 import client_cert from vmware_nsxlib.v3 import exceptions from vmware_nsxlib.v3 import trust_management LOG = logging.getLogger(__name__) CERT_DEFAULTS = {'key-size': 2048, 'sig-alg': 'sha256', 'valid-days': 3650, 'country': 'US', 'state': 'California', 'org': 'default org', 'unit': 'default unit', 'host': 'defaulthost.org'} def get_nsx_trust_management(plugin_conf, **kwargs): username, password = None, None if kwargs.get('property'): properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) username = properties.get('user') password = properties.get('password') nsx_client = utils.get_nsxv3_client(username, password, True, plugin_conf=plugin_conf) nsx_trust = trust_management.NsxLibTrustManagement(nsx_client, {}) return nsx_trust def get_certificate_manager(plugin_conf, **kwargs): storage_driver_type = plugin_conf.nsx_client_cert_storage.lower() LOG.info("Certificate storage is %s", storage_driver_type) if storage_driver_type == 'nsx-db': storage_driver = cert_utils.DbCertificateStorageDriver( context.get_admin_context()) elif storage_driver_type == 'none': storage_driver = cert_utils.DummyCertificateStorageDriver() # TODO(annak) - add support for barbican storage driver return client_cert.ClientCertificateManager( cert_utils.NSX_OPENSTACK_IDENTITY, 
get_nsx_trust_management(plugin_conf, **kwargs), storage_driver) def verify_client_cert_on(plugin_conf): if not plugin_conf.nsx_use_client_auth: LOG.info("Operation not applicable since client authentication " "is disabled") return False try: if not plugin_conf.allow_passthrough: LOG.info("Operation not applicable since passthrough API is " "disabled") return False except cfg.NoSuchOptError: # No such option exists - passthrough check is irrelevant pass return True def generate_cert(plugin_conf, **kwargs): """Generate self signed client certificate and private key """ if not verify_client_cert_on(plugin_conf): return if plugin_conf.nsx_client_cert_storage.lower() == "none": LOG.info("Generate operation is not supported " "with storage type 'none'") return # update cert defaults based on user input properties = CERT_DEFAULTS.copy() if kwargs.get('property'): properties.update(admin_utils.parse_multi_keyval_opt( kwargs['property'])) try: prop = 'key-size' key_size = int(properties.get(prop)) prop = 'valid-days' valid_for_days = int(properties.get(prop)) except ValueError: LOG.info("%s property must be a number", prop) return signature_alg = properties.get('sig-alg') subject = {} subject[client_cert.CERT_SUBJECT_COUNTRY] = properties.get('country') subject[client_cert.CERT_SUBJECT_STATE] = properties.get('state') subject[client_cert.CERT_SUBJECT_ORG] = properties.get('org') subject[client_cert.CERT_SUBJECT_UNIT] = properties.get('org') subject[client_cert.CERT_SUBJECT_HOST] = properties.get('host') regenerate = False with get_certificate_manager(plugin_conf, **kwargs) as cert: if cert.exists(): LOG.info("Deleting existing certificate") # Need to delete cert first cert.delete() regenerate = True try: cert.generate(subject, key_size, valid_for_days, signature_alg) except exceptions.NsxLibInvalidInput as e: LOG.info(e) return LOG.info("Client certificate generated successfully") if not regenerate: # No certificate existed, so client authentication service was likely # 
changed to true just now. The user must restart neutron to avoid # failures. LOG.info("Please restart neutron service") def delete_cert(plugin_conf, **kwargs): """Delete client certificate and private key """ if not verify_client_cert_on(plugin_conf): return with get_certificate_manager(plugin_conf, **kwargs) as cert: if plugin_conf.nsx_client_cert_storage.lower() == "none": filename = get_cert_filename(plugin_conf, **kwargs) if not filename: LOG.info("Please specify file containing the certificate " "using filename property") return cert.delete_pem(filename) else: if not cert.exists(): LOG.info("Nothing to clean") return cert.delete() LOG.info("Client certificate deleted successfully") def show_cert(plugin_conf, **kwargs): """Show client certificate details """ if not verify_client_cert_on(plugin_conf): return with get_certificate_manager(plugin_conf, **kwargs) as cert: if cert.exists(): cert_pem, key_pem = cert.get_pem() expires_on = cert.expires_on() expires_in_days = cert.expires_in_days() cert_data = cert.get_subject() cert_data['alg'] = cert.get_signature_alg() cert_data['key_size'] = cert.get_key_size() if expires_in_days >= 0: LOG.info("Client certificate is valid. 
" "Expires on %(date)s UTC (in %(days)d days).", {'date': expires_on, 'days': expires_in_days}) else: LOG.info("Client certificate expired on %s.", expires_on) LOG.info("Key Size %(key_size)s, " "Signature Algorithm %(alg)s\n" "Subject: Country %(country)s, State %(state)s, " "Organization %(organization)s, Unit %(unit)s, " "Common Name %(hostname)s", cert_data) LOG.info(cert_pem) else: LOG.info("Client certificate is not registered " "in storage") def get_cert_filename(plugin_conf, **kwargs): filename = plugin_conf.nsx_client_cert_file if kwargs.get('property'): properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) filename = properties.get('filename', filename) if not filename: LOG.info("Please specify file containing the certificate " "using filename property") return filename def import_cert(plugin_conf, **kwargs): """Import client certificate that was generated externally""" if not verify_client_cert_on(plugin_conf): return if plugin_conf.nsx_client_cert_storage.lower() != "none": LOG.info("Import operation is supported " "with storage type 'none' only") return with get_certificate_manager(plugin_conf, **kwargs) as cert: if cert.exists(): LOG.info("Deleting existing certificate") cert.delete() filename = get_cert_filename(plugin_conf, **kwargs) if not filename: return cert.import_pem(filename) LOG.info("Client certificate imported successfully") def show_nsx_certs(plugin_conf, **kwargs): """Show client certificates associated with openstack identity in NSX""" # Note - this operation is supported even if the feature is disabled nsx_trust = get_nsx_trust_management(plugin_conf, **kwargs) ids = nsx_trust.get_identities(cert_utils.NSX_OPENSTACK_IDENTITY) if not ids: LOG.info("Principal identity %s not found", cert_utils.NSX_OPENSTACK_IDENTITY) return LOG.info("Certificate(s) associated with principal identity %s\n", cert_utils.NSX_OPENSTACK_IDENTITY) cert = None for identity in ids: if 'certificate_id' in identity: cert = 
nsx_trust.get_cert(identity['certificate_id']) LOG.info(cert['pem_encoded']) if not cert: LOG.info("No certificates found") ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2262545 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxp/0000755000175000017500000000000000000000000024240 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxp/__init__.py0000644000175000017500000000000000000000000026337 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2262545 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxp/resources/0000755000175000017500000000000000000000000026252 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxp/resources/__init__.py0000644000175000017500000000000000000000000030351 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxp/resources/certificates.py0000644000175000017500000000507300000000000031276 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.common import v3_common_cert from vmware_nsx.shell import resources as shell from neutron_lib.callbacks import registry from oslo_config import cfg @admin_utils.output_header def generate_cert(resource, event, trigger, **kwargs): """Generate self signed client certificate and private key """ return v3_common_cert.generate_cert(cfg.CONF.nsx_p, **kwargs) @admin_utils.output_header def delete_cert(resource, event, trigger, **kwargs): """Delete client certificate and private key """ return v3_common_cert.delete_cert(cfg.CONF.nsx_p, **kwargs) @admin_utils.output_header def show_cert(resource, event, trigger, **kwargs): """Show client certificate details """ return v3_common_cert.show_cert(cfg.CONF.nsx_p, **kwargs) @admin_utils.output_header def import_cert(resource, event, trigger, **kwargs): """Import client certificate that was generated externally""" return v3_common_cert.import_cert(cfg.CONF.nsx_p, **kwargs) @admin_utils.output_header def show_nsx_certs(resource, event, trigger, **kwargs): """Show client certificates associated with openstack identity in NSX""" return v3_common_cert.show_nsx_certs(cfg.CONF.nsx_p, **kwargs) registry.subscribe(generate_cert, constants.CERTIFICATE, shell.Operations.GENERATE.value) registry.subscribe(show_cert, constants.CERTIFICATE, shell.Operations.SHOW.value) registry.subscribe(delete_cert, constants.CERTIFICATE, shell.Operations.CLEAN.value) registry.subscribe(import_cert, constants.CERTIFICATE, shell.Operations.IMPORT.value) registry.subscribe(show_nsx_certs, constants.CERTIFICATE, shell.Operations.NSX_LIST.value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 
vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxp/resources/networks.py0000644000175000017500000001362400000000000030506 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.callbacks import registry from neutron_lib import context from oslo_log import log as logging from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.nsxp.resources import utils as p_utils from vmware_nsx.shell import resources as shell from vmware_nsxlib.v3 import nsx_constants LOG = logging.getLogger(__name__) @admin_utils.list_handler(constants.NETWORKS) @admin_utils.output_header def list_networks(resource, event, trigger, **kwargs): """List neutron networks With the NSX policy resources and realization state. 
""" mappings = [] nsxpolicy = p_utils.get_connected_nsxpolicy() ctx = context.get_admin_context() with p_utils.NsxPolicyPluginWrapper() as plugin: nets = plugin.get_networks(ctx) for net in nets: # skip non-backend networks if plugin._network_is_external(ctx, net['id']): continue segment_id = plugin._get_network_nsx_segment_id(ctx, net['id']) status = p_utils.get_realization_info( nsxpolicy.segment, segment_id) mappings.append({'ID': net['id'], 'Name': net.get('name'), 'Project': net.get('tenant_id'), 'NSX status': status}) p_utils.log_info(constants.NETWORKS, mappings, attrs=['Project', 'Name', 'ID', 'NSX status']) return bool(mappings) @admin_utils.output_header def migrate_dhcp_to_policy(resource, event, trigger, **kwargs): errmsg = ("Need to specify policy dhcp config id. Add " "--property dhcp-config=") if not kwargs.get('property'): LOG.error("%s", errmsg) return properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) dhcp_config_id = properties.get('dhcp-config') if not dhcp_config_id: LOG.error("%s", errmsg) return nsxpolicy = p_utils.get_connected_nsxpolicy() if not nsxpolicy.feature_supported( nsx_constants.FEATURE_NSX_POLICY_DHCP): LOG.error("This utility is not available for NSX version %s", nsxpolicy.get_version()) return try: nsxpolicy.dhcp_server_config.get(dhcp_config_id) except Exception: LOG.error("%s", errmsg) return ctx = context.get_admin_context() migrate_count = 0 with p_utils.NsxPolicyPluginWrapper() as plugin: nets = plugin.get_networks(ctx) for net in nets: # skip non-dhcp networks dhcp_port = plugin._get_net_dhcp_port(ctx, net['id']) if not dhcp_port: LOG.info("Skipping network %s: No DHCP subnet found", net['id']) continue dhcp_subnet_id = [fip['subnet_id'] for fip in dhcp_port['fixed_ips']][0] az = plugin.get_network_az_by_net_id(ctx, net['id']) az._policy_dhcp_server_config = dhcp_config_id dhcp_subnet = plugin.get_subnet(ctx, dhcp_subnet_id) # Verify that this network does not use policy DHCP already segment_id = 
plugin._get_network_nsx_segment_id(ctx, net['id']) segment = nsxpolicy.segment.get(segment_id) if segment.get('dhcp_config_path'): LOG.info("Skipping network %s: Already using policy DHCP", net['id']) continue LOG.info("Migrating network %s", net['id']) # Disable MP DHCP plugin._disable_native_dhcp(ctx, net['id']) # Enable Policy DHCP plugin._enable_subnet_dhcp(ctx, net, dhcp_subnet, az) migrate_count = migrate_count + 1 LOG.info("Finished migrating %s networks", migrate_count) @admin_utils.output_header def update_admin_state(resource, event, trigger, **kwargs): """Upon upgrade to NSX3 update policy segments & ports So that the neutron admin state will match the policy one """ nsxpolicy = p_utils.get_connected_nsxpolicy() if not nsxpolicy.feature_supported( nsx_constants.FEATURE_NSX_POLICY_ADMIN_STATE): LOG.error("This utility is not available for NSX version %s", nsxpolicy.get_version()) return ctx = context.get_admin_context() with p_utils.NsxPolicyPluginWrapper() as plugin: # Inconsistencies can happen only if the neutron state is Down filters = {'admin_state_up': [False]} nets = plugin.get_networks(ctx, filters=filters) for net in nets: seg_id = plugin._get_network_nsx_segment_id(ctx, net['id']) nsxpolicy.segment.set_admin_state(seg_id, False) ports = plugin.get_ports(ctx, filters=filters) for port in ports: seg_id = plugin._get_network_nsx_segment_id( ctx, port['network_id']) nsxpolicy.segment_port.set_admin_state(seg_id, port['id'], False) registry.subscribe(update_admin_state, constants.NETWORKS, shell.Operations.NSX_UPDATE_STATE.value) registry.subscribe(migrate_dhcp_to_policy, constants.DHCP_BINDING, shell.Operations.MIGRATE_TO_POLICY.value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxp/resources/routers.py0000644000175000017500000001534300000000000030335 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. 
All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.db import db_base_plugin_v2 from neutron.db import l3_db from neutron_lib.callbacks import registry from neutron_lib import context from oslo_log import log as logging from vmware_nsx.db import nsx_models from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.nsxp.resources import utils as p_utils from vmware_nsx.shell import resources as shell from vmware_nsxlib.v3 import nsx_constants from vmware_nsxlib.v3.policy import constants as policy_constants from vmware_nsxlib.v3.policy import transaction as policy_trans LOG = logging.getLogger(__name__) class RoutersPlugin(db_base_plugin_v2.NeutronDbPluginV2, l3_db.L3_NAT_db_mixin): pass @admin_utils.list_handler(constants.ROUTERS) @admin_utils.output_header def list_routers(resource, event, trigger, **kwargs): """List neutron routers With the NSX policy resources and realization state. 
""" mappings = [] nsxpolicy = p_utils.get_connected_nsxpolicy() ctx = context.get_admin_context() with p_utils.NsxPolicyPluginWrapper() as plugin: routers = plugin.get_routers(ctx, fields=['id', 'name', 'tenant_id']) for rtr in routers: status = p_utils.get_realization_info( nsxpolicy.tier1, rtr['id']) mappings.append({'ID': rtr['id'], 'Name': rtr.get('name'), 'Project': rtr.get('tenant_id'), 'NSX status': status}) p_utils.log_info(constants.ROUTERS, mappings, attrs=['Project', 'Name', 'ID', 'NSX status']) return bool(mappings) @admin_utils.output_header def update_tier0(resource, event, trigger, **kwargs): """Replace old tier0 with a new one on the neutron DB and NSX backend""" errmsg = ("Need to specify old and new tier0 ID. Add --property " "old-tier0= --property new-tier0=") if not kwargs.get('property'): LOG.error("%s", errmsg) return properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) old_tier0 = properties.get('old-tier0') new_tier0 = properties.get('new-tier0') if not old_tier0 or not new_tier0: LOG.error("%s", errmsg) return # Verify the id of the new tier0 (old one might not exist any more) nsxpolicy = p_utils.get_connected_nsxpolicy() try: nsxpolicy.tier0.get(new_tier0) except Exception: LOG.error("Tier0 logical router %s was not found", new_tier0) return # update all neutron DB entries old_tier0_networks = [] ctx = context.get_admin_context() with ctx.session.begin(subtransactions=True): bindings = ctx.session.query( nsx_models.TzNetworkBinding).filter_by(phy_uuid=old_tier0).all() for bind in bindings: old_tier0_networks.append(bind.network_id) bind.phy_uuid = new_tier0 if not old_tier0_networks: LOG.info("Did not find any provider networks using tier0 %s", old_tier0) return LOG.info("Updated provider networks in DB: %s", old_tier0_networks) # Update tier1 routers GW to point to the new tier0 in the backend plugin = RoutersPlugin() neutron_routers = plugin.get_routers(ctx) for router in neutron_routers: router_gw_net = 
(router.get('external_gateway_info') and router['external_gateway_info'].get('network_id')) if router_gw_net and router_gw_net in old_tier0_networks: try: nsxpolicy.tier1.update(router['id'], tier0=new_tier0) except Exception as e: LOG.error("Failed to update router %s linked port: %s", router['id'], e) else: LOG.info("Updated router %s uplink port", router['id']) LOG.info("Done.") @admin_utils.output_header def update_nat_firewall_match(resource, event, trigger, **kwargs): """Update the firewall_match value in neutron nat rules with a new value""" errmsg = ("Need to specify internal/external firewall_match value. " "Add --property firewall-match=") if not kwargs.get('property'): LOG.error("%s", errmsg) return properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) firewall_match_str = properties.get('firewall-match') if (not firewall_match_str or firewall_match_str.lower() not in ('internal', 'external')): LOG.error("%s", errmsg) return if firewall_match_str.lower() == 'internal': new_firewall_match = policy_constants.NAT_FIREWALL_MATCH_INTERNAL old_firewall_match = policy_constants.NAT_FIREWALL_MATCH_EXTERNAL else: new_firewall_match = policy_constants.NAT_FIREWALL_MATCH_EXTERNAL old_firewall_match = policy_constants.NAT_FIREWALL_MATCH_INTERNAL nsxpolicy = p_utils.get_connected_nsxpolicy() plugin = RoutersPlugin() ctx = context.get_admin_context() neutron_routers = plugin.get_routers(ctx) for router in neutron_routers: rules = nsxpolicy.tier1_nat_rule.list(router['id']) for rule in rules: if not nsxpolicy.feature_supported( nsx_constants.FEATURE_PARTIAL_UPDATES): if rule['firewall_match'] == old_firewall_match: nsxpolicy.tier1_nat_rule.update( router['id'], rule['id'], firewall_match=new_firewall_match) else: with policy_trans.NsxPolicyTransaction(): if rule['firewall_match'] == old_firewall_match: nsxpolicy.tier1_nat_rule.update( router['id'], rule['id'], firewall_match=new_firewall_match) LOG.info("Done.") registry.subscribe(update_tier0, 
constants.ROUTERS, shell.Operations.UPDATE_TIER0.value) registry.subscribe(update_nat_firewall_match, constants.ROUTERS, shell.Operations.UPDATE_FIREWALL_MATCH.value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxp/resources/securitygroups.py0000644000175000017500000000412200000000000031732 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.db import securitygroups_db from neutron_lib import context from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.nsxp.resources import utils as p_utils from vmware_nsxlib.v3.policy import constants as policy_constants neutron_client = securitygroups_db.SecurityGroupDbMixin() @admin_utils.list_handler(constants.SECURITY_GROUPS) @admin_utils.output_header def list_security_groups(resource, event, trigger, **kwargs): """List neutron security groups With the NSX policy resources and realization state. 
""" mappings = [] nsxpolicy = p_utils.get_connected_nsxpolicy() ctx = context.get_admin_context() sgs = neutron_client.get_security_groups(ctx) domain_id = policy_constants.DEFAULT_DOMAIN for sg in sgs: map_status = p_utils.get_realization_info( nsxpolicy.comm_map, domain_id, sg['id']) group_status = p_utils.get_realization_info( nsxpolicy.group, domain_id, sg['id']) mappings.append({'ID': sg['id'], 'Name': sg.get('name'), 'Project': domain_id, 'NSX Group': group_status, 'NSX Map': map_status}) p_utils.log_info(constants.SECURITY_GROUPS, mappings, attrs=['Project', 'Name', 'ID', 'NSX Group', 'NSX Map']) return bool(mappings) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxp/resources/system.py0000644000175000017500000000516300000000000030155 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.nsxp.resources import utils as p_utils from vmware_nsx.shell import resources as shell from neutron_lib.callbacks import registry LOG = logging.getLogger(__name__) MIN_REALIZATION_INTERVAL = 1 MAX_REALIZATION_INTERVAL = 10 def set_system_parameters(resource, event, trigger, **kwargs): """Set interval that controls realization and purge frequency This setting is affecting NSX Policy Manager appliance. """ usage = ("Usage: nsxadmin -r %s -o %s --property realization_interval=" "<%s-%s> ") % (constants.SYSTEM, shell.Operations.SET.value, MIN_REALIZATION_INTERVAL, MAX_REALIZATION_INTERVAL) if kwargs.get('property'): properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) interval = properties.get('realization_interval') if interval: int_interval = int(interval) if int_interval not in range(MIN_REALIZATION_INTERVAL, MAX_REALIZATION_INTERVAL + 1): LOG.info("Realization interval should be in range %d-%d", MIN_REALIZATION_INTERVAL, MAX_REALIZATION_INTERVAL) return nsxpolicy = p_utils.get_connected_nsxpolicy() try: nsxpolicy.set_realization_interval(int_interval) except Exception as ex: LOG.error("Failed to apply intent realization interval to " "policy appliance - %s", ex) LOG.info("Intent realization interval set to %s min" % interval) else: LOG.error("Missing parameters: %s", usage) else: LOG.error("Missing parameters: %s", usage) registry.subscribe(set_system_parameters, constants.SYSTEM, shell.Operations.SET.value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxp/resources/utils.py0000644000175000017500000000612500000000000027770 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron.db import l3_dvr_db # noqa from neutron_lib import context from neutron_lib.plugins import constants as const from neutron_lib.plugins import directory from oslo_log import log as logging from vmware_nsx.common import config from vmware_nsx.plugins.nsx_p import plugin from vmware_nsx.plugins.nsx_v3 import utils as v3_utils from vmware_nsx.shell.admin.plugins.common import formatters LOG = logging.getLogger(__name__) _NSXPOLICY = None def get_nsxp_client(nsx_username=None, nsx_password=None, use_basic_auth=False): return get_connected_nsxpolicy(nsx_username, nsx_password, use_basic_auth).client def get_connected_nsxpolicy(nsx_username=None, nsx_password=None, use_basic_auth=False): global _NSXPOLICY # for non-default agruments, initiate new lib if nsx_username or use_basic_auth: return v3_utils.get_nsxpolicy_wrapper(nsx_username, nsx_password, use_basic_auth) if _NSXPOLICY is None: _NSXPOLICY = v3_utils.get_nsxpolicy_wrapper() return _NSXPOLICY def log_info(resource, data, attrs=['display_name', 'id']): LOG.info(formatters.output_formatter(resource, data, attrs)) def get_realization_info(resource, *realization_args): try: nsx_info = resource.get_realization_info(*realization_args, silent=True) if not nsx_info: info_text = "MISSING" else: state = nsx_info.get('state') nsx_id = nsx_info.get('realization_specific_identifier') info_text = "%s (ID: %s)" % (state, nsx_id) except Exception as e: 
LOG.warning("Failed to get realization info for %s(%s): %s", resource, str(realization_args), e) info_text = "UNKNOWN" return info_text class NsxPolicyPluginWrapper(plugin.NsxPolicyPlugin): def __init__(self): # initialize the availability zones config.register_nsxp_azs(cfg.CONF, cfg.CONF.nsx_p.availability_zones) super(NsxPolicyPluginWrapper, self).__init__() self.context = context.get_admin_context() def __enter__(self): directory.add_plugin(const.CORE, self) return self def __exit__(self, exc_type, exc_value, traceback): directory.add_plugin(const.CORE, None) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2262545 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxtvd/0000755000175000017500000000000000000000000024576 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxtvd/__init__.py0000644000175000017500000000000000000000000026675 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2262545 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxtvd/resources/0000755000175000017500000000000000000000000026610 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxtvd/resources/__init__.py0000644000175000017500000000000000000000000030707 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxtvd/resources/migrate.py0000644000175000017500000005043000000000000030614 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from neutron.extensions import securitygroup as ext_sg from neutron_lib.callbacks import registry from neutron_lib import context as n_context from neutron_lib import exceptions from vmware_nsx.api_replay import utils as replay_utils from vmware_nsx.db import db from vmware_nsx.extensions import projectpluginmap from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.nsxv.resources import utils as v_utils from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils as v3_utils from vmware_nsx.shell import resources as shell LOG = logging.getLogger(__name__) # list of supported objects to migrate in order of deletion (creation will be # in the opposite order) migrated_resources = ["floatingip", "router", "port", "subnet", "network", "security_group"] #TODO(asarfaty): add other resources of different service plugins like #vpnaas, fwaas, lbaas, qos, subnetpool, etc @admin_utils.output_header def import_projects(resource, event, trigger, **kwargs): """Import existing openstack projects to the current plugin""" # TODO(asarfaty): get the projects list from keystone # get the plugin name from the user if not kwargs.get('property'): LOG.error("Need to specify plugin and project parameters") return else: properties 
= admin_utils.parse_multi_keyval_opt(kwargs['property']) plugin = properties.get('plugin') project = properties.get('project') if not plugin or not project: LOG.error("Need to specify plugin and project parameters") return if plugin not in projectpluginmap.VALID_TYPES: LOG.error("The supported plugins are %s", projectpluginmap.VALID_TYPES) return ctx = n_context.get_admin_context() curr_map = db.get_project_plugin_mapping(ctx.session, project) if not curr_map: db.add_project_plugin_mapping(ctx.session, project, plugin) LOG.info('Done.') else: if curr_map.plugin == plugin: LOG.info("%s is already the plugin of project %s", plugin, project) else: LOG.error("Project %s is mapped to plugin %s. Existing mapping " "cannot be modified.", project, curr_map.plugin) def get_resource_file_name(project_id, resource): return "%s_nsxv_%ss" % (project_id, resource) def read_v_resources_to_files(context, project_id): """Read all relevant NSX-V resources from a specific project and write them into a json file """ results = {} with v_utils.NsxVPluginWrapper() as plugin: filters = {'project_id': [project_id]} for resource in migrated_resources: filename = get_resource_file_name(project_id, resource) file = open(filename, 'w') get_objects = getattr(plugin, "get_%ss" % resource) objects = get_objects(context, filters=filters) # also add router gateway ports of the relevant routers # (don't have the project id) if resource == 'port': rtr_ids = [rtr['id'] for rtr in results['router']] gw_filters = {'device_owner': ['network:router_gateway'], 'device_id': rtr_ids} gw_ports = plugin.get_ports(context, filters=gw_filters, filter_project=False) # ignore metadata gw ports objects.extend([port for port in gw_ports if not port['tenant_id']]) file.write(jsonutils.dumps(objects, sort_keys=True, indent=4)) file.close() results[resource] = objects return results def read_v_resources_from_files(project_id): """Read all relevant NSX-V resources from a json file""" results = {} for resource in 
migrated_resources: filename = get_resource_file_name(project_id, resource) file = open(filename, 'r') results[resource] = jsonutils.loads(file.read()) file.close() return results def delete_router_routes_and_interfaces(context, plugin, router): if router.get('routes'): plugin.update_router(context, router['id'], {'router': {'routes': []}}) interfaces = plugin._get_router_interfaces(context, router['id']) for port in interfaces: plugin.remove_router_interface(context, router['id'], {'port_id': port['id']}) def delete_v_resources(context, objects): """Delete a list of objects from the V plugin""" with v_utils.NsxVPluginWrapper() as plugin: LOG.info(">>>>Deleting all NSX-V objects of the project.") for resource in migrated_resources: get_object = getattr(plugin, "get_%s" % resource) del_object = getattr(plugin, "delete_%s" % resource) for obj in objects[resource]: # verify that this object still exists try: get_object(context, obj['id']) except exceptions.NotFound: # prevent logger from logging this exception sys.exc_clear() continue try: # handle special cases before delete if resource == 'router': delete_router_routes_and_interfaces( context, plugin, obj) elif resource == 'port': if obj['device_owner'] == 'network:dhcp': continue # delete the objects from the NSX-V plugin del_object(context, obj['id']) LOG.info(">>Deleted %(resource)s %(name)s", {'resource': resource, 'name': obj.get('name') or obj['id']}) except Exception as e: LOG.warning(">>Failed to delete %(resource)s %(name)s: " "%(e)s", {'resource': resource, 'name': obj.get('name') or obj['id'], 'e': e}) LOG.info(">>>>Done deleting all NSX-V objects.") def get_router_by_id(objects, router_id): for rtr in objects.get('router', []): if rtr['id'] == router_id: return rtr def create_t_resources(context, objects, ext_net): """Create a list of objects in the T plugin""" LOG.info(">>>>Creating all the objects of the project in NSX-T.") prepare = replay_utils.PrepareObjectForMigration() with 
v3_utils.NsxV3PluginWrapper() as plugin: # create the resource in the order opposite to the deletion # (but start with routers) ordered_resources = migrated_resources[::-1] ordered_resources.remove('router') ordered_resources = ['router'] + ordered_resources dhcp_subnets = [] for resource in ordered_resources: total_num = len(objects[resource]) LOG.info(">>>Creating %s %s%s.", total_num, resource, 's' if total_num > 1 else '') get_object = getattr(plugin, "get_%s" % resource) create_object = getattr(plugin, "create_%s" % resource) # go over the objects of this resource for count, obj in enumerate(objects[resource], 1): # check if this object already exists try: get_object(context, obj['id']) except exceptions.NotFound: # prevent logger from logging this exception sys.exc_clear() else: # already exists (this will happen if we rerun from files, # or if the deletion failed) LOG.info(">>Skipping %(resource)s %(name)s %(count)s/" "%(total)s as it was already created.", {'resource': resource, 'name': obj.get('name') or obj['id'], 'count': count, 'total': total_num}) continue # fix object before creation using the api replay code orig_id = obj['id'] prepare_object = getattr(prepare, "prepare_%s" % resource) # TODO(asarfaty): Add availability zones support too obj_data = prepare_object(obj, direct_call=True) enable_dhcp = False # special cases for different objects before create: if resource == 'subnet': if obj_data['enable_dhcp']: enable_dhcp = True # disable dhcp for now, to avoid ip collisions obj_data['enable_dhcp'] = False elif resource == 'security_group': # security group rules should be added separately sg_rules = obj_data.pop('security_group_rules') elif resource == 'floatingip': # Create the floating IP on the T external network obj_data['floating_network_id'] = ext_net del obj_data['floating_ip_address'] elif resource == 'port': # remove the old subnet id field from ports fixed_ips dict # since the subnet ids are changed for fixed_ips in obj_data['fixed_ips']: 
del fixed_ips['subnet_id'] if obj_data['device_owner'] == 'network:dhcp': continue if obj_data['device_owner'] == 'network:floatingip': continue if obj_data['device_owner'] == 'network:router_gateway': # add a gateway on the new ext network for this router router_id = obj_data['device_id'] # keep the original enable-snat value router_data = get_router_by_id(objects, router_id) enable_snat = router_data['external_gateway_info'].get( 'enable_snat', True) rtr_body = { "external_gateway_info": {"network_id": ext_net, "enable_snat": enable_snat}} try: plugin.update_router( context, router_id, {'router': rtr_body}) LOG.info(">>Uplinked router %(rtr)s to new " "external network %(net)s", {'rtr': router_id, 'net': ext_net}) except Exception as e: LOG.error(">>Failed to add router %(rtr)s " "gateway: %(e)s", {'rtr': router_id, 'e': e}) continue if obj_data['device_owner'] == 'network:router_interface': try: # uplink router_interface ports by creating the # port, and attaching it to the router router_id = obj_data['device_id'] obj_data['device_owner'] = "" obj_data['device_id'] = "" created_port = plugin.create_port( context, {'port': obj_data}) LOG.info(">>Created interface port %(port)s, ip " "%(ip)s, mac %(mac)s)", {'port': created_port['id'], 'ip': created_port['fixed_ips'][0][ 'ip_address'], 'mac': created_port['mac_address']}) plugin.add_router_interface( context, router_id, {'port_id': created_port['id']}) LOG.info(">>Uplinked router %(rtr)s to network " "%(net)s", {'rtr': router_id, 'net': obj_data['network_id']}) except Exception as e: LOG.error(">>Failed to add router %(rtr)s " "interface port: %(e)s", {'rtr': router_id, 'e': e}) continue # create the object on the NSX-T plugin try: created_obj = create_object(context, {resource: obj_data}) LOG.info(">>Created %(resource)s %(name)s %(count)s/" "%(total)s", {'resource': resource, 'count': count, 'name': obj_data.get('name') or orig_id, 'total': total_num}) except Exception as e: # TODO(asarfaty): subnets ids are 
changed, so recreating a # subnet will fail on overlapping ips. LOG.error(">>Failed to create %(resource)s %(name)s: " "%(e)s", {'resource': resource, 'e': e, 'name': obj_data.get('name') or orig_id}) continue # special cases for different objects after create: if resource == 'security_group': sg_id = obj_data.get('name') or obj_data['id'] for rule in sg_rules: rule_data = prepare.prepare_security_group_rule(rule) try: plugin.create_security_group_rule( context, {'security_group_rule': rule_data}) except ext_sg.SecurityGroupRuleExists: # default rules were already created. # prevent logger from logging this exception sys.exc_clear() except Exception as e: LOG.error( ">>Failed to create security group %(name)s " "rules: %(e)s", {'name': sg_id, 'e': e}) elif resource == 'subnet': if enable_dhcp: dhcp_subnets.append(created_obj['id']) # Enable dhcp on all the relevant subnets (after creating all ports, # to maintain original IPs): if dhcp_subnets: for subnet_id in dhcp_subnets: try: plugin.update_subnet( context, subnet_id, {'subnet': {'enable_dhcp': True}}) except Exception as e: LOG.error("Failed to enable DHCP on subnet %(subnet)s:" " %(e)s", {'subnet': subnet_id, 'e': e}) # Add static routes (after all router interfaces and gateways are set) for obj_data in objects['router']: if 'routes' in obj_data: try: plugin.update_router( context, obj_data['id'], {'router': {'routes': obj_data['routes']}}) except Exception as e: LOG.error("Failed to add routes to router %(rtr)s: " "%(e)s", {'rtr': obj_data['id'], 'e': e}) LOG.info(">>>Done Creating all objects in NSX-T.") @admin_utils.output_header def migrate_v_project_to_t(resource, event, trigger, **kwargs): """Migrate 1 project from v to t with all its resources""" # filter out the plugins INFO logging # TODO(asarfaty): Consider this for all admin utils LOG.logger.setLevel(logging.INFO) logging.getLogger(None).logger.setLevel(logging.WARN) # get the configuration: tenant + public network + from file flag usage = ("Usage: 
nsxadmin -r projects -o %s --property project-id=<> " "--property external-net= " "<--property from-file=True>" % shell.Operations.NSX_MIGRATE_V_V3.value) if not kwargs.get('property'): LOG.error("Missing parameters: %s", usage) return properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) project = properties.get('project-id') ext_net_id = properties.get('external-net') from_file = properties.get('from-file', 'false').lower() == "true" # TODO(asarfaty): get files path if not project: LOG.error("Missing project-id parameter: %s", usage) return if not ext_net_id: LOG.error("Missing external-net parameter: %s", usage) return # check if files exist in the current directory try: filename = get_resource_file_name(project, 'network') file = open(filename, 'r') if file.read(): if not from_file: from_file = admin_utils.query_yes_no( "Use existing resources files for this project?", default="yes") file.close() except Exception: sys.exc_clear() if from_file: LOG.error("Cannot run from file: files not found") return # validate tenant id and public network ctx = n_context.get_admin_context() mapping = db.get_project_plugin_mapping(ctx.session, project) current_plugin = mapping.plugin if not mapping: LOG.error("Project %s is unknown", project) return if not from_file and current_plugin != projectpluginmap.NsxPlugins.NSX_V: LOG.error("Project %s belongs to plugin %s.", project, mapping.plugin) return with v3_utils.NsxV3PluginWrapper() as plugin: try: plugin.get_network(ctx, ext_net_id) except exceptions.NetworkNotFound: LOG.error("Network %s was not found", ext_net_id) return if not plugin._network_is_external(ctx, ext_net_id): LOG.error("Network %s is not external", ext_net_id) return if from_file: # read resources from files objects = read_v_resources_from_files(project) else: # read all V resources and dump to a file objects = read_v_resources_to_files(ctx, project) # delete all the V resources (reading it from the files) if current_plugin == 
projectpluginmap.NsxPlugins.NSX_V: delete_v_resources(ctx, objects) # change the mapping of this tenant to T db.update_project_plugin_mapping(ctx.session, project, projectpluginmap.NsxPlugins.NSX_T) # use api replay flag to allow keeping the IDs cfg.CONF.set_override('api_replay_mode', True) # add resources 1 by one after adapting them to T (api-replay code) create_t_resources(ctx, objects, ext_net_id) # reset api replay flag to allow keeping the IDs cfg.CONF.set_override('api_replay_mode', False) registry.subscribe(import_projects, constants.PROJECTS, shell.Operations.IMPORT.value) registry.subscribe(migrate_v_project_to_t, constants.PROJECTS, shell.Operations.NSX_MIGRATE_V_V3.value) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2262545 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxv/0000755000175000017500000000000000000000000024246 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxv/__init__.py0000644000175000017500000000000000000000000026345 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2262545 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxv/resources/0000755000175000017500000000000000000000000026260 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxv/resources/__init__.py0000644000175000017500000000000000000000000030357 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxv/resources/backup_edges.py0000644000175000017500000003377400000000000031264 
0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.db import l3_db from neutron_lib.callbacks import registry from neutron_lib import exceptions from oslo_log import log as logging from oslo_utils import uuidutils from vmware_nsx.common import locking from vmware_nsx.common import nsxv_constants from vmware_nsx.db import nsxv_db from vmware_nsx.db import nsxv_models from vmware_nsx.plugins.nsx_v.vshield.common import constants as vcns_const from vmware_nsx.plugins.nsx_v.vshield import edge_utils from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import formatters import vmware_nsx.shell.admin.plugins.common.utils as admin_utils import vmware_nsx.shell.admin.plugins.nsxv.resources.utils as utils import vmware_nsx.shell.resources as shell LOG = logging.getLogger(__name__) nsxv = utils.get_nsxv_client() _uuid = uuidutils.generate_uuid def get_nsxv_backup_edges(scope="all"): edges = utils.get_nsxv_backend_edges() backup_edges = [] edgeapi = utils.NeutronDbClient() for edge in edges: if edge['name'].startswith("backup-"): # Make sure it is really a backup edge edge_vnic_binds = nsxv_db.get_edge_vnic_bindings_by_edge( edgeapi.context.session, edge['id']) if scope != "all": # Make sure the backup edge exists in neutron # Return backup edges existing in both neutron and backend # when scope != all edge_in_neutron = 
nsxv_db.get_nsxv_router_binding_by_edge( edgeapi.context.session, edge['id']) if not edge_vnic_binds and edge_in_neutron: extend_edge_info(edge) backup_edges.append(edge) else: if not edge_vnic_binds: extend_edge_info(edge) backup_edges.append(edge) return backup_edges def extend_edge_info(edge): """Add information from the nsxv-db, if available""" edgeapi = utils.NeutronDbClient() rtr_binding = nsxv_db.get_nsxv_router_binding_by_edge( edgeapi.context.session, edge['id']) if rtr_binding: edge['availability_zone'] = rtr_binding['availability_zone'] edge['db_status'] = rtr_binding['status'] @admin_utils.output_header def nsx_list_backup_edges(resource, event, trigger, **kwargs): """List backup edges""" backup_edges = get_nsxv_backup_edges() LOG.info(formatters.output_formatter( constants.BACKUP_EDGES, backup_edges, ['id', 'name', 'size', 'type', 'availability_zone', 'db_status'])) def _delete_backup_from_neutron_db(edge_id, router_id): # Remove bindings from Neutron DB edgeapi = utils.NeutronDbClient() nsxv_db.delete_nsxv_router_binding( edgeapi.context.session, router_id) if edge_id: nsxv_db.clean_edge_vnic_binding(edgeapi.context.session, edge_id) def _delete_edge_from_nsx_and_neutron(edge_id, router_id): try: with locking.LockManager.get_lock(edge_id): # Delete from NSXv backend nsxv.delete_edge(edge_id) # Remove bindings from Neutron DB _delete_backup_from_neutron_db(edge_id, router_id) return True except Exception as expt: LOG.error("%s", str(expt)) return False def _nsx_delete_backup_edge(edge_id, all_backup_edges): """Delete a specific backup edge""" try: edge_result = nsxv.get_edge(edge_id) except exceptions.NeutronException as x: LOG.error("%s", str(x)) else: # edge_result[0] is response status code # edge_result[1] is response body edge = edge_result[1] backup_edges = [e['id'] for e in all_backup_edges] if (not edge['name'].startswith('backup-') or edge['id'] not in backup_edges): LOG.error( 'Edge: %s is not a backup edge; aborting delete', edge_id) else: 
return _delete_edge_from_nsx_and_neutron(edge_id, edge['name']) def nsx_clean_backup_edge(resource, event, trigger, **kwargs): """Delete backup edge""" errmsg = ("Need to specify edge-id property. Add --property " "edge-id=") if not kwargs.get('property'): LOG.error("%s", errmsg) return properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) edge_id = properties.get('edge-id') if not edge_id: LOG.error("%s", errmsg) return if not kwargs.get('force'): #ask for the user confirmation confirm = admin_utils.query_yes_no( "Do you want to delete edge: %s" % edge_id, default="no") if not confirm: LOG.info("Backup edge deletion aborted by user") return # delete the backup edge _nsx_delete_backup_edge(edge_id, get_nsxv_backup_edges()) def nsx_clean_all_backup_edges(resource, event, trigger, **kwargs): """Delete all backup edges""" scope = "all" if kwargs.get('property'): properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) scope = properties.get("scope", "all") if scope not in ["neutron", "all"]: LOG.error("Need to specify the scope in ['neutron', 'all']") return backup_edges = get_nsxv_backup_edges(scope=scope) if not kwargs.get('force'): #ask for the user confirmation confirm = admin_utils.query_yes_no( "Do you want to delete %s backup edges?" % len(backup_edges), default="no") if not confirm: LOG.info("Backup edges deletion aborted by user") return deleted_cnt = 0 for edge in backup_edges: # delete the backup edge if _nsx_delete_backup_edge(edge['id'], backup_edges): deleted_cnt = deleted_cnt + 1 LOG.info('Done Deleting %s backup edges', deleted_cnt) @admin_utils.output_header def neutron_clean_backup_edge(resource, event, trigger, **kwargs): """Delete a backup edge from the neutron, and backend by it's name The name of the backup edge is the router-id column in the BD table nsxv_router_bindings, and it is also printed by list-mismatches """ errmsg = ("Need to specify router-id property. 
Add --property " "router-id=") if not kwargs.get('property'): LOG.error("%s", errmsg) return properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) router_id = properties.get('router-id') if not router_id: LOG.error("%s", errmsg) return # look for the router-binding entry edgeapi = utils.NeutronDbClient() rtr_binding = nsxv_db.get_nsxv_router_binding( edgeapi.context.session, router_id) if not rtr_binding: LOG.error('Backup %s was not found in DB', router_id) return edge_id = rtr_binding['edge_id'] if edge_id: # delete from backend too _delete_edge_from_nsx_and_neutron(edge_id, router_id) else: # delete only from DB _delete_backup_from_neutron_db(None, router_id) @admin_utils.output_header def nsx_list_name_mismatches(resource, event, trigger, **kwargs): edges = utils.get_nsxv_backend_edges() plugin_nsx_mismatch = [] backend_edge_ids = [] edgeapi = utils.NeutronDbClient() # Look for edges with the wrong names: for edge in edges: backend_edge_ids.append(edge['id']) rtr_binding = nsxv_db.get_nsxv_router_binding_by_edge( edgeapi.context.session, edge['id']) if (rtr_binding and edge['name'].startswith('backup-') and rtr_binding['router_id'] != edge['name']): plugin_nsx_mismatch.append( {'edge_id': edge['id'], 'edge_name': edge['name'], 'router_id': rtr_binding['router_id']}) LOG.info(formatters.output_formatter( constants.BACKUP_EDGES + ' with name mismatch:', plugin_nsx_mismatch, ['edge_id', 'edge_name', 'router_id'])) # Also look for missing edges like_filters = {'router_id': vcns_const.BACKUP_ROUTER_PREFIX + "%"} rtr_bindings = nsxv_db.get_nsxv_router_bindings(edgeapi.context.session, like_filters=like_filters) plugin_nsx_missing = [] for rtr_binding in rtr_bindings: if rtr_binding['edge_id'] not in backend_edge_ids: plugin_nsx_missing.append( {'edge_id': rtr_binding['edge_id'], 'router_id': rtr_binding['router_id'], 'db_status': rtr_binding['status']}) LOG.info(formatters.output_formatter( constants.BACKUP_EDGES + ' missing from backend:', 
plugin_nsx_missing, ['edge_id', 'router_id', 'db_status'])) def nsx_fix_name_mismatch(resource, event, trigger, **kwargs): errmsg = ("Need to specify edge-id property. Add --property " "edge-id=") if not kwargs.get('property'): LOG.error("%s", errmsg) return properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) edgeapi = utils.NeutronDbClient() edge_id = properties.get('edge-id') if not edge_id: LOG.error("%s", errmsg) return try: # edge[0] is response status code # edge[1] is response body edge = nsxv.get_edge(edge_id)[1] except exceptions.NeutronException as e: LOG.error("%s", str(e)) else: if edge['name'].startswith('backup-'): rtr_binding = nsxv_db.get_nsxv_router_binding_by_edge( edgeapi.context.session, edge['id']) if rtr_binding['router_id'] == edge['name']: LOG.error('Edge %s no mismatch with NSX', edge_id) return try: with locking.LockManager.get_lock(edge_id): # Update edge at NSXv backend if rtr_binding['router_id'].startswith('dhcp-'): # Edge is a DHCP edge - just use router_id as name edge['name'] = rtr_binding['router_id'] else: # This is a router - if shared, prefix with 'shared-' nsx_attr = (edgeapi.context.session.query( nsxv_models.NsxvRouterExtAttributes).filter_by( router_id=rtr_binding['router_id']).first()) if nsx_attr and nsx_attr['router_type'] == 'shared': edge['name'] = ('shared-' + _uuid())[ :vcns_const.EDGE_NAME_LEN] elif (nsx_attr and nsx_attr['router_type'] == 'exclusive'): rtr_db = (edgeapi.context.session.query( l3_db.Router).filter_by( id=rtr_binding['router_id']).first()) if rtr_db: edge['name'] = ( rtr_db['name'][ :nsxv_constants.ROUTER_NAME_LENGTH - len(rtr_db['id'])] + '-' + rtr_db['id']) else: LOG.error( 'No database entry for router id %s', rtr_binding['router_id']) else: LOG.error( 'Could not determine the name for ' 'Edge %s', edge_id) return if not kwargs.get('force'): confirm = admin_utils.query_yes_no( "Do you want to rename edge %s to %s" % (edge_id, edge['name']), default="no") if not confirm: 
LOG.info("Edge rename aborted by user") return LOG.info("Edge rename started") # remove some keys that will fail the NSX transaction edge_utils.remove_irrelevant_keys_from_edge_request(edge) try: LOG.error("Update edge...") nsxv.update_edge(edge_id, edge) except Exception as e: LOG.error("Update failed - %s", (e)) except Exception as e: LOG.error("%s", str(e)) else: LOG.error( 'Edge %s has no backup prefix on NSX', edge_id) return registry.subscribe(nsx_list_backup_edges, constants.BACKUP_EDGES, shell.Operations.LIST.value) registry.subscribe(nsx_clean_backup_edge, constants.BACKUP_EDGES, shell.Operations.CLEAN.value) registry.subscribe(nsx_clean_all_backup_edges, constants.BACKUP_EDGES, shell.Operations.CLEAN_ALL.value) registry.subscribe(nsx_list_name_mismatches, constants.BACKUP_EDGES, shell.Operations.LIST_MISMATCHES.value) registry.subscribe(nsx_fix_name_mismatch, constants.BACKUP_EDGES, shell.Operations.FIX_MISMATCH.value) registry.subscribe(neutron_clean_backup_edge, constants.BACKUP_EDGES, shell.Operations.NEUTRON_CLEAN.value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxv/resources/config.py0000644000175000017500000000325000000000000030077 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.callbacks import registry from oslo_config import cfg from oslo_log import log as logging from vmware_nsx.plugins.nsx_v.vshield.common import exceptions from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.nsxv.resources import utils from vmware_nsx.shell import resources as shell LOG = logging.getLogger(__name__) @admin_utils.output_header def validate_configuration(resource, event, trigger, **kwargs): """Validate the nsxv configuration""" try: utils.NsxVPluginWrapper() except exceptions.Forbidden: LOG.error("Configuration validation failed: wrong VSM credentials " "for %s", cfg.CONF.nsxv.manager_uri) except Exception as e: LOG.error("Configuration validation failed: %s", e) else: LOG.info("Configuration validation succeeded") registry.subscribe(validate_configuration, constants.CONFIG, shell.Operations.VALIDATE.value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxv/resources/dhcp_binding.py0000644000175000017500000003522500000000000031251 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import pprint import sys from neutron_lib import context as n_context from oslo_config import cfg from oslo_log import log as logging from vmware_nsx.shell.admin.plugins.common import constants import vmware_nsx.shell.admin.plugins.common.utils as admin_utils import vmware_nsx.shell.admin.plugins.nsxv.resources.utils as utils import vmware_nsx.shell.resources as shell from neutron_lib.callbacks import registry from neutron_lib import exceptions as nl_exc from vmware_nsx.common import locking from vmware_nsx.db import nsxv_db from vmware_nsx.plugins.nsx_v.vshield.common import ( constants as nsxv_constants) from vmware_nsx.plugins.nsx_v.vshield.common import exceptions from vmware_nsx.plugins.nsx_v.vshield import edge_utils from vmware_nsx.plugins.nsx_v.vshield import vcns_driver LOG = logging.getLogger(__name__) nsxv = utils.get_nsxv_client() neutron_db = utils.NeutronDbClient() def nsx_get_static_bindings_by_edge(edge_id): nsx_dhcp_static_bindings = set() try: nsx_dhcp_bindings = nsxv.query_dhcp_configuration(edge_id) except exceptions.ResourceNotFound: LOG.error("Edge %s was not found", edge_id) return # nsx_dhcp_bindings[0] contains response headers; # nsx_dhcp_bindings[1] contains response payload sbindings = nsx_dhcp_bindings[1].get('staticBindings').get( 'staticBindings') for binding in sbindings: nsx_dhcp_static_bindings.add( (edge_id, binding.get('macAddress').lower(), binding.get('bindingId').lower())) return nsx_dhcp_static_bindings def neutron_get_static_bindings_by_edge(edge_id): neutron_db_dhcp_bindings = set() for binding in nsxv_db.get_dhcp_static_bindings_by_edge( neutron_db.context.session, edge_id): neutron_db_dhcp_bindings.add( (binding.edge_id, binding.mac_address.lower(), binding.binding_id.lower())) return neutron_db_dhcp_bindings @admin_utils.output_header def list_missing_dhcp_bindings(resource, event, trigger, **kwargs): """List missing DHCP bindings from NSXv backend. 
Missing DHCP bindings are those that exist in Neutron DB; but are not present on corresponding NSXv Edge. """ for (edge_id, count) in nsxv_db.get_nsxv_dhcp_bindings_count_per_edge( neutron_db.context.session): LOG.info("%s", "=" * 60) LOG.info("For edge: %s", edge_id) nsx_dhcp_static_bindings = nsx_get_static_bindings_by_edge(edge_id) if nsx_dhcp_static_bindings is None: continue neutron_dhcp_static_bindings = \ neutron_get_static_bindings_by_edge(edge_id) LOG.info("# of DHCP bindings in Neutron DB: %s", len(neutron_dhcp_static_bindings)) LOG.info("# of DHCP bindings on NSXv backend: %s", len(nsx_dhcp_static_bindings)) missing = neutron_dhcp_static_bindings - nsx_dhcp_static_bindings if not missing: LOG.info("No missing DHCP bindings found.") LOG.info("Neutron DB and NSXv backend are in sync") else: LOG.info("Missing DHCP bindings:") LOG.info("%s", pprint.pformat(missing)) @admin_utils.output_header def nsx_update_dhcp_edge_binding(resource, event, trigger, **kwargs): """Resync DHCP bindings on NSXv Edge""" if not kwargs.get('property'): LOG.error("Need to specify edge-id parameter") return else: properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) edge_id = properties.get('edge-id') if not edge_id: LOG.error("Need to specify edge-id parameter") return LOG.info("Updating NSXv Edge: %s", edge_id) # Need to create a plugin object; so that we are able to # do neutron list-ports. 
with utils.NsxVPluginWrapper() as plugin: nsxv_manager = vcns_driver.VcnsDriver( edge_utils.NsxVCallbacks(plugin)) edge_manager = edge_utils.EdgeManager(nsxv_manager, plugin) try: edge_manager.update_dhcp_service_config( neutron_db.context, edge_id) except exceptions.ResourceNotFound: LOG.error("Edge %s not found", edge_id) def delete_old_dhcp_edge(context, old_edge_id, bindings): LOG.info("Deleting the old DHCP edge: %s", old_edge_id) with locking.LockManager.get_lock(old_edge_id): # Delete from NSXv backend # Note - If we will not delete the router, but free it - it will be # immediately used as the new one, So it is better to delete it. try: nsxv.delete_edge(old_edge_id) except Exception as e: LOG.warning("Failed to delete the old edge %(id)s: %(e)s", {'id': old_edge_id, 'e': e}) # Continue the process anyway # The edge may have been already deleted at the backend try: # Remove bindings from Neutron DB nsxv_db.clean_edge_router_binding(context.session, old_edge_id) nsxv_db.clean_edge_vnic_binding(context.session, old_edge_id) except Exception as e: LOG.warning("Failed to delete the old edge %(id)s from the " "DB : %(e)s", {'id': old_edge_id, 'e': e}) def recreate_network_dhcp(context, plugin, edge_manager, old_edge_id, net_id): """Handle the DHCP edge recreation of a network """ LOG.info("Moving network %s to a new edge", net_id) # delete the old binding resource_id = (nsxv_constants.DHCP_EDGE_PREFIX + net_id)[:36] nsxv_db.delete_nsxv_router_binding(context.session, resource_id) # Delete the old static binding of the networks` compute ports port_filters = {'network_id': [net_id], 'device_owner': ['compute:None']} compute_ports = plugin.get_ports(context, filters=port_filters) if old_edge_id: for port in compute_ports: # Delete old binding from the DB nsxv_db.delete_edge_dhcp_static_binding(context.session, old_edge_id, port['mac_address']) # Go over all the subnets with DHCP net_filters = {'network_id': [net_id], 'enable_dhcp': [True]} subnets = 
plugin.get_subnets(context, filters=net_filters) for subnet in subnets: LOG.info("Moving subnet %s to a new edge", subnet['id']) # allocate / reuse the new dhcp edge new_resource_id = edge_manager.create_dhcp_edge_service( context, net_id, subnet) if new_resource_id: # also add fw rules and metadata, once for the new edge plugin._update_dhcp_service_new_edge(context, resource_id) # Update the ip of the dhcp port LOG.info("Creating network %s DHCP address group", net_id) address_groups = plugin._create_network_dhcp_address_group( context, net_id) plugin.edge_manager.update_dhcp_edge_service( context, net_id, address_groups=address_groups) # find out the id of the new edge: new_binding = nsxv_db.get_nsxv_router_binding( context.session, resource_id) if new_binding: LOG.info("Network %(net_id)s was moved to edge %(edge_id)s", {'net_id': net_id, 'edge_id': new_binding['edge_id']}) else: LOG.error("Network %(net_id)s was not moved to a new edge", {'net_id': net_id}) @admin_utils.output_header def nsx_recreate_dhcp_edge(resource, event, trigger, **kwargs): """Recreate a dhcp edge with all the networks on a new NSXv edge""" usage_msg = ("Need to specify edge-id or net-id parameter") if not kwargs.get('property'): LOG.error(usage_msg) return # input validation properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) old_edge_id = properties.get('edge-id') if not old_edge_id: # if the net-id property exist - recreate the edge for this network net_id = properties.get('net-id') if net_id: nsx_recreate_dhcp_edge_by_net_id(net_id) return LOG.error(usage_msg) return LOG.info("ReCreating NSXv Edge: %s", old_edge_id) context = n_context.get_admin_context() # verify that this is a DHCP edge bindings = nsxv_db.get_nsxv_router_bindings_by_edge( context.session, old_edge_id) if (not bindings or not bindings[0]['router_id'].startswith( nsxv_constants.DHCP_EDGE_PREFIX)): LOG.error("Edge %(edge_id)s is not a DHCP edge", {'edge_id': old_edge_id}) return # init the plugin and 
edge manager cfg.CONF.set_override('core_plugin', 'vmware_nsx.shell.admin.plugins.nsxv.resources' '.utils.NsxVPluginWrapper') with utils.NsxVPluginWrapper() as plugin: nsxv_manager = vcns_driver.VcnsDriver( edge_utils.NsxVCallbacks(plugin)) edge_manager = edge_utils.EdgeManager(nsxv_manager, plugin) # find the networks bound to this DHCP edge networks_binding = nsxv_db.get_edge_vnic_bindings_by_edge( context.session, old_edge_id) network_ids = [binding['network_id'] for binding in networks_binding] # Delete the old edge delete_old_dhcp_edge(context, old_edge_id, bindings) # Move all the networks to other (new or existing) edge for net_id in network_ids: recreate_network_dhcp(context, plugin, edge_manager, old_edge_id, net_id) def nsx_recreate_dhcp_edge_by_net_id(net_id): """Recreate a dhcp edge for a specific network without an edge""" LOG.info("ReCreating NSXv Edge for network: %s", net_id) context = n_context.get_admin_context() # init the plugin and edge manager cfg.CONF.set_override('core_plugin', 'vmware_nsx.shell.admin.plugins.nsxv.resources' '.utils.NsxVPluginWrapper') with utils.NsxVPluginWrapper() as plugin: nsxv_manager = vcns_driver.VcnsDriver(edge_utils.NsxVCallbacks(plugin)) edge_manager = edge_utils.EdgeManager(nsxv_manager, plugin) # verify that there is no DHCP edge for this network at the moment resource_id = (nsxv_constants.DHCP_EDGE_PREFIX + net_id)[:36] router_binding = nsxv_db.get_nsxv_router_binding( context.session, resource_id) if router_binding: # make sure there is no real edge if router_binding['edge_id']: edge_id = router_binding['edge_id'] try: nsxv_manager.vcns.get_edge(edge_id) except exceptions.ResourceNotFound: # No edge on backend # prevent logger from logging this exception sys.exc_clear() LOG.info("Edge %s does not exist on the NSX", edge_id) else: LOG.warning("Network %(net_id)s already has a dhcp edge: " "%(edge_id)s", {'edge_id': edge_id, 'net_id': net_id}) return # delete this old entry 
nsxv_db.delete_nsxv_router_binding(context.session, resource_id) # Verify that the network exists on neutron try: plugin.get_network(context, net_id) except nl_exc.NetworkNotFound: LOG.error("Network %s does not exist", net_id) return recreate_network_dhcp(context, plugin, edge_manager, None, net_id) @admin_utils.output_header def nsx_redistribute_dhcp_edges(resource, event, trigger, **kwargs): """If any of the DHCP networks are on a conflicting edge move them""" context = n_context.get_admin_context() with utils.NsxVPluginWrapper() as plugin: nsxv_manager = vcns_driver.VcnsDriver( edge_utils.NsxVCallbacks(plugin)) edge_manager = edge_utils.EdgeManager(nsxv_manager, plugin) # go over all DHCP subnets networks = plugin.get_networks(context) for network in networks: network_id = network['id'] # Check if the network has a related DHCP edge resource_id = (nsxv_constants.DHCP_EDGE_PREFIX + network_id)[:36] dhcp_edge_binding = nsxv_db.get_nsxv_router_binding( context.session, resource_id) if not dhcp_edge_binding: continue LOG.info("Checking network %s", network_id) edge_id = dhcp_edge_binding['edge_id'] availability_zone = plugin.get_network_az_by_net_id( context, network['id']) filters = {'network_id': [network_id], 'enable_dhcp': [True]} subnets = plugin.get_subnets(context, filters=filters) for subnet in subnets: (conflict_edge_ids, available_edge_ids) = edge_manager._get_used_edges( context, subnet, availability_zone) if edge_id in conflict_edge_ids: # move the DHCP to another edge LOG.info("Network %(net)s on DHCP edge %(edge)s is " "conflicting with another network and will be " "moved", {'net': network_id, 'edge': edge_id}) edge_manager.remove_network_from_dhcp_edge( context, network_id, edge_id) edge_manager.create_dhcp_edge_service( context, network_id, subnet) break registry.subscribe(list_missing_dhcp_bindings, constants.DHCP_BINDING, shell.Operations.LIST.value) registry.subscribe(nsx_update_dhcp_edge_binding, constants.DHCP_BINDING, 
shell.Operations.NSX_UPDATE.value) registry.subscribe(nsx_recreate_dhcp_edge, constants.DHCP_BINDING, shell.Operations.NSX_RECREATE.value) registry.subscribe(nsx_redistribute_dhcp_edges, constants.DHCP_BINDING, shell.Operations.NSX_REDISTRIBURE.value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxv/resources/edges.py0000644000175000017500000006237400000000000027735 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import pprint import textwrap from vmware_nsx.common import config from vmware_nsx.dvs import dvs from vmware_nsx.plugins.nsx_v.vshield import edge_utils from vmware_nsx.plugins.nsx_v.vshield import vcns_driver from vmware_nsx.services.lbaas.nsx_v import lbaas_common as lb_common from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import formatters import vmware_nsx.shell.admin.plugins.common.utils as admin_utils import vmware_nsx.shell.admin.plugins.nsxv.resources.utils as utils import vmware_nsx.shell.resources as shell from neutron_lib.callbacks import registry from neutron_lib import context as n_context from neutron_lib import exceptions from oslo_config import cfg from oslo_log import log as logging from vmware_nsx.common import nsxv_constants from vmware_nsx.db import nsxv_db from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az from vmware_nsx.plugins.nsx_v.vshield.common import ( constants as vcns_const) import vmware_nsx.plugins.nsx_v.vshield.common.exceptions as nsxv_exceptions LOG = logging.getLogger(__name__) nsxv = utils.get_nsxv_client() @admin_utils.output_header def nsx_list_edges(resource, event, trigger, **kwargs): """List edges from NSXv backend""" headers = ['id', 'name', 'type', 'size', 'ha'] edges = utils.get_nsxv_backend_edges() if (kwargs.get('verbose')): headers += ['syslog'] extend_edge_info(edges) LOG.info(formatters.output_formatter(constants.EDGES, edges, headers)) def extend_edge_info(edges): """Add syslog info to each edge in list""" for edge in edges: # for the table to remain human readable, we need to # wrap long edge names edge['name'] = textwrap.fill(edge['name'], 25) edge['syslog'] = utils.get_edge_syslog_info(edge['id']) def get_router_edge_bindings(): edgeapi = utils.NeutronDbClient() return nsxv_db.get_nsxv_router_bindings(edgeapi.context) @admin_utils.output_header def neutron_list_router_edge_bindings(resource, event, trigger, **kwargs): """List NSXv edges from 
Neutron DB""" edges = get_router_edge_bindings() LOG.info(formatters.output_formatter( constants.EDGES, edges, ['edge_id', 'router_id', 'availability_zone', 'status'])) @admin_utils.output_header def clean_orphaned_router_bindings(resource, event, trigger, **kwargs): """Delete nsx router bindings entries without real objects behind them""" orphaned_list = get_orphaned_router_bindings() if not len(orphaned_list): LOG.info("No orphaned Router bindings found.") return LOG.info("Before delete; Orphaned Bindings:") LOG.info(formatters.output_formatter( constants.ORPHANED_BINDINGS, orphaned_list, ['edge_id', 'router_id', 'availability_zone', 'status'])) if not kwargs.get('force'): if len(orphaned_list): user_confirm = admin_utils.query_yes_no("Do you want to delete " "orphaned bindings", default="no") if not user_confirm: LOG.info("NSXv Router bindings deletion aborted by user") return edgeapi = utils.NeutronDbClient() for binding in orphaned_list: nsxv_db.delete_nsxv_router_binding( edgeapi.context.session, binding.router_id) LOG.info("Deleted %s orphaned router bindings. 
You may need to check for " "orphaned edges now.", len(orphaned_list)) @admin_utils.output_header def list_orphaned_router_bindings(resource, event, trigger, **kwargs): """List nsx router bindings entries without real objects behind them""" orphaned_list = get_orphaned_router_bindings() LOG.info(formatters.output_formatter( constants.ORPHANED_BINDINGS, orphaned_list, ['edge_id', 'router_id', 'availability_zone', 'status'])) def get_orphaned_router_bindings(): context = n_context.get_admin_context() orphaned_list = [] with utils.NsxVPluginWrapper() as plugin: networks = plugin.get_networks(context, fields=['id']) net_ids = [x['id'] for x in networks] routers = plugin.get_routers(context, fields=['id']) rtr_ids = [x['id'] for x in routers] nsxv_manager = vcns_driver.VcnsDriver( edge_utils.NsxVCallbacks(plugin)) edge_manager = edge_utils.EdgeManager(nsxv_manager, plugin) plr_tlr_ids = {} for tlr_id in rtr_ids: plr_id = edge_manager.get_plr_by_tlr_id(context, tlr_id) if plr_id: plr_tlr_ids[plr_id] = tlr_id for binding in get_router_edge_bindings(): if not router_binding_obj_exist(context, binding, net_ids, rtr_ids, plr_tlr_ids): orphaned_list.append(binding) return orphaned_list def _get_obj_id_from_binding(router_id, prefix): """Return the id part of the router-binding router-id field""" return router_id[len(prefix):] def _is_id_prefix_in_list(id_prefix, ids): """Return True if the id_prefix is the prefix of one of the ids""" for x in ids: if x.startswith(id_prefix): return True return False def router_binding_obj_exist(context, binding, net_ids, rtr_ids, plr_tlr_ids): """Check if the object responsible for the router binding entry exists Check if the relevant router/network/loadbalancer exists in the neutron DB """ router_id = binding.router_id if router_id.startswith(vcns_const.BACKUP_ROUTER_PREFIX): # no neutron object that should match backup edges return True if router_id.startswith(vcns_const.DHCP_EDGE_PREFIX): # should have a network starting with this id # get 
the id. and look for a network with this id net_id_prefix = _get_obj_id_from_binding( router_id, vcns_const.DHCP_EDGE_PREFIX) if _is_id_prefix_in_list(net_id_prefix, net_ids): return True else: LOG.warning("Network for binding entry %s not found", router_id) return False if router_id.startswith(vcns_const.PLR_EDGE_PREFIX): # Look for the TLR that matches this PLR # and check if it exists in the neutron DB if router_id in plr_tlr_ids: tlr_id = plr_tlr_ids[router_id] if _is_id_prefix_in_list(tlr_id, rtr_ids): return True else: LOG.warning("TLR Router %s for PLR binding entry %s not found", tlr_id, router_id) return False else: LOG.warning("TLR Router binding for PLR binding entry %s not " "found", router_id) return False if router_id.startswith(lb_common.RESOURCE_ID_PFX): # should have a load balancer starting with this id on the same edge if nsxv_db.get_nsxv_lbaas_loadbalancer_binding_by_edge( context.session, binding.edge_id): return True else: LOG.warning("Loadbalancer for binding entry %s not found", router_id) return False # regular router # get the id. and look for a router with this id if _is_id_prefix_in_list(router_id, rtr_ids): return True else: LOG.warning("Router for binding entry %s not found", router_id) return False def get_orphaned_edges(): nsxv_edge_ids = set() for edge in utils.get_nsxv_backend_edges(): nsxv_edge_ids.add(edge.get('id')) neutron_edge_bindings = set() for binding in get_router_edge_bindings(): neutron_edge_bindings.add(binding.edge_id) return nsxv_edge_ids - neutron_edge_bindings @admin_utils.output_header def nsx_list_orphaned_edges(resource, event, trigger, **kwargs): """List orphaned Edges on NSXv. Orphaned edges are NSXv edges that exist on NSXv backend but don't have a corresponding binding in Neutron DB """ LOG.info("NSXv edges present on NSXv backend but not present " "in Neutron DB\n") orphaned_edges = get_orphaned_edges() if not orphaned_edges: LOG.info("\nNo orphaned edges found." 
"\nNeutron DB and NSXv backend are in sync\n") else: LOG.info(constants.ORPHANED_EDGES) data = [('edge_id',)] for edge in orphaned_edges: data.append((edge,)) LOG.info(formatters.tabulate_results(data)) @admin_utils.output_header def nsx_delete_orphaned_edges(resource, event, trigger, **kwargs): """Delete orphaned edges from NSXv backend""" orphaned_edges = get_orphaned_edges() LOG.info("Before delete; Orphaned Edges: %s", orphaned_edges) if not kwargs.get('force'): if len(orphaned_edges): user_confirm = admin_utils.query_yes_no("Do you want to delete " "orphaned edges", default="no") if not user_confirm: LOG.info("NSXv Edge deletion aborted by user") return nsxv = utils.get_nsxv_client() for edge in orphaned_edges: LOG.info("Deleting edge: %s", edge) nsxv.delete_edge(edge) LOG.info("After delete; Orphaned Edges: \n%s", pprint.pformat(get_orphaned_edges())) def get_missing_edges(): nsxv_edge_ids = set() for edge in utils.get_nsxv_backend_edges(): nsxv_edge_ids.add(edge.get('id')) neutron_edge_bindings = set() for binding in get_router_edge_bindings(): neutron_edge_bindings.add(binding.edge_id) return neutron_edge_bindings - nsxv_edge_ids def get_router_edge_vnic_bindings(edge_id): edgeapi = utils.NeutronDbClient() return nsxv_db.get_edge_vnic_bindings_by_edge( edgeapi.context.session, edge_id) @admin_utils.output_header def nsx_list_missing_edges(resource, event, trigger, **kwargs): """List missing edges and networks serviced by those edges. Missing edges are NSXv edges that have a binding in Neutron DB but are currently missing from the NSXv backend. """ LOG.info("NSXv edges present in Neutron DB but not present " "on the NSXv backend\n") missing_edges = get_missing_edges() if not missing_edges: LOG.info("\nNo edges are missing." "\nNeutron DB and NSXv backend are in sync\n") else: data = [('edge_id', 'network_id')] for edge in missing_edges: # Retrieve all networks which are serviced by this edge. 
edge_serviced_networks = get_router_edge_vnic_bindings(edge) if not edge_serviced_networks: # If the edge is missing on the backend but no network # is serviced by this edge, output N/A. data.append((edge, 'N/A')) for bindings in edge_serviced_networks: data.append((edge, bindings.network_id)) LOG.info(formatters.tabulate_results(data)) def change_edge_ha(ha, edge_id): request = { 'featureType': 'highavailability_4.0', 'enabled': ha} try: nsxv.enable_ha(edge_id, request) except nsxv_exceptions.ResourceNotFound: LOG.error("Edge %s not found", edge_id) except exceptions.NeutronException as e: LOG.error("%s", str(e)) def change_edge_syslog(properties): request = { 'featureType': 'syslog', 'serverAddresses': {'ipAddress': [], 'type': 'IpAddressesDto'}} request['protocol'] = properties.get('syslog-proto', 'tcp') if request['protocol'] not in ['tcp', 'udp']: LOG.error("Property value error: syslog-proto must be tcp/udp") return if properties.get('syslog-server'): request['serverAddresses']['ipAddress'].append( properties.get('syslog-server')) if properties.get('syslog-server2'): request['serverAddresses']['ipAddress'].append( properties.get('syslog-server2')) edge_id = properties.get('edge-id') try: nsxv.update_edge_syslog(edge_id, request) except nsxv_exceptions.ResourceNotFound: LOG.error("Edge %s not found", edge_id) except exceptions.NeutronException as e: LOG.error("%s", str(e)) def delete_edge_syslog(edge_id): try: nsxv.delete_edge_syslog(edge_id) except nsxv_exceptions.ResourceNotFound: LOG.error("Edge %s not found", edge_id) except exceptions.NeutronException as e: LOG.error("%s", str(e)) def change_edge_loglevel(properties): """Update log level on edge Update log level either for specific module or for all modules. 
'none' disables logging, any other level enables logging Returns True if found any log level properties (regardless if action succeeded) """ modules = {} if properties.get('log-level'): level = properties.get('log-level') # change log level for all modules modules = {k: level for k in edge_utils.SUPPORTED_EDGE_LOG_MODULES} else: # check for log level settings for specific modules for k, v in properties.items(): if k.endswith('-log-level'): module = k[:-10] # module is in parameter prefix modules[module] = v if not modules: # no log level properties return False edge_id = properties.get('edge-id') for module, level in modules.items(): if level == 'none': LOG.info("Disabling logging for %s", module) else: LOG.info("Enabling logging for %(m)s with level %(l)s", {'m': module, 'l': level}) try: edge_utils.update_edge_loglevel(nsxv, edge_id, module, level) except nsxv_exceptions.ResourceNotFound: LOG.error("Edge %s not found", edge_id) except exceptions.NeutronException as e: LOG.error("%s", str(e)) # take ownership for properties return True def change_edge_appliance_size(properties): size = properties.get('size') if size not in vcns_const.ALLOWED_EDGE_SIZES: LOG.error("Edge appliance size not in %(size)s", {'size': vcns_const.ALLOWED_EDGE_SIZES}) return try: nsxv.change_edge_appliance_size( properties.get('edge-id'), size) except nsxv_exceptions.ResourceNotFound: LOG.error("Edge %s not found", properties.get('edge-id')) except exceptions.NeutronException as e: LOG.error("%s", str(e)) def _get_edge_az_and_size(edge_id): edgeapi = utils.NeutronDbClient() binding = nsxv_db.get_nsxv_router_binding_by_edge( edgeapi.context.session, edge_id) if binding: return binding['availability_zone'], binding['appliance_size'] # default fallback return nsx_az.DEFAULT_NAME, nsxv_constants.LARGE def change_edge_appliance(edge_id): """Update the appliances data of an edge Update the edge appliances data according to its current availability zone and the nsx.ini config, including the 
resource pool, edge_ha, datastore & ha_datastore. The availability zone of the edge will not be changed. This can be useful when the global resource pool/datastore/edge ha configuration is updated, or when the configuration of a specific availability zone was updated. """ # find out what is the current resource pool & size, so we can keep them az_name, size = _get_edge_az_and_size(edge_id) config.register_nsxv_azs(cfg.CONF, cfg.CONF.nsxv.availability_zones) az = nsx_az.NsxVAvailabilityZones().get_availability_zone(az_name) appliances = [{'resourcePoolId': az.resource_pool, 'datastoreId': az.datastore_id}] if az.ha_datastore_id and az.edge_ha: appliances.append({'resourcePoolId': az.resource_pool, 'datastoreId': az.ha_datastore_id}) request = {'appliances': appliances, 'applianceSize': size} try: nsxv.change_edge_appliance(edge_id, request) except nsxv_exceptions.ResourceNotFound: LOG.error("Edge %s not found", edge_id) except exceptions.NeutronException as e: LOG.error("%s", str(e)) else: # also update the edge_ha of the edge change_edge_ha(az.edge_ha, edge_id) def change_edge_appliance_reservations(properties): reservations = {} res = {} if properties.get('limit'): res['limit'] = properties.get('limit') if properties.get('reservation'): res['reservation'] = properties.get('reservation') if properties.get('shares'): res['shares'] = properties.get('shares') resource = properties.get('resource') if not res: LOG.error("Please configure reservations") return if resource == 'cpu': reservations['cpuReservation'] = res elif resource == 'memory': reservations['memoryReservation'] = res else: LOG.error("Please configure resource") return edge_id = properties.get('edge-id') try: h, edge = nsxv.get_edge(edge_id) except exceptions.NeutronException as e: LOG.error("%s", str(e)) return appliances = edge['appliances']['appliances'] for appliance in appliances: appliance.update(reservations) request = {'appliances': appliances} try: nsxv.change_edge_appliance(edge_id, request) 
except nsxv_exceptions.ResourceNotFound: LOG.error("Edge %s not found", edge_id) except exceptions.NeutronException as e: LOG.error("%s", str(e)) def _update_host_group_for_edge(nsxv, cluster_mng, edge_id, edge): if edge.get('type') == 'gatewayServices': try: az_name, size = _get_edge_az_and_size(edge_id) config.register_nsxv_azs(cfg.CONF, cfg.CONF.nsxv.availability_zones) zones = nsx_az.NsxVAvailabilityZones() az = zones.get_availability_zone(az_name) if az.edge_ha and az.edge_host_groups: edge_utils.update_edge_host_groups(nsxv, edge_id, cluster_mng, az, validate=True) else: LOG.error("Availability zone:%s does not have HA enabled or " "no host groups defined. Skipping %s.", az_name, edge_id) except Exception as e: LOG.error("Failed to update edge %(id)s - %(e)s", {'id': edge['id'], 'e': e}) else: LOG.error("%s is not a gateway services", edge_id) def change_edge_hostgroup(properties): cluster_mng = dvs.ClusterManager() if properties.get('hostgroup').lower() == "update": edge_id = properties.get('edge-id') try: edge_result = nsxv.get_edge(edge_id) except exceptions.NeutronException as x: LOG.error("%s", str(x)) else: # edge_result[0] is response status code # edge_result[1] is response body edge = edge_result[1] _update_host_group_for_edge(nsxv, cluster_mng, edge_id, edge) elif properties.get('hostgroup').lower() == "all": edges = utils.get_nsxv_backend_edges() for edge in edges: edge_id = edge['id'] _update_host_group_for_edge(nsxv, cluster_mng, edge_id, edge) elif properties.get('hostgroup').lower() == "clean": config.register_nsxv_azs(cfg.CONF, cfg.CONF.nsxv.availability_zones) azs = nsx_az.NsxVAvailabilityZones() for az in azs.list_availability_zones_objects(): try: edge_utils.clean_host_groups(cluster_mng, az) except Exception: LOG.error("Failed to clean AZ %s", az.name) else: LOG.error('Currently not supported') @admin_utils.output_header def nsx_update_edge(resource, event, trigger, **kwargs): """Update edge properties""" usage_msg = ("Need to specify 
edge-id parameter and " "attribute to update. Add --property edge-id= " "and --property highavailability= or " "--property size= or --property appliances=True. " "\nFor syslog, add --property syslog-server=|none and " "(optional) --property syslog-server2= and/or " "(optional) --property syslog-proto=[tcp/udp] " "\nFor log levels, add --property [routing|dhcp|dns|" "highavailability|loadbalancer]-log-level=" "[debug|info|warning|error]. To set log level for all " "modules, add --property log-level= " "\nFor edge reservations, add " "--property resource=cpu|memory and " "(optional) --property limit= and/or " "(optional) --property shares= and/or " "(optional) --property reservation= " "\nFor hostgroup updates, add " "--property hostgroup=update/all/clean") if not kwargs.get('property'): LOG.error(usage_msg) return properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) if (not properties.get('edge-id') and not properties.get('hostgroup', '').lower() == "all" and not properties.get('hostgroup', '').lower() == "clean"): LOG.error("Need to specify edge-id. 
" "Add --property edge-id=") return LOG.info("Updating NSXv edge: %(edge)s with properties\n%(prop)s", {'edge': properties.get('edge-id'), 'prop': properties}) if properties.get('highavailability'): change_edge_ha(properties['highavailability'].lower() == "true", properties['edge-id']) elif properties.get('size'): change_edge_appliance_size(properties) elif (properties.get('appliances') and properties.get('appliances').lower() == "true"): change_edge_appliance(properties['edge-id']) elif properties.get('syslog-server'): if (properties.get('syslog-server').lower() == "none"): delete_edge_syslog(properties['edge-id']) else: change_edge_syslog(properties) elif properties.get('resource'): change_edge_appliance_reservations(properties) elif properties.get('hostgroup'): change_edge_hostgroup(properties) elif change_edge_loglevel(properties): pass else: # no attribute was specified LOG.error(usage_msg) @admin_utils.output_header def nsx_update_edges(resource, event, trigger, **kwargs): """Update all edges with the given property""" if not kwargs.get('property'): usage_msg = ("Need to specify a property to update all edges. " "Add --property appliances=") LOG.error(usage_msg) return edges = utils.get_nsxv_backend_edges() properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) result = 0 for edge in edges: if properties.get('appliances', 'false').lower() == "true": try: change_edge_appliance(edge.get('edge-id')) except Exception as e: result += 1 LOG.error("Failed to update edge %(edge)s. 
Exception: " "%(e)s", {'edge': edge.get('edge-id'), 'e': str(e)}) if result > 0: total = len(edges) LOG.error("%(result)s of %(total)s edges failed " "to update.", {'result': result, 'total': total}) registry.subscribe(nsx_list_edges, constants.EDGES, shell.Operations.NSX_LIST.value) registry.subscribe(neutron_list_router_edge_bindings, constants.EDGES, shell.Operations.NEUTRON_LIST.value) registry.subscribe(nsx_list_orphaned_edges, constants.ORPHANED_EDGES, shell.Operations.LIST.value) registry.subscribe(nsx_delete_orphaned_edges, constants.ORPHANED_EDGES, shell.Operations.CLEAN.value) registry.subscribe(nsx_list_missing_edges, constants.MISSING_EDGES, shell.Operations.LIST.value) registry.subscribe(nsx_update_edge, constants.EDGES, shell.Operations.NSX_UPDATE.value) registry.subscribe(nsx_update_edges, constants.EDGES, shell.Operations.NSX_UPDATE_ALL.value) registry.subscribe(list_orphaned_router_bindings, constants.ORPHANED_BINDINGS, shell.Operations.LIST.value) registry.subscribe(clean_orphaned_router_bindings, constants.ORPHANED_BINDINGS, shell.Operations.CLEAN.value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxv/resources/gw_edges.py0000644000175000017500000003717300000000000030431 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import netaddr from neutron_lib.callbacks import registry from oslo_config import cfg from oslo_log import log as logging from vmware_nsx.common import config from vmware_nsx.common import nsxv_constants from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az from vmware_nsx.plugins.nsx_v.vshield.common import constants as vcns_const from vmware_nsx.plugins.nsx_v.vshield.common import exceptions from vmware_nsx.plugins.nsx_v.vshield import vcns_driver from vmware_nsx.services.dynamic_routing.nsx_v import driver as nsxv_bgp from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import formatters from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.nsxv.resources import utils as v_utils from vmware_nsx.shell import resources as shell LOG = logging.getLogger(__name__) MIN_ASNUM = 1 MAX_ASNUM = 65535 nsxv = vcns_driver.VcnsDriver([]) def get_ip_prefix(name, ip_address): return {'ipPrefix': {'name': name, 'ipAddress': ip_address}} def get_redistribution_rule(prefix_name, from_bgp, from_ospf, from_static, from_connected, action): rule = { 'action': action, 'from': { 'ospf': from_ospf, 'bgp': from_bgp, 'connected': from_connected, 'static': from_static } } if prefix_name: rule['prefixName'] = prefix_name return {'rule': rule} def _validate_asn(asn): if not MIN_ASNUM <= int(asn) <= MAX_ASNUM: msg = "Invalid AS number, expecting an integer value (1 - 65535)." 
LOG.error(msg) return False return True def _extract_interface_info(info): info = info.split(':') try: network = netaddr.IPNetwork(info[-1]) except Exception: LOG.error("Invalid IP address given: '%s'.", info) return None portgroup = info[0] subnet_mask = str(network.netmask) ip_address = str(network.ip) return portgroup, ip_address, subnet_mask def _assemble_gw_edge(name, size, external_iface_info, internal_iface_info, default_gateway, az): edge = nsxv._assemble_edge( name, datacenter_moid=az.datacenter_moid, deployment_container_id=az.datastore_id, appliance_size=size, remote_access=False, edge_ha=az.edge_ha) appliances = [nsxv._assemble_edge_appliance( az.resource_pool, az.datastore_id)] edge['appliances']['appliances'] = appliances portgroup, ip_address, subnet_mask = external_iface_info vnic_external = nsxv._assemble_edge_vnic(vcns_const.EXTERNAL_VNIC_NAME, vcns_const.EXTERNAL_VNIC_INDEX, portgroup, primary_address=ip_address, subnet_mask=subnet_mask, type="uplink") portgroup, gateway_ip, subnet_mask = internal_iface_info vnic_internal = nsxv._assemble_edge_vnic(vcns_const.INTERNAL_VNIC_NAME, vcns_const.INTERNAL_VNIC_INDEX, portgroup, primary_address=gateway_ip, subnet_mask=subnet_mask, type="internal") if (cfg.CONF.nsxv.edge_appliance_user and cfg.CONF.nsxv.edge_appliance_password): edge['cliSettings'].update({ 'userName': cfg.CONF.nsxv.edge_appliance_user, 'password': cfg.CONF.nsxv.edge_appliance_password}) edge['vnics']['vnics'].append(vnic_external) edge['vnics']['vnics'].append(vnic_internal) edge['featureConfigs']['features'] = [{'featureType': 'firewall_4.0', 'enabled': False}] if default_gateway: routing = {'featureType': 'routing_4.0', 'enabled': True, 'staticRouting': { 'defaultRoute': { 'description': 'default-gateway', 'gatewayAddress': default_gateway } }} edge['featureConfigs']['features'].append(routing) header = nsxv.vcns.deploy_edge(edge)[0] edge_id = header.get('location', '/').split('/')[-1] return edge_id, gateway_ip 
@admin_utils.output_header def create_bgp_gw(resource, event, trigger, **kwargs): """Creates a new BGP GW edge""" usage = ("nsxadmin -r bgp-gw-edge -o create " "--property name= " "--property local-as= " "--property external-iface=: " "--property internal-iface=: " "[--property default-gateway=] " "[--property az-hint=] " "[--property size=compact,large,xlarge,quadlarge]") required_params = ('name', 'local-as', 'internal-iface', 'external-iface') properties = admin_utils.parse_multi_keyval_opt(kwargs.get('property', [])) if not properties or not set(required_params) <= set(properties.keys()): LOG.error(usage) return local_as = properties['local-as'] if not _validate_asn(local_as): return size = properties.get('size', nsxv_constants.LARGE) if size not in vcns_const.ALLOWED_EDGE_SIZES: msg = ("Property 'size' takes one of the following values: %s." % ','.join(vcns_const.ALLOWED_EDGE_SIZES)) LOG.error(msg) return external_iface_info = _extract_interface_info(properties['external-iface']) internal_iface_info = _extract_interface_info(properties['internal-iface']) if not (external_iface_info and internal_iface_info): return if 'default-gateway' in properties: default_gw = _extract_interface_info(properties['default-gateway']) if not default_gw: msg = ("Property 'default-gateway' doesn't contain a valid IP " "address.") LOG.error(msg) return default_gw = default_gw[1] else: default_gw = None config.register_nsxv_azs(cfg.CONF, cfg.CONF.nsxv.availability_zones) az_hint = properties.get('az-hint', 'default') az = nsx_az.NsxVAvailabilityZones().get_availability_zone(az_hint) edge_id, gateway_ip = _assemble_gw_edge(properties['name'], size, external_iface_info, internal_iface_info, default_gw, az) nsxv.add_bgp_speaker_config(edge_id, gateway_ip, local_as, True, [], [], [], default_originate=True) res = {'name': properties['name'], 'edge_id': edge_id, 'size': size, 'availability_zone': az.name, 'bgp_identifier': gateway_ip, 'local_as': local_as} headers = ['name', 'edge_id', 
'size', 'bgp_identifier', 'availability_zone', 'local_as'] LOG.info(formatters.output_formatter('BGP GW Edge', [res], headers)) def delete_bgp_gw(resource, event, trigger, **kwargs): usage = ("nsxadmin -r bgp-gw-edge -o delete " "--property gw-edge-id=") required_params = ('gw-edge-id', ) properties = admin_utils.parse_multi_keyval_opt(kwargs.get('property', [])) if not properties or not set(required_params) <= set(properties.keys()): LOG.error(usage) return edge_id = properties['gw-edge-id'] try: nsxv.vcns.delete_edge(edge_id) except Exception: LOG.error("Failed to delete edge %s", edge_id) return def list_bgp_edges(resource, event, trigger, **kwargs): bgp_edges = [] edges = v_utils.get_nsxv_backend_edges() for edge in edges: bgp_config = nsxv.get_routing_bgp_config(edge['id']) if bgp_config['bgp']['enabled']: bgp_edges.append({'name': edge['name'], 'edge_id': edge['id'], 'local_as': bgp_config['bgp']['localAS']}) if not bgp_edges: LOG.info("No BGP GW edges found") return headers = ['name', 'edge_id', 'local_as'] LOG.info(formatters.output_formatter(constants.EDGES, bgp_edges, headers)) @admin_utils.output_header def create_redis_rule(resource, event, trigger, **kwargs): usage = ("nsxadmin -r routing-redistribution-rule -o create " "--property gw-edge-ids=[,...] 
" "[--property prefix=] " "--property learn-from=ospf,bgp,connected,static " "--property action=") required_params = ('gw-edge-ids', 'learn-from', 'action') properties = admin_utils.parse_multi_keyval_opt(kwargs.get('property', [])) if not properties or not set(required_params) <= set(properties.keys()): LOG.error(usage) return prefix = properties.get('prefix') if prefix: prefix_name, cidr = prefix.split(':') prefixes = [get_ip_prefix(prefix_name, cidr)] if cidr else [] else: prefix_name = None prefixes = [] learn_from = properties['learn-from'].split(',') rule = get_redistribution_rule(prefix_name, 'bgp' in learn_from, 'ospf' in learn_from, 'static' in learn_from, 'connected' in learn_from, properties['action']) edge_ids = properties['gw-edge-ids'].split(',') for edge_id in edge_ids: try: bgp_config = nsxv.get_routing_bgp_config(edge_id) if not bgp_config['bgp'].get('enabled'): LOG.error("BGP is not enabled on edge %s", edge_id) return if not bgp_config['bgp']['redistribution']['enabled']: LOG.error("BGP redistribution is not enabled on edge %s", edge_id) return nsxv.add_bgp_redistribution_rules(edge_id, prefixes, [rule]) except exceptions.ResourceNotFound: LOG.error("Edge %s was not found", edge_id) return res = [{'edge_id': edge_id, 'prefix': prefix_name if prefix_name else 'ANY', 'learner-protocol': 'bgp', 'learn-from': ', '.join(set(learn_from)), 'action': properties['action']} for edge_id in edge_ids] headers = ['edge_id', 'prefix', 'learner-protocol', 'learn-from', 'action'] LOG.info(formatters.output_formatter( 'Routing redistribution rule', res, headers)) def delete_redis_rule(resource, event, trigger, **kwargs): usage = ("nsxadmin -r routing-redistribution-rule -o delete " "--property gw-edge-ids=[,...]" "[--property prefix-name=]") required_params = ('gw-edge-ids', ) properties = admin_utils.parse_multi_keyval_opt(kwargs.get('property', [])) if not properties or not set(required_params) <= set(properties.keys()): LOG.error(usage) return edge_ids = 
properties['gw-edge-ids'].split(',') # If no prefix-name is given then remove rules configured with default # prefix. prefixes = [properties.get('prefix-name')] for edge_id in edge_ids: try: nsxv.remove_bgp_redistribution_rules(edge_id, prefixes) except exceptions.ResourceNotFound: LOG.error("Edge %s was not found", edge_id) return @admin_utils.output_header def add_bgp_neighbour(resource, event, trigger, **kwargs): usage = ("nsxadmin -r bgp-neighbour -o create " "--property gw-edge-ids=[,...] " "--property ip-address= " "--property remote-as= " "--property password=") required_params = ('gw-edge-ids', 'ip-address', 'remote-as', 'password') properties = admin_utils.parse_multi_keyval_opt(kwargs.get('property', [])) if not properties or not set(required_params) <= set(properties.keys()): LOG.error(usage) return remote_as = properties['remote-as'] if not _validate_asn(remote_as): return nbr = nsxv_bgp.gw_bgp_neighbour(properties['ip-address'], properties['remote-as'], properties['password']) edge_ids = properties['gw-edge-ids'].split(',') for edge_id in edge_ids: try: nsxv.add_bgp_neighbours(edge_id, [nbr]) except exceptions.ResourceNotFound: LOG.error("Edge %s was not found", edge_id) return res = [{'edge_id': edge_id, 'ip_address': properties['ip-address'], 'remote_as': properties['remote-as'], 'hold_down_timer': cfg.CONF.nsxv.bgp_neighbour_hold_down_timer, 'keep_alive_timer': cfg.CONF.nsxv.bgp_neighbour_keep_alive_timer} for edge_id in edge_ids] headers = ['edge_id', 'ip_address', 'remote_as', 'hold_down_timer', 'keep_alive_timer'] LOG.info(formatters.output_formatter('New BPG neighbour', res, headers)) def remove_bgp_neighbour(resource, event, trigger, **kwargs): usage = ("nsxadmin -r bgp-neighbour -o delete " "--property gw-edge-ids=[,...] 
" "--property ip-address=") required_params = ('gw-edge-ids', 'ip-address') properties = admin_utils.parse_multi_keyval_opt(kwargs.get('property', [])) if not properties or not set(required_params) <= set(properties.keys()): LOG.error(usage) return nbr = nsxv_bgp.gw_bgp_neighbour(properties['ip-address'], '', '') edge_ids = properties['gw-edge-ids'].split(',') for edge_id in edge_ids: try: nsxv.remove_bgp_neighbours(edge_id, [nbr]) except exceptions.ResourceNotFound: LOG.error("Edge %s was not found", edge_id) return registry.subscribe(create_bgp_gw, constants.BGP_GW_EDGE, shell.Operations.CREATE.value) registry.subscribe(delete_bgp_gw, constants.BGP_GW_EDGE, shell.Operations.DELETE.value) registry.subscribe(list_bgp_edges, constants.BGP_GW_EDGE, shell.Operations.LIST.value) registry.subscribe(create_redis_rule, constants.ROUTING_REDIS_RULE, shell.Operations.CREATE.value) registry.subscribe(delete_redis_rule, constants.ROUTING_REDIS_RULE, shell.Operations.DELETE.value) registry.subscribe(add_bgp_neighbour, constants.BGP_NEIGHBOUR, shell.Operations.CREATE.value) registry.subscribe(remove_bgp_neighbour, constants.BGP_NEIGHBOUR, shell.Operations.DELETE.value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxv/resources/metadata.py0000644000175000017500000004153700000000000030424 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import hashlib import hmac from neutron.db import models_v2 from neutron_lib.callbacks import registry from oslo_config import cfg from oslo_log import log as logging from vmware_nsx.common import config from vmware_nsx.common import locking from vmware_nsx.common import nsxv_constants from vmware_nsx.db import nsxv_db from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az from vmware_nsx.plugins.nsx_v import md_proxy from vmware_nsx.plugins.nsx_v.vshield.common import constants as vcns_constants from vmware_nsx.plugins.nsx_v.vshield import nsxv_loadbalancer as nsxv_lb from vmware_nsx.services.lbaas.nsx_v import lbaas_common as lb_common from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import formatters from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.nsxv.resources import utils as utils from vmware_nsx.shell import resources as shell INTERNAL_SUBNET = '169.254.128.0/17' NSXV_MD_RULES = [ { 'name': 'VSERule', 'enabled': True, 'action': 'accept', 'source_vnic_groups': ['vse'], 'destination_vnic_groups': ['external']}, { 'name': 'MDServiceIP', 'destination': {'ipAddress': ['169.254.169.254']}, 'enabled': True, 'application': {'service': [{'protocol': 'tcp', 'port': [80, 443, 8775]}]}, 'action': 'accept', 'ruleTag': None}, { 'name': 'VSEMDInterEdgeNet', 'enabled': True, 'action': 'accept', 'source_vnic_groups': ['vse'], 'destination_ip_address': [INTERNAL_SUBNET]}, { 'name': 'MDInterEdgeNet', 'destination': {'ipAddress': ['169.254.128.0/17']}, 'enabled': True, 'action': 'deny', 'ruleTag': None}] LOG = logging.getLogger(__name__) nsxv = utils.get_nsxv_client() def _append_md_fw_rules(fw_rules): fw_rules = NSXV_MD_RULES + fw_rules # Set FW rules tags for i in range(len(fw_rules)): fw_rules[i]['ruleTag'] = i + 1 return fw_rules def 
_handle_edge_firewall_rules(edge_id): try: h, fw_cfg = nsxv.get_firewall(edge_id) except Exception as e: fw_cfg = {} LOG.error("Failed to retrieve firewall config for edge %(edge)s " "with exception %(e)s", {'edge': edge_id, 'e': e}) fw_rules = fw_cfg.get('firewallRules', {}).get('firewallRules', []) md_rule_names = ['MDInterEdgeNet', 'MDServiceIP', 'VSEMDInterEdgeNet', 'VSERule'] new_rules = [] for rule in fw_rules: if rule['name'] in md_rule_names: md_rule_names.remove(rule['name']) else: new_rules.append(rule) if md_rule_names: new_rules = _append_md_fw_rules(new_rules) fw_cfg['firewallRules']['firewallRules'] = new_rules try: nsxv.update_firewall(edge_id, fw_cfg) LOG.info('Added missing firewall rules for edge %s', edge_id) except Exception as e: LOG.warning("Failed to update firewall config for edge " "%(edge)s with exception %(e)s", {'edge': edge_id, 'e': e}) def _recreate_rtr_metadata_cfg(context, plugin, az_name, edge_id): rtr_binding = nsxv_db.get_nsxv_router_binding_by_edge( context.session, edge_id) md_handler = plugin.metadata_proxy_handler[az_name] if md_handler: try: md_handler.configure_router_edge( context, rtr_binding['router_id']) LOG.info('Added metadata components for edge %s', edge_id) except Exception as e: LOG.error('Recreation of metadata components for edge ' '%(edge)s failed with error %(e)s', {'edge': edge_id, 'e': e}) else: LOG.error('Could not find a metadata handler for availability zone %s', az_name) def _update_md_lb_members(edge_id, edge_internal_ips, lb, pool): LOG.info('Updating metadata members for edge %s', edge_id) pool.members = {} i = 0 s_port = cfg.CONF.nsxv.nova_metadata_port for member_ip in edge_internal_ips: i += 1 member = nsxv_lb.NsxvLBPoolMember( name='Member-%d' % i, ip_address=member_ip, port=s_port, monitor_port=s_port) pool.add_member(member) try: lb.submit_to_backend(nsxv, edge_id) LOG.info('Updated members for %s', edge_id) except Exception as e: LOG.error('Updating members for %(edge)s failed with ' 'error 
%(e)s', {'edge': edge_id, 'e': e}) def _get_internal_edge_ips(context, az_name): # Get the list of internal networks for this AZ db_net = nsxv_db.get_nsxv_internal_network_for_az( context.session, vcns_constants.InternalEdgePurposes.INTER_EDGE_PURPOSE, az_name) internal_net = None internal_subnet = None if db_net: internal_net = db_net['network_id'] internal_subnet = context.session.query( models_v2.Subnet).filter_by( network_id=internal_net).first().get('id') # Get the list of internal edges for this AZ edge_list = nsxv_db.get_nsxv_internal_edges_by_purpose( context.session, vcns_constants.InternalEdgePurposes.INTER_EDGE_PURPOSE) edge_az_list = [edge for edge in edge_list if nsxv_db.get_router_availability_zone( context.session, edge['router_id']) == az_name] md_rtr_ids = [edge['router_id'] for edge in edge_az_list] edge_internal_ips = [] for edge in edge_az_list: edge_internal_port = context.session.query( models_v2.Port).filter_by(network_id=internal_net, device_id=edge['router_id']).first() if edge_internal_port: edge_internal_ip = context.session.query( models_v2.IPAllocation).filter_by( port_id=edge_internal_port['id']).first() edge_internal_ips.append(edge_internal_ip['ip_address']) if not internal_net or not internal_subnet or not edge_internal_ips: return None, None LOG.info('Metadata proxy internal IPs are %s', edge_internal_ips) return edge_internal_ips, md_rtr_ids def _handle_edge(context, plugin, az_name, edge_id, edge_internal_ips): with locking.LockManager.get_lock(edge_id): lb = nsxv_lb.NsxvLoadbalancer.get_loadbalancer(nsxv, edge_id) virt = lb.virtual_servers.get(md_proxy.METADATA_VSE_NAME) if virt: pool = virt.default_pool curr_member_ips = [member.payload['ipAddress'] for member in pool.members.values()] if set(curr_member_ips) != set(edge_internal_ips): _update_md_lb_members(edge_id, edge_internal_ips, lb, pool) else: # Interface connectivity and LB definition are done at the same # operation. 
if LB is missing then interface should be missing # as well LOG.info('Metadata LB components for edge %s are missing', edge_id) _recreate_rtr_metadata_cfg(context, plugin, az_name, edge_id) _handle_edge_firewall_rules(edge_id) @admin_utils.output_header def nsx_redo_metadata_cfg(resource, event, trigger, **kwargs): properties = admin_utils.parse_multi_keyval_opt(kwargs.get('property')) edgeapi = utils.NeutronDbClient() plugin = utils.NsxVPluginWrapper() edge_id = properties.get('edge-id') if properties: if edge_id: nsx_redo_metadata_cfg_for_edge(edgeapi.context, plugin, edge_id) return else: # if the net-id property exist - recreate the edge for this network az_name = properties.get('az-name') if az_name: nsx_redo_metadata_cfg_for_az(edgeapi.context, plugin, az_name) return LOG.error('Cannot parse properties %s', properties) return nsx_redo_metadata_cfg_all(edgeapi.context, plugin) def nsx_redo_metadata_cfg_for_edge(context, plugin, edge_id): binding = nsxv_db.get_nsxv_router_binding_by_edge(context.session, edge_id) if binding: az_name = binding['availability_zone'] conf_az = nsx_az.NsxVAvailabilityZones() az = conf_az.availability_zones[az_name] if not az.supports_metadata(): LOG.error('Edge %(edge)s belongs to az %(az)s which does not ' 'support metadata', {'az': az_name, 'edge': edge_id}) edge_internal_ips, md_rtr_ids = _get_internal_edge_ips(context, az_name) if not edge_internal_ips and not md_rtr_ids: LOG.error("Metadata infrastructure is missing or broken. 
" "It is recommended to restart neutron service before " "proceeding with configuration restoration") return if binding['router_id'] in md_rtr_ids: LOG.error('Edge %s is a metadata proxy', edge_id) return if (binding['router_id'].startswith( vcns_constants.BACKUP_ROUTER_PREFIX) or binding['router_id'].startswith( vcns_constants.PLR_EDGE_PREFIX)or binding['router_id'].startswith( lb_common.RESOURCE_ID_PFX)): LOG.error('Edge %s is not a metadata delivery appliance', edge_id) return _handle_edge(context, plugin, az_name, edge_id, edge_internal_ips) else: LOG.error('No edge binding found for edge %s', edge_id) @admin_utils.output_header def nsx_redo_metadata_cfg_all(context, plugin): user_confirm = admin_utils.query_yes_no("Do you want to setup metadata " "infrastructure for all the edges", default="no") if not user_confirm: LOG.info("NSXv vnics deletion aborted by user") return config.register_nsxv_azs(cfg.CONF, cfg.CONF.nsxv.availability_zones) conf_az = nsx_az.NsxVAvailabilityZones() az_list = conf_az.list_availability_zones_objects() for az in az_list: if az.supports_metadata(): nsx_redo_metadata_cfg_for_az(context, plugin, az.name, False) else: LOG.info("Skipping availability zone: %s - no metadata " "configuration", az.name) def nsx_redo_metadata_cfg_for_az(context, plugin, az_name, check_az=True): LOG.info("Updating MetaData for availability zone: %s", az_name) if check_az: conf_az = nsx_az.NsxVAvailabilityZones() az = conf_az.availability_zones.get(az_name) if not az: LOG.error('Availability zone %s not found', az_name) return if not az.supports_metadata(): LOG.error('Availability zone %s is not configured with metadata', az_name) return edge_internal_ips, md_rtr_ids = _get_internal_edge_ips(context, az_name) if not edge_internal_ips and not md_rtr_ids: LOG.error("Metadata infrastructure is missing or broken. 
" "It is recommended to restart neutron service before " "proceeding with configuration restoration") return router_bindings = nsxv_db.get_nsxv_router_bindings( context.session, filters={'edge_type': [nsxv_constants.SERVICE_EDGE], 'availability_zone': [az_name]}) edge_ids = list(set([binding['edge_id'] for binding in router_bindings if (binding['router_id'] not in set(md_rtr_ids) and not binding['router_id'].startswith( vcns_constants.BACKUP_ROUTER_PREFIX) and not binding['router_id'].startswith( vcns_constants.PLR_EDGE_PREFIX)and not binding['router_id'].startswith( lb_common.RESOURCE_ID_PFX))])) for edge_id in edge_ids: _handle_edge(context, plugin, az_name, edge_id, edge_internal_ips) @admin_utils.output_header def update_shared_secret(resource, event, trigger, **kwargs): edgeapi = utils.NeutronDbClient() edge_list = nsxv_db.get_nsxv_internal_edges_by_purpose( edgeapi.context.session, vcns_constants.InternalEdgePurposes.INTER_EDGE_PURPOSE) md_rtr_ids = [edge['router_id'] for edge in edge_list] router_bindings = nsxv_db.get_nsxv_router_bindings( edgeapi.context.session, filters={'edge_type': [nsxv_constants.SERVICE_EDGE]}) edge_ids = list(set([binding['edge_id'] for binding in router_bindings if (binding['router_id'] not in set(md_rtr_ids) and not binding['router_id'].startswith( vcns_constants.BACKUP_ROUTER_PREFIX) and not binding['router_id'].startswith( vcns_constants.PLR_EDGE_PREFIX))])) for edge_id in edge_ids: with locking.LockManager.get_lock(edge_id): lb = nsxv_lb.NsxvLoadbalancer.get_loadbalancer(nsxv, edge_id) virt = lb.virtual_servers.get(md_proxy.METADATA_VSE_NAME) if not virt: LOG.error("Virtual server not found for edge: %s", edge_id) continue virt.del_app_rule('insert-auth') if cfg.CONF.nsxv.metadata_shared_secret: signature = hmac.new( bytearray(cfg.CONF.nsxv.metadata_shared_secret, 'ascii'), bytearray(edge_id, 'ascii'), hashlib.sha256).hexdigest() sign = 'reqadd X-Metadata-Provider-Signature:' + signature sign_app_rule = 
nsxv_lb.NsxvLBAppRule('insert-auth', sign) virt.add_app_rule(sign_app_rule) lb.submit_to_backend(nsxv, edge_id) def _md_member_status(title, edge_ids): for edge_id in edge_ids: lb_stats = nsxv.get_loadbalancer_statistics( edge_id) pools_stats = lb_stats[1].get('pool', []) members = [] for pool_stats in pools_stats: if pool_stats['name'] == md_proxy.METADATA_POOL_NAME: for member in pool_stats.get('member', []): members.append({'member_ip': member['ipAddress'], 'member_status': member['status']}) LOG.info(formatters.output_formatter( title % edge_id, members, ['member_ip', 'member_status'])) @admin_utils.output_header def get_metadata_status(resource, event, trigger, **kwargs): if kwargs.get('property'): properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) net_id = properties.get('network_id') else: net_id = None edgeapi = utils.NeutronDbClient() edge_list = nsxv_db.get_nsxv_internal_edges_by_purpose( edgeapi.context.session, vcns_constants.InternalEdgePurposes.INTER_EDGE_PURPOSE) md_rtr_ids = [edge['router_id'] for edge in edge_list] router_bindings = nsxv_db.get_nsxv_router_bindings( edgeapi.context.session, filters={'router_id': md_rtr_ids}) edge_ids = [b['edge_id'] for b in router_bindings] _md_member_status('Metadata edge appliance: %s members', edge_ids) if net_id: as_provider_data = nsxv_db.get_edge_vnic_bindings_by_int_lswitch( edgeapi.context.session, net_id) providers = [asp['edge_id'] for asp in as_provider_data] if providers: LOG.info('Metadata providers for network %s', net_id) _md_member_status('Edge %s', providers) else: LOG.info('No providers found for network %s', net_id) registry.subscribe(nsx_redo_metadata_cfg, constants.METADATA, shell.Operations.NSX_UPDATE.value) registry.subscribe(update_shared_secret, constants.METADATA, shell.Operations.NSX_UPDATE_SECRET.value) registry.subscribe(get_metadata_status, constants.METADATA, shell.Operations.STATUS.value) ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxv/resources/migration.py0000644000175000017500000002233300000000000030626 0ustar00coreycorey00000000000000# Copyright 2019 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from oslo_log import log as logging from neutron_lib.api.definitions import allowedaddresspairs as addr_apidef from neutron_lib.api.definitions import provider_net as pnet from neutron_lib.api import validators from neutron_lib.callbacks import registry from neutron_lib import constants as nl_constants from neutron_lib import context as n_context from vmware_nsx.common import nsxv_constants from vmware_nsx.common import utils as c_utils from vmware_nsx.services.lbaas.nsx_v3.implementation import lb_utils from vmware_nsx.services.lbaas.octavia import constants as oct_const from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.nsxv.resources import utils from vmware_nsx.shell import resources as shell from vmware_nsxlib.v3 import nsx_constants as nsxlib_consts LOG = logging.getLogger(__name__) @admin_utils.output_header def validate_config_for_migration(resource, event, trigger, **kwargs): """Validate the nsxv configuration before migration to nsx-t""" transit_networks = ["100.64.0.0/16"] if kwargs.get('property'): # input validation properties = 
admin_utils.parse_multi_keyval_opt(kwargs['property']) transit_network = properties.get('transit-network') if transit_network: transit_networks = [transit_network] # Max number of allowed address pairs (allowing 3 for fixed ips) num_allowed_addr_pairs = nsxlib_consts.NUM_ALLOWED_IP_ADDRESSES - 3 admin_context = n_context.get_admin_context() n_errors = 0 with utils.NsxVPluginWrapper() as plugin: # Ports validations: ports = plugin.get_ports(admin_context) for port in ports: net_id = port['network_id'] # Too many address pairs in a port address_pairs = port.get(addr_apidef.ADDRESS_PAIRS) if len(address_pairs) > num_allowed_addr_pairs: n_errors = n_errors + 1 LOG.error("%s allowed address pairs for port %s. Only %s are " "allowed.", len(address_pairs), port['id'], num_allowed_addr_pairs) # Compute port on external network if (port.get('device_owner', '').startswith( nl_constants.DEVICE_OWNER_COMPUTE_PREFIX) and plugin._network_is_external(admin_context, net_id)): n_errors = n_errors + 1 LOG.error("Compute port %s on external network %s is not " "allowed.", port['id'], net_id) # Networks & subnets validations: networks = plugin.get_networks(admin_context) for net in networks: # skip internal networks if net['project_id'] == nsxv_constants.INTERNAL_TENANT_ID: continue # VXLAN or portgroup provider networks net_type = net.get(pnet.NETWORK_TYPE) if (net_type == c_utils.NsxVNetworkTypes.VXLAN or net_type == c_utils.NsxVNetworkTypes.PORTGROUP): n_errors = n_errors + 1 LOG.error("Network %s of type %s is not supported.", net['id'], net_type) subnets = plugin._get_subnets_by_network(admin_context, net['id']) n_dhcp_subnets = 0 # Multiple DHCP subnets per network for subnet in subnets: if subnet['enable_dhcp']: n_dhcp_subnets = n_dhcp_subnets + 1 if n_dhcp_subnets > 1: n_errors = n_errors + 1 LOG.error("Network %s has %s dhcp subnets. 
Only 1 is allowed.", net['id'], n_dhcp_subnets) # Subnets overlapping with the transit network for subnet in subnets: # get the subnet IPs if ('allocation_pools' in subnet and validators.is_attr_set(subnet['allocation_pools'])): # use the pools instead of the cidr subnet_networks = [ netaddr.IPRange(pool.get('start'), pool.get('end')) for pool in subnet.get('allocation_pools')] else: cidr = subnet.get('cidr') if not validators.is_attr_set(cidr): return subnet_networks = [netaddr.IPNetwork(subnet['cidr'])] for subnet_net in subnet_networks: if (netaddr.IPSet(subnet_net) & netaddr.IPSet(transit_networks)): n_errors = n_errors + 1 LOG.error("Subnet %s overlaps with the transit " "network ips: %s.", subnet['id'], transit_networks) # Network attached to multiple routers intf_ports = plugin._get_network_interface_ports( admin_context, net['id']) if len(intf_ports) > 1: n_errors = n_errors + 1 LOG.error("Network %s has interfaces on multiple routers. " "Only 1 is allowed.", net['id']) # Routers validations: routers = plugin.get_routers(admin_context) for router in routers: # Interface subnets overlap with the GW subnet gw_subnets = plugin._find_router_gw_subnets(admin_context, router) gw_cidrs = [subnet['cidr'] for subnet in gw_subnets] gw_ip_set = netaddr.IPSet(gw_cidrs) if_cidrs = plugin._find_router_subnets_cidrs( admin_context, router['id']) if_ip_set = netaddr.IPSet(if_cidrs) if gw_ip_set & if_ip_set: n_errors = n_errors + 1 LOG.error("Interface network of router %s cannot overlap with " "router GW network", router['id']) # TODO(asarfaty): missing validations: # - Vlan provider network with the same VLAN tag as the uplink # profile tag used in the relevant transport node # (cannot check this without access to the T manager) # Octavia loadbalancers validation: filters = {'device_owner': [nl_constants.DEVICE_OWNER_LOADBALANCERV2, oct_const.DEVICE_OWNER_OCTAVIA]} lb_ports = plugin.get_ports(admin_context, filters=filters) lb_routers = [] for port in lb_ports: fixed_ips 
= port.get('fixed_ips', []) if fixed_ips: subnet_id = fixed_ips[0]['subnet_id'] network = lb_utils.get_network_from_subnet( admin_context, plugin, subnet_id) router_id = lb_utils.get_router_from_network( admin_context, plugin, subnet_id) # Loadbalancer vip subnet must be connected to a router or # belong to an external network if (not router_id and network and not network.get('router:external')): n_errors = n_errors + 1 LOG.error("Loadbalancer %s subnet %s is not external " "nor connected to a router.", port.get('device_id'), subnet_id) # Multiple loadbalancers on the same router cannot be supported if router_id in lb_routers: n_errors = n_errors + 1 LOG.error("Router %s has multiple loadbalancers which is " "not supported.", router_id) else: lb_routers.append(router_id) # TODO(asarfaty): Multiple listeners on the same pool is not # supported, but currently the admin utility has no access to this # information from octavia # TODO(asarfaty): Member on external subnet must have fip as ip, # but currently the admin utility has no access to this information # from octavia # General validations: # TODO(asarfaty): multiple transport zones (migrator limitation)? if n_errors > 0: plural = n_errors > 1 LOG.error("The NSX-V plugin configuration is not ready to be " "migrated to NSX-T. %s error%s found.", n_errors, 's were' if plural else ' was') exit(n_errors) LOG.info("The NSX-V plugin configuration is ready to be migrated to " "NSX-T.") registry.subscribe(validate_config_for_migration, constants.NSX_MIGRATE_V_T, shell.Operations.VALIDATE.value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxv/resources/networks.py0000644000175000017500000003434500000000000030517 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re import sys import xml.etree.ElementTree as et from neutron_lib.callbacks import registry from neutron_lib import context from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_vmware import vim_util from vmware_nsx.db import db as nsx_db from vmware_nsx.dvs import dvs from vmware_nsx.plugins.nsx_v.vshield.common import exceptions from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import formatters from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.nsxv.resources import utils as utils from vmware_nsx.shell import resources as shell LOG = logging.getLogger(__name__) nsxv = utils.get_nsxv_client() network_types = ['Network', 'VirtualWire', 'DistributedVirtualPortgroup'] PORTGROUP_PREFIX = 'dvportgroup' def get_networks_from_backend(): nsxv = utils.get_nsxv_client() so_list = nsxv.get_scoping_objects() return et.fromstring(so_list) def get_networks(): """Create an array of all the backend networks and their data """ root = get_networks_from_backend() networks = [] for obj in root.iter('object'): if obj.find('objectTypeName').text in network_types: networks.append({'type': obj.find('objectTypeName').text, 'moref': obj.find('objectId').text, 'name': obj.find('name').text}) return networks def get_networks_name_map(): """Create a dictionary mapping moref->backend 
name """ root = get_networks_from_backend() networks = {} for obj in root.iter('object'): if obj.find('objectTypeName').text in network_types: networks[obj.find('objectId').text] = obj.find('name').text return networks @admin_utils.output_header def neutron_list_networks(resource, event, trigger, **kwargs): LOG.info(formatters.output_formatter(constants.NETWORKS, get_networks(), ['type', 'moref', 'name'])) @admin_utils.output_header def nsx_update_switch(resource, event, trigger, **kwargs): nsxv = utils.get_nsxv_client() if not kwargs.get('property'): LOG.error("Need to specify dvs-id parameter and " "attribute to update. Add --property dvs-id= " "--property teamingpolicy=") return properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) dvs_id = properties.get('dvs-id') if not dvs_id: LOG.error("Need to specify dvs-id. " "Add --property dvs-id=") return try: h, switch = nsxv.get_vdn_switch(dvs_id) except exceptions.ResourceNotFound: LOG.error("DVS %s not found", dvs_id) return supported_policies = ['ETHER_CHANNEL', 'LOADBALANCE_LOADBASED', 'LOADBALANCE_SRCID', 'LOADBALANCE_SRCMAC', 'FAILOVER_ORDER', 'LACP_ACTIVE', 'LACP_PASSIVE', 'LACP_V2'] policy = properties.get('teamingpolicy') if policy in supported_policies: if switch['teamingPolicy'] == policy: LOG.info("Policy already set!") return LOG.info("Updating NSXv switch %(dvs)s teaming policy to " "%(policy)s", {'dvs': dvs_id, 'policy': policy}) switch['teamingPolicy'] = policy try: switch = nsxv.update_vdn_switch(switch) except exceptions.VcnsApiException as e: desc = jsonutils.loads(e.response) details = desc.get('details') if details.startswith("No enum constant"): LOG.error("Unknown teaming policy %s", policy) else: LOG.error("Unexpected error occurred: %s", details) return LOG.info("Switch value after update: %s", switch) else: LOG.info("Current switch value is: %s", switch) LOG.error("Invalid teaming policy. 
" "Add --property teamingpolicy=") LOG.error("Possible values: %s", ', '.join(supported_policies)) @admin_utils.output_header def list_missing_networks(resource, event, trigger, **kwargs): """List the neutron networks which are missing the backend moref """ # get the neutron-nsx networks mapping from DB admin_context = context.get_admin_context() mappings = nsx_db.get_nsx_networks_mapping(admin_context.session) # get the list of backend networks: backend_networks = get_networks_name_map() missing_networks = [] # For each neutron network - check if there is a matching backend network for entry in mappings: nsx_id = entry['nsx_id'] dvs_id = entry['dvs_id'] if nsx_id not in backend_networks.keys(): missing_networks.append({'neutron_id': entry['neutron_id'], 'moref': nsx_id, 'dvs_id': dvs_id}) elif dvs_id: netname = backend_networks[nsx_id] if not netname.startswith(dvs_id): missing_networks.append({'neutron_id': entry['neutron_id'], 'moref': nsx_id, 'dvs_id': dvs_id}) LOG.info(formatters.output_formatter(constants.MISSING_NETWORKS, missing_networks, ['neutron_id', 'moref', 'dvs_id'])) @admin_utils.output_header def list_orphaned_networks(resource, event, trigger, **kwargs): """List the NSX networks which are missing the neutron DB """ admin_context = context.get_admin_context() missing_networks = [] # get all neutron distributed routers in advanced with utils.NsxVPluginWrapper() as plugin: neutron_routers = plugin.get_routers( admin_context, fields=['id', 'name', 'distributed']) neutron_dist_routers = [rtr for rtr in neutron_routers if rtr['distributed']] # get the list of backend networks: backend_networks = get_networks() for net in backend_networks: moref = net['moref'] backend_name = net['name'] # Decide if this is a neutron network by its name (which should always # contain the net-id), and type if (backend_name.startswith('edge-') or len(backend_name) < 36 or net['type'] == 'Network'): # This is not a neutron network continue if backend_name.startswith('int-') 
and net['type'] == 'VirtualWire': # This is a PLR network. Check that the router exists found = False # compare the expected lswitch name by the dist router name & id for rtr in neutron_dist_routers: lswitch_name = ('int-' + rtr['name'] + rtr['id'])[:36] if lswitch_name == backend_name: found = True break # if the neutron router got renamed, this will not work. # compare ids prefixes instead (might cause false positives) for rtr in neutron_dist_routers: if rtr['id'][:5] in backend_name: LOG.info("Logical switch %s probably matches distributed " "router %s", backend_name, rtr['id']) found = True break if not found: missing_networks.append(net) continue # get the list of neutron networks with this moref neutron_networks = nsx_db.get_nsx_network_mapping_for_nsx_id( admin_context.session, moref) if not neutron_networks: # no network found for this moref missing_networks.append(net) elif moref.startswith(PORTGROUP_PREFIX): # This is a VLAN network. Also verify that the DVS Id matches for entry in neutron_networks: if (not entry['dvs_id'] or backend_name.startswith(entry['dvs_id'])): found = True # this moref & dvs-id does not appear in the DB if not found: missing_networks.append(net) LOG.info(formatters.output_formatter(constants.ORPHANED_NETWORKS, missing_networks, ['type', 'moref', 'name'])) def _get_nsx_portgroups(dvs_id): dvsManager = dvs.VCManager() dvs_moref = dvsManager._get_dvs_moref_by_id(dvs_id) port_groups = dvsManager._session.invoke_api(vim_util, 'get_object_properties', dvsManager._session.vim, dvs_moref, ['portgroup']) nsx_portgroups = [] if len(port_groups) and hasattr(port_groups[0], 'propSet'): for prop in port_groups[0].propSet: for val in prop.val[0]: nsx_portgroups.append({'moref': val.value, 'type': val._type}) return nsx_portgroups @admin_utils.output_header def list_nsx_portgroups(resource, event, trigger, **kwargs): if not cfg.CONF.dvs.host_ip: LOG.info("Please configure the dvs section in the nsx configuration " "file") return dvs_id = 
cfg.CONF.nsxv.dvs_id port_groups = _get_nsx_portgroups(dvs_id) LOG.info(formatters.output_formatter( constants.NSX_PORTGROUPS + " for %s" % dvs_id, port_groups, ['moref', 'type'])) @admin_utils.output_header def delete_nsx_portgroups(resource, event, trigger, **kwargs): if not cfg.CONF.dvs.host_ip: LOG.info("Please configure the dvs section in the nsx configuration " "file") return dvs_id = cfg.CONF.nsxv.dvs_id portgroups = _get_nsx_portgroups(dvs_id) if not portgroups: LOG.info("No NSX portgroups found for %s", dvs_id) return if not kwargs.get('force'): #ask for the user confirmation confirm = admin_utils.query_yes_no( "Do you want to delete all NSX portgroups for %s" % dvs_id, default="no") if not confirm: LOG.info("NSX portgroups deletion aborted by user") return vcns = utils.get_nsxv_client() for portgroup in portgroups: try: vcns.delete_port_group(dvs_id, portgroup['moref']) except Exception as e: LOG.error("Failed to delete portgroup %(pg)s: %(e)s", {'pg': portgroup['moref'], 'e': e}) sys.exc_clear() else: LOG.info("Successfully deleted portgroup %(pg)s", {'pg': portgroup['moref']}) LOG.info("Done.") def get_dvs_id_from_backend_name(backend_name): reg = re.search(r"^dvs-\d*", backend_name) if reg: return reg.group(0) @admin_utils.output_header def delete_backend_network(resource, event, trigger, **kwargs): """Delete a backend network by its moref """ errmsg = ("Need to specify moref property. 
Add --property moref=") if not kwargs.get('property'): LOG.error("%s", errmsg) return properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) moref = properties.get('moref') if not moref: LOG.error("%s", errmsg) return backend_name = get_networks_name_map().get(moref) if not backend_name: LOG.error("Failed to find the backend network %(moref)s", {'moref': moref}) return # Note: in case the backend network is attached to other backend objects, # like VM, the deleting may fail and through an exception nsxv = utils.get_nsxv_client() if moref.startswith(PORTGROUP_PREFIX): # get the dvs id from the backend name: dvs_id = get_dvs_id_from_backend_name(backend_name) if not dvs_id: LOG.error("Failed to find the DVS id of backend network " "%(moref)s", {'moref': moref}) else: try: nsxv.delete_port_group(dvs_id, moref) except Exception as e: LOG.error("Failed to delete backend network %(moref)s : " "%(e)s", {'moref': moref, 'e': e}) else: LOG.info("Backend network %(moref)s was deleted", {'moref': moref}) else: # Virtual wire try: nsxv.delete_virtual_wire(moref) except Exception as e: LOG.error("Failed to delete backend network %(moref)s : " "%(e)s", {'moref': moref, 'e': e}) else: LOG.info("Backend network %(moref)s was deleted", {'moref': moref}) registry.subscribe(neutron_list_networks, constants.NETWORKS, shell.Operations.LIST.value) registry.subscribe(nsx_update_switch, constants.NETWORKS, shell.Operations.NSX_UPDATE.value) registry.subscribe(list_missing_networks, constants.MISSING_NETWORKS, shell.Operations.LIST.value) registry.subscribe(list_orphaned_networks, constants.ORPHANED_NETWORKS, shell.Operations.LIST.value) registry.subscribe(delete_backend_network, constants.ORPHANED_NETWORKS, shell.Operations.NSX_CLEAN.value) registry.subscribe(list_nsx_portgroups, constants.NSX_PORTGROUPS, shell.Operations.LIST.value) registry.subscribe(delete_nsx_portgroups, constants.NSX_PORTGROUPS, shell.Operations.NSX_CLEAN.value) 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxv/resources/routers.py0000644000175000017500000004032600000000000030342 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import formatters import vmware_nsx.shell.admin.plugins.common.utils as admin_utils import vmware_nsx.shell.admin.plugins.nsxv.resources.utils as utils import vmware_nsx.shell.resources as shell from neutron_lib.callbacks import registry from neutron_lib import context as n_context from oslo_config import cfg from oslo_log import log as logging from vmware_nsx.common import locking from vmware_nsx.db import nsxv_db from vmware_nsx.extensions import routersize from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az from vmware_nsx.plugins.nsx_v import md_proxy from vmware_nsx.plugins.nsx_v.vshield import edge_utils from vmware_nsx.plugins.nsx_v.vshield import vcns_driver LOG = logging.getLogger(__name__) def delete_old_edge(context, old_edge_id): LOG.info("Deleting the old edge: %s", old_edge_id) # clean it up from the DB nsxv_db.clean_edge_router_binding(context.session, old_edge_id) nsxv_db.clean_edge_vnic_binding(context.session, old_edge_id) nsxv_db.cleanup_nsxv_edge_firewallrule_binding(context.session, 
old_edge_id) with locking.LockManager.get_lock(old_edge_id): # Delete from NSXv backend # Note - If we will not delete the edge, but free it - it will be # immediately used as the new one, So it is better to delete it. try: nsxv = utils.get_nsxv_client() nsxv.delete_edge(old_edge_id) except Exception as e: LOG.warning("Failed to delete the old edge %(id)s: %(e)s", {'id': old_edge_id, 'e': e}) # Continue the process anyway # The edge may have been already deleted at the backend def _get_router_az_from_plugin_router(router): # If the router edge was already deployed the availability_zones will # return the az az_name = router.get('availability_zones', [''])[0] if not az_name: # If it was not deployed - it may be in the creation hints az_name = router.get('availability_zones_hints', [''])[0] if not az_name: # If not - the default az was used. az_name = nsx_az.DEFAULT_NAME return az_name def nsx_recreate_router_edge(old_edge_id): # init the plugin and edge manager cfg.CONF.set_override('core_plugin', 'vmware_nsx.shell.admin.plugins.nsxv.resources' '.utils.NsxVPluginWrapper') with utils.NsxVPluginWrapper() as plugin: nsxv_manager = vcns_driver.VcnsDriver( edge_utils.NsxVCallbacks(plugin)) edge_manager = edge_utils.EdgeManager(nsxv_manager, plugin) context = n_context.get_admin_context() # verify that this is a Router edge router_ids = edge_manager.get_routers_on_edge(context, old_edge_id) if not router_ids: LOG.error("Edge %(edge_id)s is not a router edge", {'edge_id': old_edge_id}) return # all the routers on the same edge have the same type, so it # is ok to check the type once example_router = plugin.get_router(context, router_ids[0]) if example_router.get('distributed'): LOG.error("Recreating a distributed router edge is not " "supported") return router_driver = plugin._router_managers.get_tenant_router_driver( context, example_router['router_type']) # load all the routers before deleting their binding routers = [] for router_id in router_ids: 
routers.append(plugin.get_router(context, router_id)) # delete the backend edge and all the relevant DB entries delete_old_edge(context, old_edge_id) # Go over all the relevant routers for router in routers: router_id = router['id'] az_name = _get_router_az_from_plugin_router(router) # clean up other objects related to this router if plugin.metadata_proxy_handler: md_proxy = plugin.get_metadata_proxy_handler(az_name) md_proxy.cleanup_router_edge(context, router_id) # attach the router to a new edge appliance_size = router.get(routersize.ROUTER_SIZE) router_driver.attach_router(context, router_id, {'router': router}, appliance_size=appliance_size) # find out who is the new edge to print it new_edge_id = router_driver._get_edge_id_or_raise( context, router_id) LOG.info("Router %(router)s was attached to edge %(edge)s", {'router': router_id, 'edge': new_edge_id}) def nsx_recreate_router(router_id): # init the plugin and edge manager cfg.CONF.set_override('core_plugin', 'vmware_nsx.shell.admin.plugins.nsxv.resources' '.utils.NsxVPluginWrapper') with utils.NsxVPluginWrapper() as plugin: context = n_context.get_admin_context() router = plugin.get_router(context, router_id) if router.get('distributed'): LOG.error("Recreating a distributed router is not supported") return router_driver = plugin._router_managers.get_tenant_router_driver( context, router['router_type']) # Check if it is already attached to an edge binding = nsxv_db.get_nsxv_router_binding(context.session, router_id) if binding: old_edge_id = binding['edge_id'] # detach the router from this edge LOG.info("Detaching the router from edge %s", old_edge_id) router_driver.detach_router(context, router_id, {'router': router}) # attach the router to a new edge appliance_size = router.get(routersize.ROUTER_SIZE) router_driver.attach_router(context, router_id, {'router': router}, appliance_size=appliance_size) # find out who is the new edge to print it new_edge_id = router_driver._get_edge_id_or_raise( context, 
router_id) LOG.info("Router %(router)s was attached to edge %(edge)s", {'router': router_id, 'edge': new_edge_id}) @admin_utils.output_header def nsx_recreate_router_or_edge(resource, event, trigger, **kwargs): """Recreate a router edge with all the data on a new NSXv edge""" if not kwargs.get('property'): LOG.error("Need to specify edge-id or router-id parameter") return # input validation properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) old_edge_id = properties.get('edge-id') router_id = properties.get('router-id') if (not old_edge_id and not router_id) or (old_edge_id and router_id): LOG.error("Need to specify edge-id or router-id parameter") return if old_edge_id: LOG.info("ReCreating NSXv Router Edge: %s", old_edge_id) return nsx_recreate_router_edge(old_edge_id) else: LOG.info("ReCreating NSXv Router: %s", router_id) return nsx_recreate_router(router_id) @admin_utils.output_header def migrate_distributed_routers_dhcp(resource, event, trigger, **kwargs): context = n_context.get_admin_context() nsxv = utils.get_nsxv_client() with utils.NsxVPluginWrapper() as plugin: nsxv_manager = vcns_driver.VcnsDriver( edge_utils.NsxVCallbacks(plugin)) edge_manager = edge_utils.EdgeManager(nsxv_manager, plugin) routers = plugin.get_routers(context) for router in routers: if router.get('distributed', False): binding = nsxv_db.get_nsxv_router_binding(context.session, router['id']) if binding: edge_id = binding['edge_id'] with locking.LockManager.get_lock(edge_id): route_obj = nsxv.get_routes(edge_id)[1] routes = route_obj.get('staticRoutes', {} ).get('staticRoutes', []) new_routes = [route for route in routes if route.get( 'network') != '169.254.169.254/32'] route_obj['staticRoutes']['staticRoutes'] = new_routes nsxv.update_routes(edge_id, route_obj) _update_vdr_fw_config(nsxv, edge_id) plr_id = edge_manager.get_plr_by_tlr_id(context, router['id']) if plr_id: binding = nsxv_db.get_nsxv_router_binding( context.session, plr_id) if binding: 
_update_vdr_fw_config(nsxv, binding['edge_id']) def _update_vdr_fw_config(nsxv, edge_id): fw_config = nsxv.get_firewall(edge_id)[1] md_rule_names = [rule['name'] for rule in md_proxy.get_router_fw_rules()] fw_rules = fw_config.get('firewallRules', {}).get('firewallRules', []) if fw_rules: fw_rules = [rule for rule in fw_rules if rule['name'] not in md_rule_names] fw_config['firewallRules']['firewallRules'] = fw_rules nsxv.update_firewall(edge_id, fw_config) def is_router_conflicting_on_edge(context, driver, router_id): edge_id = edge_utils.get_router_edge_id(context, router_id) if not edge_id: return False (available_routers, conflict_routers) = driver._get_available_and_conflicting_ids( context, router_id) for conf_router in conflict_routers: conf_edge_id = edge_utils.get_router_edge_id(context, conf_router) if conf_edge_id == edge_id: LOG.info("Router %(rtr)s on edge %(edge)s is conflicting with " "another router and will be moved", {'rtr': router_id, 'edge': edge_id}) return True return False @admin_utils.output_header def redistribute_routers(resource, event, trigger, **kwargs): """If any of the shared routers are on a conflicting edge move them""" context = n_context.get_admin_context() with utils.NsxVPluginWrapper() as plugin: router_driver = plugin._router_managers.get_tenant_router_driver( context, 'shared') routers = plugin.get_routers(context) for router in routers: if (not router.get('distributed', False) and router.get('router_type') == 'shared' and is_router_conflicting_on_edge( context, router_driver, router['id'])): router_driver.detach_router(context, router['id'], router) router_driver.attach_router(context, router['id'], router) @admin_utils.output_header def list_orphaned_vnics(resource, event, trigger, **kwargs): """List router orphaned router vnics where the port was deleted""" orphaned_vnics = get_orphaned_vnics() if not orphaned_vnics: LOG.info("No orphaned router vnics found") return headers = ['edge_id', 'vnic_index', 'tunnel_index', 
'network_id'] LOG.info(formatters.output_formatter(constants.ORPHANED_VNICS, orphaned_vnics, headers)) def get_orphaned_vnics(): orphaned_vnics = [] context = n_context.get_admin_context() vnic_binds = nsxv_db.get_edge_vnic_bindings_with_networks( context.session) with utils.NsxVPluginWrapper() as plugin: for vnic_bind in vnic_binds: edge_id = vnic_bind['edge_id'] # check if this is a router edge by the router bindings table router_bindings = nsxv_db.get_nsxv_router_bindings_by_edge( context.session, edge_id) if not router_bindings: # Only log it. this is a different type of orphaned LOG.warning("Router bindings for vnic %s not found", vnic_bind) continue router_ids = [b['router_id'] for b in router_bindings] routers = plugin.get_routers(context, filters={'id': router_ids}) if routers: interface_found = False # check if any of those routers is attached to this network for router in routers: if plugin._get_router_interface_ports_by_network( context, router['id'], vnic_bind['network_id']): interface_found = True break if not interface_found: # for later deleting the interface we need to know if this # is a distributed router. # All the routers on the same edge are of the same type, # so we can check the first one. 
vnic_bind['distributed'] = routers[0].get('distributed') orphaned_vnics.append(vnic_bind) return orphaned_vnics @admin_utils.output_header def clean_orphaned_vnics(resource, event, trigger, **kwargs): """List router orphaned router vnics where the port was deleted""" orphaned_vnics = get_orphaned_vnics() if not orphaned_vnics: LOG.info("No orphaned router vnics found") return headers = ['edge_id', 'vnic_index', 'tunnel_index', 'network_id'] LOG.info(formatters.output_formatter(constants.ORPHANED_VNICS, orphaned_vnics, headers)) user_confirm = admin_utils.query_yes_no("Do you want to delete " "orphaned vnics", default="no") if not user_confirm: LOG.info("NSXv vnics deletion aborted by user") return context = n_context.get_admin_context() with utils.NsxVPluginWrapper() as plugin: nsxv_manager = vcns_driver.VcnsDriver( edge_utils.NsxVCallbacks(plugin)) for vnic in orphaned_vnics: if not vnic['distributed']: try: nsxv_manager.vcns.delete_interface( vnic['edge_id'], vnic['vnic_index']) except Exception as e: LOG.error("Failed to delete vnic from NSX: %s", e) nsxv_db.free_edge_vnic_by_network( context.session, vnic['edge_id'], vnic['network_id']) else: try: nsxv_manager.vcns.delete_vdr_internal_interface( vnic['edge_id'], vnic['vnic_index']) except Exception as e: LOG.error("Failed to delete vnic from NSX: %s", e) nsxv_db.delete_edge_vnic_binding_by_network( context.session, vnic['edge_id'], vnic['network_id']) registry.subscribe(nsx_recreate_router_or_edge, constants.ROUTERS, shell.Operations.NSX_RECREATE.value) registry.subscribe(migrate_distributed_routers_dhcp, constants.ROUTERS, shell.Operations.MIGRATE_VDR_DHCP.value) registry.subscribe(redistribute_routers, constants.ROUTERS, shell.Operations.NSX_REDISTRIBURE.value) registry.subscribe(list_orphaned_vnics, constants.ORPHANED_VNICS, shell.Operations.NSX_LIST.value) registry.subscribe(clean_orphaned_vnics, constants.ORPHANED_VNICS, shell.Operations.NSX_CLEAN.value) 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxv/resources/securitygroups.py0000644000175000017500000006344700000000000031757 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import operator import re import xml.etree.ElementTree as et from neutron.db.models import securitygroup as sg_models from neutron.db import models_v2 from neutron.db import securitygroups_db from neutron.extensions import securitygroup as ext_sg from neutron_lib.callbacks import registry from neutron_lib import context as n_context from neutron_lib.db import api as db_api from oslo_log import log as logging from vmware_nsx.common import utils as com_utils from vmware_nsx.db import db as nsx_db from vmware_nsx.db import extended_security_group as extended_secgroup from vmware_nsx.db import extended_security_group_rule as extend_sg_rule from vmware_nsx.db import nsx_models from vmware_nsx.db import nsxv_db from vmware_nsx.db import nsxv_models from vmware_nsx.extensions import securitygrouplogging as sg_logging from vmware_nsx.extensions import securitygrouppolicy as sg_policy from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import formatters from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from 
vmware_nsx.shell.admin.plugins.nsxv.resources import utils from vmware_nsx.shell import resources as shell LOG = logging.getLogger(__name__) class NeutronSecurityGroupDB( utils.NeutronDbClient, securitygroups_db.SecurityGroupDbMixin, extended_secgroup.ExtendedSecurityGroupPropertiesMixin, extend_sg_rule.ExtendedSecurityGroupRuleMixin): def __init__(self): super(NeutronSecurityGroupDB, self) # FIXME(roeyc): context is already defined in NeutrondDbClient self.context = n_context.get_admin_context() def get_security_groups_mappings(self): q = self.context.session.query( sg_models.SecurityGroup.name, sg_models.SecurityGroup.id, nsxv_models.NsxvSecurityGroupSectionMapping.ip_section_id, nsx_models.NeutronNsxSecurityGroupMapping.nsx_id).join( nsxv_models.NsxvSecurityGroupSectionMapping, nsx_models.NeutronNsxSecurityGroupMapping).all() sg_mappings = [{'name': mapp.name, 'id': mapp.id, 'section-uri': mapp.ip_section_id, 'nsx-securitygroup-id': mapp.nsx_id} for mapp in q] return sg_mappings def get_security_group_rules_mappings(self): q = self.context.session.query( sg_models.SecurityGroupRule.id, nsxv_models.NsxvRuleMapping.nsx_rule_id).join( nsxv_models.NsxvRuleMapping).all() sg_mappings = [{'rule_id': mapp[0], 'nsx_rule_id': mapp[1]} for mapp in q] return sg_mappings def get_security_group(self, sg_id): return super(NeutronSecurityGroupDB, self).get_security_group( self.context, sg_id) def get_security_groups(self): filters = utils.get_plugin_filters(self.context) return super(NeutronSecurityGroupDB, self).get_security_groups(self.context, filters=filters) def get_security_group_id_by_section_id(self, section_id): section_url = ("/api/4.0/firewall/globalroot-0/config/layer3sections" "/%s" % section_id) q = self.context.session.query( nsxv_models.NsxvSecurityGroupSectionMapping).filter_by( ip_section_id=section_url).all() if q: return q[0].neutron_id def _is_provider_section(self, section_id): # look for this section id in the nsx_db, and get the security group sg_id = 
self.get_security_group_id_by_section_id(section_id) if sg_id: # Check in the DB if this is a provider SG return self._is_provider_security_group(self.context, sg_id) return False def delete_security_group_section_mapping(self, sg_id): with db_api.CONTEXT_WRITER.using(self.context): fw_mapping = self.context.session.query( nsxv_models.NsxvSecurityGroupSectionMapping).filter_by( neutron_id=sg_id).one_or_none() if fw_mapping: self.context.session.delete(fw_mapping) def delete_security_group_backend_mapping(self, sg_id): with db_api.CONTEXT_WRITER.using(self.context): sg_mapping = self.context.session.query( nsx_models.NeutronNsxSecurityGroupMapping).filter_by( neutron_id=sg_id).one_or_none() if sg_mapping: self.context.session.delete(sg_mapping) def get_vnics_in_security_group(self, security_group_id): with utils.NsxVPluginWrapper() as plugin: vnics = [] query = self.context.session.query( models_v2.Port.id, models_v2.Port.device_id ).join(sg_models.SecurityGroupPortBinding).filter_by( security_group_id=security_group_id).all() for p in query: vnic_index = plugin._get_port_vnic_index(self.context, p.id) vnic_id = plugin._get_port_vnic_id(vnic_index, p.device_id) vnics.append(vnic_id) return vnics class NsxFirewallAPI(object): def __init__(self): self.vcns = utils.get_nsxv_client() def list_security_groups(self): h, secgroups = self.vcns.list_security_groups() if not secgroups: return [] root = et.fromstring(secgroups) secgroups = [] for sg in root.iter('securitygroup'): sg_id = sg.find('objectId').text # This specific security-group is not relevant to the plugin if sg_id == 'securitygroup-1': continue secgroups.append({'name': sg.find('name').text, 'id': sg_id}) return secgroups def list_fw_sections(self): h, firewall_config = self.vcns.get_dfw_config() if not firewall_config: return [] root = com_utils.normalize_xml(firewall_config) sections = [] for sec in root.iter('section'): sec_id = sec.attrib['id'] # Don't show NSX default sections, which are not relevant to 
OS. if sec_id in ['1001', '1002', '1003']: continue sections.append({'name': sec.attrib['name'], 'id': sec_id}) return sections def delete_fw_section(self, section_id): section_uri = ("/api/4.0/firewall/globalroot-0/" "config/layer3sections/%s" % section_id) self.vcns.delete_section(section_uri) def list_fw_section_rules(self, section_uri): return self.vcns.get_section_rules(section_uri) def remove_rule_from_section(self, section_uri, rule_id): return self.vcns.remove_rule_from_section(section_uri, rule_id) def reorder_fw_sections(self): # read all the sections h, firewall_config = self.vcns.get_dfw_config() if not firewall_config: LOG.info("No firewall sections were found.") return root = com_utils.normalize_xml(firewall_config) for child in root: if str(child.tag) == 'layer3Sections': # go over the L3 sections and reorder them. # The correct order should be: # 1. OS provider security groups # 2. service composer policies # 3. regular OS security groups sections = list(child.iter('section')) provider_sections = [] regular_sections = [] policy_sections = [] for sec in sections: if sec.attrib.get('managedBy') == 'NSX Service Composer': policy_sections.append(sec) else: if neutron_sg._is_provider_section( sec.attrib.get('id')): provider_sections.append(sec) else: regular_sections.append(sec) child.remove(sec) if not policy_sections and not provider_sections: LOG.info("No need to reorder the firewall sections.") return # reorder the sections reordered_sections = (provider_sections + policy_sections + regular_sections) child.extend(reordered_sections) # update the new order of sections in the backend self.vcns.update_dfw_config(et.tostring(root), h) LOG.info("L3 Firewall sections were reordered.") neutron_sg = NeutronSecurityGroupDB() nsxv_firewall = NsxFirewallAPI() def _log_info(resource, data, attrs=['name', 'id']): LOG.info(formatters.output_formatter(resource, data, attrs)) @admin_utils.list_handler(constants.SECURITY_GROUPS) @admin_utils.output_header def 
neutron_list_security_groups_mappings(resource, event, trigger, **kwargs): sg_mappings = neutron_sg.get_security_groups_mappings() _log_info(constants.SECURITY_GROUPS, sg_mappings, attrs=['name', 'id', 'section-uri', 'nsx-securitygroup-id']) return bool(sg_mappings) @admin_utils.list_handler(constants.FIREWALL_SECTIONS) @admin_utils.output_header def nsx_list_dfw_sections(resource, event, trigger, **kwargs): fw_sections = nsxv_firewall.list_fw_sections() _log_info(constants.FIREWALL_SECTIONS, fw_sections) return bool(fw_sections) @admin_utils.list_handler(constants.FIREWALL_NSX_GROUPS) @admin_utils.output_header def nsx_list_security_groups(resource, event, trigger, **kwargs): nsx_secgroups = nsxv_firewall.list_security_groups() _log_info(constants.FIREWALL_NSX_GROUPS, nsx_secgroups) return bool(nsx_secgroups) def _find_missing_security_groups(): nsx_secgroups = nsxv_firewall.list_security_groups() sg_mappings = neutron_sg.get_security_groups_mappings() missing_secgroups = {} for sg_db in sg_mappings: for nsx_sg in nsx_secgroups: if nsx_sg['id'] == sg_db['nsx-securitygroup-id']: break else: missing_secgroups[sg_db['id']] = sg_db return missing_secgroups @admin_utils.list_mismatches_handler(constants.FIREWALL_NSX_GROUPS) @admin_utils.output_header def list_missing_security_groups(resource, event, trigger, **kwargs): sgs_with_missing_nsx_group = _find_missing_security_groups() missing_securitgroups_info = [ {'securitygroup-name': sg['name'], 'securitygroup-id': sg['id'], 'nsx-securitygroup-id': sg['nsx-securitygroup-id']} for sg in sgs_with_missing_nsx_group.values()] _log_info(constants.FIREWALL_NSX_GROUPS, missing_securitgroups_info, attrs=['securitygroup-name', 'securitygroup-id', 'nsx-securitygroup-id']) return bool(missing_securitgroups_info) def _find_missing_sections(): fw_sections = nsxv_firewall.list_fw_sections() sg_mappings = neutron_sg.get_security_groups_mappings() missing_sections = {} for sg_db in sg_mappings: for fw_section in fw_sections: if 
fw_section['id'] == sg_db.get('section-uri', '').split('/')[-1]: break else: missing_sections[sg_db['id']] = sg_db return missing_sections @admin_utils.list_mismatches_handler(constants.FIREWALL_SECTIONS) @admin_utils.output_header def list_missing_firewall_sections(resource, event, trigger, **kwargs): sgs_with_missing_section = _find_missing_sections() missing_sections_info = [{'securitygroup-name': sg['name'], 'securitygroup-id': sg['id'], 'section-id': sg['section-uri']} for sg in sgs_with_missing_section.values()] _log_info(constants.FIREWALL_SECTIONS, missing_sections_info, attrs=['securitygroup-name', 'securitygroup-id', 'section-uri']) return bool(missing_sections_info) def _get_unused_firewall_sections(): fw_sections = nsxv_firewall.list_fw_sections() sg_mappings = neutron_sg.get_security_groups_mappings() unused_sections = [] for fw_section in fw_sections: for sg_db in sg_mappings: if fw_section['id'] == sg_db.get('section-uri', '').split('/')[-1]: break else: # skip sections with non neutron like names if re.search("SG Section: .* (.*)", fw_section['name']): unused_sections.append(fw_section) return unused_sections @admin_utils.output_header def list_unused_firewall_sections(resource, event, trigger, **kwargs): unused_sections = _get_unused_firewall_sections() _log_info(constants.FIREWALL_SECTIONS, unused_sections, attrs=['name', 'id']) return bool(unused_sections) @admin_utils.output_header def clean_unused_firewall_sections(resource, event, trigger, **kwargs): unused_sections = _get_unused_firewall_sections() for fw_section in unused_sections: LOG.info("Deleting firewall section %s", fw_section['id']) nsxv_firewall.delete_fw_section(fw_section['id']) return bool(unused_sections) def _find_orphaned_section_rules(): fw_sections = nsxv_firewall.list_fw_sections() sg_mappings = neutron_sg.get_security_groups_mappings() rules_mappings = neutron_sg.get_security_group_rules_mappings() mapped_rules_ids = [rule['nsx_rule_id'] for rule in rules_mappings] 
orphaned_rules = [] for sg_db in sg_mappings: for fw_section in fw_sections: if fw_section['id'] == sg_db.get('section-uri', '').split('/')[-1]: # Neutron section. nsx_rules = nsxv_firewall.list_fw_section_rules( sg_db.get('section-uri')) for nsx_rule in nsx_rules: if str(nsx_rule['id']) not in mapped_rules_ids: orphaned_rules.append( {'nsx-rule-id': nsx_rule['id'], 'section-uri': sg_db['section-uri'], 'section-id': fw_section['id'], 'security-group-id': sg_db['id'], 'security-group-name': sg_db['name']}) return orphaned_rules @admin_utils.output_header def list_orphaned_firewall_section_rules(resource, event, trigger, **kwargs): orphaned_rules = _find_orphaned_section_rules() _log_info(constants.FIREWALL_SECTIONS, orphaned_rules, attrs=['security-group-name', 'security-group-id', 'section-id', 'nsx-rule-id']) return bool(orphaned_rules) @admin_utils.output_header def clean_orphaned_firewall_section_rules(resource, event, trigger, **kwargs): orphaned_rules = _find_orphaned_section_rules() for rule in orphaned_rules: try: nsxv_firewall.remove_rule_from_section( rule['section-uri'], rule['nsx-rule-id']) except Exception as e: LOG.error("Failed to delete rule %s from section %s: %s", rule['nsx-rule-id'], rule['section-id'], e) else: LOG.info("Backend rule %s was deleted from section %s", rule['nsx-rule-id'], rule['section-id']) @admin_utils.output_header def reorder_firewall_sections(resource, event, trigger, **kwargs): nsxv_firewall.reorder_fw_sections() @admin_utils.fix_mismatches_handler(constants.SECURITY_GROUPS) @admin_utils.output_header def fix_security_groups(resource, event, trigger, **kwargs): context_ = n_context.get_admin_context() sgs_with_missing_section = _find_missing_sections() sgs_with_missing_nsx_group = _find_missing_security_groups() if not sgs_with_missing_section and not sgs_with_missing_nsx_group: # no mismatches return with utils.NsxVPluginWrapper() as plugin: # If only the fw section is missing then create it. 
for sg_id in (set(sgs_with_missing_section.keys()) - set(sgs_with_missing_nsx_group.keys())): neutron_sg.delete_security_group_section_mapping(sg_id) secgroup = plugin.get_security_group(context_, sg_id) plugin._create_fw_section_for_security_group( context_, secgroup, sgs_with_missing_section[sg_id]['nsx-securitygroup-id']) LOG.info("Created NSX section for security group %s", sg_id) # If nsx security-group is missing then create both nsx security-group # and a new fw section (remove old one). for sg_id, sg in sgs_with_missing_nsx_group.items(): secgroup = plugin.get_security_group(context_, sg_id) if sg_id not in sgs_with_missing_section: plugin._delete_section(sg['section-uri']) neutron_sg.delete_security_group_section_mapping(sg_id) neutron_sg.delete_security_group_backend_mapping(sg_id) plugin._process_security_group_create_backend_resources(context_, secgroup) LOG.info("Created NSX section & security group for security group" " %s", sg_id) nsx_id = nsx_db.get_nsx_security_group_id(context_.session, sg_id, moref=False) for vnic_id in neutron_sg.get_vnics_in_security_group(sg_id): plugin._add_member_to_security_group(nsx_id, vnic_id) @admin_utils.output_header def list_policies(resource, event, trigger, **kwargs): """List nsx service composer policies""" context = n_context.get_admin_context() with utils.NsxVPluginWrapper() as plugin: policies = plugin.get_nsx_policies(context) policies.sort(key=operator.itemgetter('id')) _log_info("NSX service composer policies:", policies, attrs=['id', 'name', 'description']) @admin_utils.output_header def migrate_sg_to_policy(resource, event, trigger, **kwargs): """Change the mode of a security group from rules to NSX policy""" if not kwargs.get('property'): LOG.error("Need to specify security-group-id and policy-id " "parameters") return # input validation properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) sg_id = properties.get('security-group-id') if not sg_id: LOG.error("Need to specify security-group-id 
parameter") return policy_id = properties.get('policy-id') if not policy_id: LOG.error("Need to specify policy-id parameter") return # validate that the security group exist and contains rules and no policy context_ = n_context.get_admin_context() with utils.NsxVPluginWrapper() as plugin: try: secgroup = plugin.get_security_group(context_, sg_id) except ext_sg.SecurityGroupNotFound: LOG.error("Security group %s was not found", sg_id) return if secgroup.get('policy'): LOG.error("Security group %s already uses a policy", sg_id) return # validate that the policy exists if not plugin.nsx_v.vcns.validate_inventory(policy_id): LOG.error("NSX policy %s was not found", policy_id) return # get the nsx id from the backend nsx_sg_id = nsx_db.get_nsx_security_group_id(context_.session, sg_id, moref=True) if not nsx_sg_id: LOG.error("Did not find security groups %s neutron ID", sg_id) return # Delete the rules from the security group LOG.info("Deleting the rules of security group: %s", sg_id) for rule in secgroup.get('security_group_rules', []): try: plugin.delete_security_group_rule(context_, rule['id']) except Exception as e: LOG.warning("Failed to delete rule %(r)s from security " "group %(sg)s: %(e)s", {'r': rule['id'], 'sg': sg_id, 'e': e}) # continue anyway # Delete the security group FW section LOG.info("Deleting the section of security group: %s", sg_id) try: section_uri = plugin._get_section_uri(context_.session, sg_id) plugin._delete_section(section_uri) nsxv_db.delete_neutron_nsx_section_mapping( context_.session, sg_id) except Exception as e: LOG.warning("Failed to delete firewall section of security " "group %(sg)s: %(e)s", {'sg': sg_id, 'e': e}) # continue anyway # bind this security group to the policy in the backend and DB LOG.info("Binding the NSX security group %(nsx)s to policy " "%(pol)s", {'nsx': nsx_sg_id, 'pol': policy_id}) plugin._update_nsx_security_group_policies( policy_id, None, nsx_sg_id) with context_.session.begin(subtransactions=True): prop = 
context_.session.query( extended_secgroup.NsxExtendedSecurityGroupProperties).\ filter_by(security_group_id=sg_id).one() prop[sg_policy.POLICY] = policy_id LOG.info("Done.") @admin_utils.output_header def firewall_update_cluster_default_fw_section(resource, event, trigger, **kwargs): with utils.NsxVPluginWrapper() as plugin: plugin._create_cluster_default_fw_section(update_section=True) LOG.info("Cluster default FW section updated.") @admin_utils.output_header def update_security_groups_logging(resource, event, trigger, **kwargs): """Update allowed traffic logging for all neutron security group rules""" errmsg = ("Need to specify log-allowed-traffic property. Add --property " "log-allowed-traffic=true/false") if not kwargs.get('property'): LOG.error("%s", errmsg) return properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) log_allowed_str = properties.get('log-allowed-traffic') if not log_allowed_str or log_allowed_str.lower() not in ['true', 'false']: LOG.error("%s", errmsg) return log_allowed = log_allowed_str.lower() == 'true' context = n_context.get_admin_context() with utils.NsxVPluginWrapper() as plugin: vcns = plugin.nsx_v.vcns sg_utils = plugin.nsx_sg_utils # If the section/sg is already logged, then no action is # required. security_groups = plugin.get_security_groups(context) LOG.info("Going to update logging of %s sections", len(security_groups)) for sg in [sg for sg in plugin.get_security_groups(context) if sg.get(sg_logging.LOGGING) is False]: if sg.get(sg_policy.POLICY): # Logging is not relevant with a policy continue section_uri = plugin._get_section_uri(context.session, sg['id']) if section_uri is None: continue # Section/sg is not logged, update rules logging according # to the 'log_security_groups_allowed_traffic' config # option. 
try: h, c = vcns.get_section(section_uri) section = sg_utils.parse_section(c) section_needs_update = sg_utils.set_rules_logged_option( section, log_allowed) if section_needs_update: vcns.update_section(section_uri, sg_utils.to_xml_string(section), h) except Exception as exc: LOG.error('Unable to update security group %(sg)s ' 'section for logging. %(e)s', {'e': exc, 'sg': sg['id']}) registry.subscribe(update_security_groups_logging, constants.SECURITY_GROUPS, shell.Operations.UPDATE_LOGGING.value) registry.subscribe(migrate_sg_to_policy, constants.SECURITY_GROUPS, shell.Operations.MIGRATE_TO_POLICY.value) registry.subscribe(list_policies, constants.SECURITY_GROUPS, shell.Operations.LIST_POLICIES.value) registry.subscribe(reorder_firewall_sections, constants.FIREWALL_SECTIONS, shell.Operations.NSX_REORDER.value) registry.subscribe(fix_security_groups, constants.FIREWALL_SECTIONS, shell.Operations.NSX_UPDATE.value) registry.subscribe(firewall_update_cluster_default_fw_section, constants.FIREWALL_SECTIONS, shell.Operations.NSX_UPDATE.value) registry.subscribe(list_unused_firewall_sections, constants.FIREWALL_SECTIONS, shell.Operations.LIST_UNUSED.value) registry.subscribe(clean_unused_firewall_sections, constants.FIREWALL_SECTIONS, shell.Operations.NSX_CLEAN.value) registry.subscribe(list_orphaned_firewall_section_rules, constants.ORPHANED_RULES, shell.Operations.LIST.value) registry.subscribe(clean_orphaned_firewall_section_rules, constants.ORPHANED_RULES, shell.Operations.NSX_CLEAN.value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxv/resources/spoofguard_policy.py0000644000175000017500000002553300000000000032372 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import formatters import vmware_nsx.shell.admin.plugins.common.utils as admin_utils import vmware_nsx.shell.admin.plugins.nsxv.resources.utils as utils import vmware_nsx.shell.resources as shell from neutron_lib.callbacks import registry from neutron_lib import exceptions from vmware_nsx.db import nsxv_db from vmware_nsx.extensions import ( vnicindex as ext_vnic_idx) from oslo_log import log as logging LOG = logging.getLogger(__name__) nsxv = utils.get_nsxv_client() def get_spoofguard_policies(): nsxv = utils.get_nsxv_client() return nsxv.get_spoofguard_policies()[1].get("policies") def get_spoofguard_policy_data(policy_id): nsxv = utils.get_nsxv_client() return nsxv.get_spoofguard_policy_data(policy_id)[1].get( 'spoofguardList', []) @admin_utils.output_header def nsx_list_spoofguard_policies(resource, event, trigger, **kwargs): """List spoofguard policies from NSXv backend""" policies = get_spoofguard_policies() LOG.info(formatters.output_formatter(constants.SPOOFGUARD_POLICY, policies, ['policyId', 'name'])) def get_spoofguard_policy_network_mappings(): spgapi = utils.NeutronDbClient() return nsxv_db.get_nsxv_spoofguard_policy_network_mappings( spgapi.context) @admin_utils.output_header def neutron_list_spoofguard_policy_mappings(resource, event, trigger, **kwargs): mappings = get_spoofguard_policy_network_mappings() LOG.info(formatters.output_formatter(constants.SPOOFGUARD_POLICY, mappings, ['network_id', 'policy_id'])) def 
get_missing_spoofguard_policy_mappings(reverse=None): nsxv_spoofguard_policies = set() for spg in get_spoofguard_policies(): nsxv_spoofguard_policies.add(spg.get('policyId')) neutron_spoofguard_policy_mappings = set() for binding in get_spoofguard_policy_network_mappings(): neutron_spoofguard_policy_mappings.add(binding.policy_id) if reverse: return nsxv_spoofguard_policies - neutron_spoofguard_policy_mappings else: return neutron_spoofguard_policy_mappings - nsxv_spoofguard_policies @admin_utils.output_header def nsx_list_missing_spoofguard_policies(resource, event, trigger, **kwargs): """List missing spoofguard policies on NSXv. Spoofguard policies that have a binding in Neutron Db but there is no policy on NSXv backend to back it. """ props = kwargs.get('property') reverse = True if props and props[0] == 'reverse' else False if reverse: LOG.info("Spoofguard policies on NSXv but not present in " "Neutron Db") else: LOG.info("Spoofguard policies in Neutron Db but not present " "on NSXv") missing_policies = get_missing_spoofguard_policy_mappings(reverse) if not missing_policies: LOG.info("\nNo missing spoofguard policies found." 
"\nNeutron DB and NSXv backend are in sync\n") else: LOG.info(missing_policies) missing_policies = [{'policy_id': pid} for pid in missing_policies] LOG.info(formatters.output_formatter( constants.SPOOFGUARD_POLICY, missing_policies, ['policy_id'])) def get_port_vnic_id(plugin, port): vnic_idx = port.get(ext_vnic_idx.VNIC_INDEX) device_id = port.get('device_id') return plugin._get_port_vnic_id(vnic_idx, device_id) def nsx_list_mismatch_addresses_for_net(context, plugin, network_id, policy_id): policy_data = get_spoofguard_policy_data(policy_id) missing = [] # Get all neutron compute ports on this network port_filters = {'network_id': [network_id]} neutron_ports = plugin.get_ports(context, filters=port_filters) comp_ports = [port for port in neutron_ports if port.get('device_owner', '').startswith('compute:')] for port in comp_ports: if not port['port_security_enabled']: # This port is not in spoofguard continue error_data = None port_ips = [] for pair in port.get('allowed_address_pairs'): port_ips.append(pair['ip_address']) for fixed in port.get('fixed_ips'): port_ips.append(fixed['ip_address']) if not port_ips: continue port_ips.sort() mac_addr = port['mac_address'] vnic_id = get_port_vnic_id(plugin, port) # look for this port in the spoofguard data found_port = False for spd in policy_data: if spd['id'] == vnic_id: found_port = True actual_ips = spd.get('publishedIpAddress', {}).get('ipAddresses', []) actual_ips.sort() if actual_ips != port_ips: error_data = ('Different IPs (%s/%s)' % ( len(actual_ips), len(port_ips))) elif spd.get('publishedMacAddress') != mac_addr: error_data = ('Different MAC address (%s/%s)' % ( spd.get('publishedMacAddress'), mac_addr)) continue if not found_port: error_data = 'Port missing from SG policy' if error_data: missing.append({'network': network_id, 'policy': policy_id, 'port': port['id'], 'data': error_data}) return missing @admin_utils.output_header def nsx_list_mismatch_addresses(resource, event, trigger, **kwargs): """List 
missing spoofguard policies approved addresses on NSXv. Address pairs defined on neutron compute ports that are missing from the NSX-V spoofguard policy of a specific/all networks. """ network_id = None if kwargs.get('property'): properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) network_id = properties.get('network') spgapi = utils.NeutronDbClient() if network_id: policy_id = nsxv_db.get_spoofguard_policy_id( spgapi.context.session, network_id) if not policy_id: LOG.error("Could not find spoofguard policy for neutron network " "%s", network_id) return with utils.NsxVPluginWrapper() as plugin: missing_data = nsx_list_mismatch_addresses_for_net( spgapi.context, plugin, network_id, policy_id) else: with utils.NsxVPluginWrapper() as plugin: missing_data = [] # Go over all the networks with spoofguard policies mappings = get_spoofguard_policy_network_mappings() for entry in mappings: missing_data.extend(nsx_list_mismatch_addresses_for_net( spgapi.context, plugin, entry['network_id'], entry['policy_id'])) if missing_data: LOG.info(formatters.output_formatter( constants.SPOOFGUARD_POLICY, missing_data, ['network', 'policy', 'port', 'data'])) else: LOG.info("No mismatches found.") @admin_utils.output_header def nsx_fix_mismatch_addresses(resource, event, trigger, **kwargs): """Fix missing spoofguard policies approved addresses for a port.""" port_id = None if kwargs.get('property'): properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) port_id = properties.get('port') if not port_id: usage_msg = ("Need to specify the id of the neutron port. 
" "Add --property port=") LOG.error(usage_msg) return spgapi = utils.NeutronDbClient() with utils.NsxVPluginWrapper() as plugin: try: port = plugin.get_port(spgapi.context, port_id) except exceptions.PortNotFound: LOG.error("Could not find neutron port %s", port_id) return vnic_id = get_port_vnic_id(plugin, port) plugin._update_vnic_assigned_addresses( spgapi.context.session, port, vnic_id) LOG.info("Done.") def nsx_clean_spoofguard_policy(resource, event, trigger, **kwargs): """Delete spoofguard policy""" errmsg = ("Need to specify policy-id. Add --property " "policy-id=") if not kwargs.get('property'): LOG.error("%s", errmsg) return properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) policy_id = properties.get('policy-id') if not policy_id: LOG.error("%s", errmsg) return try: h, c = nsxv.get_spoofguard_policy(policy_id) except exceptions.NeutronException as e: LOG.error("Unable to retrieve policy %(p)s: %(e)s", {'p': policy_id, 'e': str(e)}) else: if not c.get('spoofguardList'): LOG.error("Policy %s does not exist", policy_id) return confirm = admin_utils.query_yes_no( "Do you want to delete spoofguard-policy: %s" % policy_id, default="no") if not confirm: LOG.info("spoofguard-policy deletion aborted by user") return try: nsxv.delete_spoofguard_policy(policy_id) except Exception as e: LOG.error("%s", str(e)) LOG.info('spoofguard-policy successfully deleted.') registry.subscribe(neutron_list_spoofguard_policy_mappings, constants.SPOOFGUARD_POLICY, shell.Operations.LIST.value) registry.subscribe(nsx_list_spoofguard_policies, constants.SPOOFGUARD_POLICY, shell.Operations.LIST.value) registry.subscribe(nsx_list_missing_spoofguard_policies, constants.SPOOFGUARD_POLICY, shell.Operations.LIST.value) registry.subscribe(nsx_list_mismatch_addresses, constants.SPOOFGUARD_POLICY, shell.Operations.LIST_MISMATCHES.value) registry.subscribe(nsx_fix_mismatch_addresses, constants.SPOOFGUARD_POLICY, shell.Operations.FIX_MISMATCH.value) 
registry.subscribe(nsx_clean_spoofguard_policy, constants.SPOOFGUARD_POLICY, shell.Operations.CLEAN.value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxv/resources/utils.py0000644000175000017500000001463500000000000030003 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time import mock from oslo_config import cfg from oslo_log import log as logging from neutron_lib import context as neutron_context from neutron_lib.plugins import directory from vmware_nsx.common import config from vmware_nsx.extensions import projectpluginmap from vmware_nsx import plugin from vmware_nsx.plugins.nsx_v.vshield import vcns from vmware_nsx.shell.admin.plugins.common import utils as admin_utils LOG = logging.getLogger(__name__) def get_nsxv_client(): return vcns.Vcns( address=cfg.CONF.nsxv.manager_uri, user=cfg.CONF.nsxv.user, password=cfg.CONF.nsxv.password, ca_file=cfg.CONF.nsxv.ca_file, insecure=cfg.CONF.nsxv.insecure) def get_plugin_filters(context): return admin_utils.get_plugin_filters( context, projectpluginmap.NsxPlugins.NSX_V) class NeutronDbClient(object): def __init__(self): super(NeutronDbClient, self) self.context = neutron_context.get_admin_context() class NsxVPluginWrapper(plugin.NsxVPlugin): def __init__(self): config.register_nsxv_azs(cfg.CONF, 
cfg.CONF.nsxv.availability_zones) self.context = neutron_context.get_admin_context() self.filters = get_plugin_filters(self.context) super(NsxVPluginWrapper, self).__init__() # Make this the core plugin directory.add_plugin('CORE', self) # finish the plugin initialization # (with md-proxy config, but without housekeeping) with mock.patch("vmware_nsx.plugins.common.housekeeper." "housekeeper.NsxHousekeeper"): self.init_complete(0, 0, 0) def start_rpc_listeners(self): pass def _extend_get_network_dict_provider(self, context, net): self._extend_network_dict_provider(context, net) # skip getting the Qos policy ID because get_object calls # plugin init again on admin-util environment def count_spawn_jobs(self): # check if there are any spawn jobs running return self.edge_manager._get_worker_pool().running() # Define enter & exit to be used in with statements def __enter__(self): return self def __exit__(self, type, value, traceback): """Wait until no more jobs are pending We want to wait until all spawn edge creation are done, or else the edges might be in PERNDING_CREATE state in the nsx DB """ if not self.count_spawn_jobs(): return LOG.warning("Waiting for plugin jobs to finish properly...") sleep_time = 1 print_time = 20 max_loop = 600 for print_index in range(1, max_loop): n_jobs = self.count_spawn_jobs() if n_jobs > 0: if (print_index % print_time) == 0: LOG.warning("Still Waiting on %(jobs)s " "job%(plural)s", {'jobs': n_jobs, 'plural': 's' if n_jobs > 1 else ''}) time.sleep(sleep_time) else: LOG.warning("Done.") return LOG.warning("Sorry. Waited for too long. 
Some jobs are still " "running.") def _update_filters(self, requested_filters): filters = self.filters.copy() if requested_filters: filters.update(requested_filters) return filters def get_networks(self, context, filters=None, fields=None, filter_project=True): if filter_project: filters = self._update_filters(filters) return super(NsxVPluginWrapper, self).get_networks( context, filters=filters, fields=fields) def get_subnets(self, context, filters=None, fields=None, filter_project=True): if filter_project: filters = self._update_filters(filters) return super(NsxVPluginWrapper, self).get_subnets( context, filters=filters, fields=fields) def get_ports(self, context, filters=None, fields=None, filter_project=True): if filter_project: filters = self._update_filters(filters) return super(NsxVPluginWrapper, self).get_ports( context, filters=filters, fields=fields) def get_routers(self, context, filters=None, fields=None, filter_project=True): if filter_project: filters = self._update_filters(filters) return super(NsxVPluginWrapper, self).get_routers( context, filters=filters, fields=fields) def get_nsxv_backend_edges(): """Get a list of all the backend edges and some of their attributes """ nsxv = get_nsxv_client() edges = nsxv.get_edges() backend_edges = [] for edge in edges: summary = edge.get('appliancesSummary') size = ha = None if summary: size = summary.get('applianceSize') deployed_vms = summary.get('numberOfDeployedVms', 1) ha = 'Enabled' if deployed_vms > 1 else 'Disabled' # get all the relevant backend information for this edge edge_data = { 'id': edge.get('id'), 'name': edge.get('name'), 'size': size, 'type': edge.get('edgeType'), 'ha': ha, } backend_edges.append(edge_data) return backend_edges def get_edge_syslog_info(edge_id): """Get syslog information for specific edge id""" nsxv = get_nsxv_client() syslog_info = nsxv.get_edge_syslog(edge_id)[1] if not syslog_info['enabled']: return 'Disabled' output = "" if 'protocol' in syslog_info: output += 
syslog_info['protocol'] if 'serverAddresses' in syslog_info: for server_address in syslog_info['serverAddresses']['ipAddress']: output += "\n" + server_address return output ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2262545 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxv3/0000755000175000017500000000000000000000000024331 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxv3/__init__.py0000644000175000017500000000000000000000000026430 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2302547 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxv3/resources/0000755000175000017500000000000000000000000026343 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxv3/resources/__init__.py0000644000175000017500000000000000000000000030442 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxv3/resources/certificates.py0000644000175000017500000000510100000000000031357 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.common import v3_common_cert from vmware_nsx.shell import resources as shell from neutron_lib.callbacks import registry from oslo_config import cfg @admin_utils.output_header def generate_cert(resource, event, trigger, **kwargs): """Generate self signed client certificate and private key """ return v3_common_cert.generate_cert(cfg.CONF.nsx_v3, **kwargs) @admin_utils.output_header def delete_cert(resource, event, trigger, **kwargs): """Delete client certificate and private key """ return v3_common_cert.delete_cert(cfg.CONF.nsx_v3, **kwargs) @admin_utils.output_header def show_cert(resource, event, trigger, **kwargs): """Show client certificate details """ return v3_common_cert.show_cert(cfg.CONF.nsx_v3, **kwargs) @admin_utils.output_header def import_cert(resource, event, trigger, **kwargs): """Import client certificate that was generated externally""" return v3_common_cert.import_cert(cfg.CONF.nsx_v3, **kwargs) @admin_utils.output_header def show_nsx_certs(resource, event, trigger, **kwargs): """Show client certificates associated with openstack identity in NSX""" return v3_common_cert.show_nsx_certs(cfg.CONF.nsx_v3, **kwargs) registry.subscribe(generate_cert, constants.CERTIFICATE, shell.Operations.GENERATE.value) registry.subscribe(show_cert, constants.CERTIFICATE, shell.Operations.SHOW.value) registry.subscribe(delete_cert, constants.CERTIFICATE, shell.Operations.CLEAN.value) 
registry.subscribe(import_cert, constants.CERTIFICATE, shell.Operations.IMPORT.value) registry.subscribe(show_nsx_certs, constants.CERTIFICATE, shell.Operations.NSX_LIST.value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxv3/resources/cluster.py0000644000175000017500000000271700000000000030405 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.callbacks import registry from oslo_log import log as logging from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils from vmware_nsx.shell import resources as shell LOG = logging.getLogger(__name__) @admin_utils.output_header def find_cluster_managers_ips(resource, event, trigger, **kwargs): """Show the current NSX rate limit.""" nsxlib = utils.get_connected_nsxlib() manager_ips = nsxlib.cluster_nodes.get_managers_ips() LOG.info("NSX Cluster has %s manager nodes:", len(manager_ips)) for ip in manager_ips: LOG.info("%s", str(ip)) registry.subscribe(find_cluster_managers_ips, constants.CLUSTER, shell.Operations.SHOW.value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxv3/resources/config.py0000644000175000017500000000265400000000000030171 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.callbacks import registry from oslo_log import log as logging from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils from vmware_nsx.shell import resources as shell LOG = logging.getLogger(__name__) @admin_utils.output_header def validate_configuration(resource, event, trigger, **kwargs): """Validate the nsxv3 configuration""" try: utils.NsxV3PluginWrapper() except Exception as e: LOG.error("Configuration validation failed: %s", e) else: LOG.info("Configuration validation succeeded") registry.subscribe(validate_configuration, constants.CONFIG, shell.Operations.VALIDATE.value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxv3/resources/dhcp_binding.py0000644000175000017500000002176000000000000031333 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import netaddr from neutron_lib.callbacks import registry from neutron_lib import constants as const from neutron_lib import context as neutron_context from neutron_lib import exceptions from oslo_config import cfg from oslo_log import log as logging from vmware_nsx.common import utils as nsx_utils from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import formatters from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils import vmware_nsx.shell.resources as shell from vmware_nsxlib.v3 import nsx_constants LOG = logging.getLogger(__name__) neutron_client = utils.NeutronDbClient() @admin_utils.output_header def list_dhcp_bindings(resource, event, trigger, **kwargs): """List DHCP bindings in Neutron.""" comp_ports = [port for port in neutron_client.get_ports() if nsx_utils.is_port_dhcp_configurable(port)] LOG.info(formatters.output_formatter(constants.DHCP_BINDING, comp_ports, ['id', 'mac_address', 'fixed_ips'])) @admin_utils.output_header def nsx_update_dhcp_bindings(resource, event, trigger, **kwargs): """Resync DHCP bindings for NSXv3 CrossHairs.""" nsxlib = utils.get_connected_nsxlib() nsx_version = nsxlib.get_version() if not nsx_utils.is_nsx_version_1_1_0(nsx_version): LOG.error("This utility is not available for NSX version %s", nsx_version) return dhcp_profile_uuid = None # TODO(asarfaty) Add availability zones support here if kwargs.get('property'): properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) dhcp_profile_uuid = properties.get('dhcp_profile_uuid') if not dhcp_profile_uuid: LOG.error("dhcp_profile_uuid is not defined") return cfg.CONF.set_override('dhcp_agent_notification', False) cfg.CONF.set_override('native_dhcp_metadata', True, 'nsx_v3') cfg.CONF.set_override('dhcp_profile', dhcp_profile_uuid, 'nsx_v3') port_bindings = {} # lswitch_id: [(port_id, mac, ip), ...] 
server_bindings = {} # lswitch_id: dhcp_server_id ports = neutron_client.get_ports() for port in ports: device_owner = port['device_owner'] if (device_owner != const.DEVICE_OWNER_DHCP and not nsx_utils.is_port_dhcp_configurable(port)): continue for fixed_ip in port['fixed_ips']: if netaddr.IPNetwork(fixed_ip['ip_address']).version == 6: continue network_id = port['network_id'] subnet = neutron_client.get_subnet(None, fixed_ip['subnet_id']) if device_owner == const.DEVICE_OWNER_DHCP: # For each DHCP-enabled network, create a logical DHCP server # and update the attachment type to DHCP on the corresponding # logical port of the Neutron DHCP port. network = neutron_client.get_network(None, port['network_id']) net_tags = nsxlib.build_v3_tags_payload( network, resource_type='os-neutron-net-id', project_name='admin') # TODO(asarfaty): add default_dns_nameservers & dns_domain # from availability zone server_data = nsxlib.native_dhcp.build_server_config( network, subnet, port, net_tags) server_data['dhcp_profile_id'] = dhcp_profile_uuid dhcp_server = nsxlib.dhcp_server.create(**server_data) LOG.info("Created logical DHCP server %(server)s for " "network %(network)s", {'server': dhcp_server['id'], 'network': port['network_id']}) # Add DHCP service binding in neutron DB. neutron_client.add_dhcp_service_binding( network['id'], port['id'], dhcp_server['id']) # Update logical port for DHCP purpose. lswitch_id, lport_id = ( neutron_client.get_lswitch_and_lport_id(port['id'])) nsxlib.logical_port.update( lport_id, dhcp_server['id'], attachment_type=nsx_constants.ATTACHMENT_DHCP) server_bindings[lswitch_id] = dhcp_server['id'] LOG.info("Updated DHCP logical port %(port)s for " "network %(network)s", {'port': lport_id, 'network': port['network_id']}) elif subnet['enable_dhcp']: # Store (mac, ip) binding of each compute port in a # DHCP-enabled subnet. 
lswitch_id = neutron_client.net_id_to_lswitch_id(network_id) bindings = port_bindings.get(lswitch_id, []) bindings.append((port['id'], port['mac_address'], fixed_ip['ip_address'], fixed_ip['subnet_id'])) port_bindings[lswitch_id] = bindings break # process only the first IPv4 address # Populate mac/IP bindings in each logical DHCP server. for lswitch_id, bindings in port_bindings.items(): dhcp_server_id = server_bindings.get(lswitch_id) if not dhcp_server_id: continue for (port_id, mac, ip, subnet_id) in bindings: hostname = 'host-%s' % ip.replace('.', '-') options = {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': ip}]}} subnet = neutron_client.get_subnet(None, subnet_id) binding = nsxlib.dhcp_server.create_binding( dhcp_server_id, mac, ip, hostname, cfg.CONF.nsx_v3.dhcp_lease_time, options, subnet.get('gateway_ip')) # Add DHCP static binding in neutron DB. neutron_client.add_dhcp_static_binding( port_id, subnet_id, ip, dhcp_server_id, binding['id']) LOG.info("Added DHCP binding (mac: %(mac)s, ip: %(ip)s) " "for neutron port %(port)s", {'mac': mac, 'ip': ip, 'port': port_id}) @admin_utils.output_header def nsx_recreate_dhcp_server(resource, event, trigger, **kwargs): """Recreate DHCP server & binding for a neutron network""" if not cfg.CONF.nsx_v3.native_dhcp_metadata: LOG.error("Native DHCP is disabled.") return errmsg = ("Need to specify net-id property. 
Add --property net-id=") if not kwargs.get('property'): LOG.error("%s", errmsg) return properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) net_id = properties.get('net-id') if not net_id: LOG.error("%s", errmsg) return context = neutron_context.get_admin_context() with utils.NsxV3PluginWrapper() as plugin: # verify that this is an existing network with dhcp enabled try: network = plugin._get_network(context, net_id) except exceptions.NetworkNotFound: LOG.error("Network %s was not found", net_id) return if plugin._has_no_dhcp_enabled_subnet(context, network): LOG.error("Network %s has no DHCP enabled subnet", net_id) return dhcp_relay = plugin.get_network_az_by_net_id( context, net_id).dhcp_relay_service if dhcp_relay: LOG.error("Native DHCP should not be enabled with dhcp relay") return # find the dhcp subnet of this network subnet_id = None for subnet in network.subnets: if subnet.enable_dhcp: subnet_id = subnet.id break if not subnet_id: LOG.error("Network %s has no DHCP enabled subnet", net_id) return dhcp_subnet = plugin.get_subnet(context, subnet_id) # disable and re-enable the dhcp plugin._enable_native_dhcp(context, network, dhcp_subnet) LOG.info("Done.") registry.subscribe(list_dhcp_bindings, constants.DHCP_BINDING, shell.Operations.LIST.value) registry.subscribe(nsx_update_dhcp_bindings, constants.DHCP_BINDING, shell.Operations.NSX_UPDATE.value) registry.subscribe(nsx_recreate_dhcp_server, constants.DHCP_BINDING, shell.Operations.NSX_RECREATE.value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxv3/resources/dhcp_servers.py0000644000175000017500000001053300000000000031406 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.callbacks import registry from neutron_lib import context from oslo_config import cfg from oslo_log import log as logging from vmware_nsx.common import utils as nsx_utils from vmware_nsx.plugins.nsx_v3 import utils as v3_utils from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import formatters from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils import vmware_nsx.shell.resources as shell LOG = logging.getLogger(__name__) neutron_client = utils.NeutronDbClient() def _get_dhcp_profile_uuid(**kwargs): if kwargs.get('property'): properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) dhcp_profile_uuid = properties.get('dhcp_profile_uuid') if dhcp_profile_uuid: return dhcp_profile_uuid nsxlib = utils.get_connected_nsxlib() if cfg.CONF.nsx_v3.dhcp_profile: return nsxlib.native_dhcp_profile.get_id_by_name_or_id( cfg.CONF.nsx_v3.dhcp_profile) @admin_utils.output_header def nsx_list_orphaned_dhcp_servers(resource, event, trigger, **kwargs): """List logical DHCP servers without associated DHCP-enabled subnet.""" nsxlib = utils.get_connected_nsxlib() nsx_version = nsxlib.get_version() if not nsx_utils.is_nsx_version_1_1_0(nsx_version): LOG.error("This utility is not available for NSX version %s", nsx_version) return dhcp_profile_uuid = _get_dhcp_profile_uuid(**kwargs) if not dhcp_profile_uuid: LOG.error("dhcp_profile_uuid is not defined") return orphaned_servers = v3_utils.get_orphaned_dhcp_servers( 
context.get_admin_context(), neutron_client, nsxlib, dhcp_profile_uuid) LOG.info(formatters.output_formatter( constants.ORPHANED_DHCP_SERVERS, orphaned_servers, ['id', 'neutron_net_id', 'display_name'])) @admin_utils.output_header def nsx_clean_orphaned_dhcp_servers(resource, event, trigger, **kwargs): """Remove logical DHCP servers without associated DHCP-enabled subnet.""" # For each orphaned DHCP server, # (1) delete the attached logical DHCP port, # (2) delete the logical DHCP server, # (3) clean corresponding neutron DB entry. nsxlib = utils.get_connected_nsxlib() nsx_version = nsxlib.get_version() if not nsx_utils.is_nsx_version_1_1_0(nsx_version): LOG.error("This utility is not available for NSX version %s", nsx_version) return dhcp_profile_uuid = _get_dhcp_profile_uuid(**kwargs) if not dhcp_profile_uuid: LOG.error("dhcp_profile_uuid is not defined") return cfg.CONF.set_override('dhcp_agent_notification', False) cfg.CONF.set_override('native_dhcp_metadata', True, 'nsx_v3') cfg.CONF.set_override('dhcp_profile', dhcp_profile_uuid, 'nsx_v3') orphaned_servers = v3_utils.get_orphaned_dhcp_servers( context.get_admin_context(), neutron_client, nsxlib, dhcp_profile_uuid) for server in orphaned_servers: success, error = v3_utils.delete_orphaned_dhcp_server( context.get_admin_context(), nsxlib, server) if success: LOG.info("Removed orphaned DHCP server %s", server['id']) else: LOG.error("Failed to clean orphaned DHCP server %(id)s. 
" "Exception: %(e)s", {'id': server['id'], 'e': error}) registry.subscribe(nsx_list_orphaned_dhcp_servers, constants.ORPHANED_DHCP_SERVERS, shell.Operations.NSX_LIST.value) registry.subscribe(nsx_clean_orphaned_dhcp_servers, constants.ORPHANED_DHCP_SERVERS, shell.Operations.NSX_CLEAN.value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxv3/resources/http_service.py0000644000175000017500000000443100000000000031416 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron_lib.callbacks import registry from oslo_log import log as logging from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils import vmware_nsx.shell.resources as shell LOG = logging.getLogger(__name__) neutron_client = utils.NeutronDbClient() @admin_utils.output_header def nsx_rate_limit_show(resource, event, trigger, **kwargs): """Show the current NSX rate limit.""" nsxlib = utils.get_connected_nsxlib() rate_limit = nsxlib.http_services.get_rate_limit() LOG.info("Current NSX rate limit is %s", rate_limit) @admin_utils.output_header def nsx_rate_limit_update(resource, event, trigger, **kwargs): """Set the NSX rate limit The default value is 40. 
0 means no limit """ nsxlib = utils.get_connected_nsxlib() rate_limit = None if kwargs.get('property'): properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) rate_limit = properties.get('value', None) if rate_limit is None or not rate_limit.isdigit(): usage = ("nsxadmin -r rate-limit -o nsx-update " "--property value=") LOG.error("Missing parameters. Usage: %s", usage) return nsxlib.http_services.update_rate_limit(rate_limit) LOG.info("NSX rate limit was updated to %s", rate_limit) registry.subscribe(nsx_rate_limit_show, constants.RATE_LIMIT, shell.Operations.SHOW.value) registry.subscribe(nsx_rate_limit_update, constants.RATE_LIMIT, shell.Operations.NSX_UPDATE.value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxv3/resources/loadbalancer.py0000644000175000017500000001146000000000000031326 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log as logging from neutron_lib.callbacks import registry from neutron_lib import context as neutron_context from vmware_nsx.db import db as nsx_db from vmware_nsx.services.lbaas.nsx_v3.implementation import lb_utils from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import formatters from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils from vmware_nsx.shell import resources as shell LOG = logging.getLogger(__name__) @admin_utils.list_handler(constants.LB_SERVICES) @admin_utils.output_header def nsx_list_lb_services(resource, event, trigger, **kwargs): """List LB services on NSX backend""" nsxlib = utils.get_connected_nsxlib() lb_services = nsxlib.load_balancer.service.list() LOG.info(formatters.output_formatter( constants.LB_SERVICES, lb_services['results'], ['display_name', 'id', 'virtual_server_ids', 'attachment'])) return bool(lb_services) @admin_utils.list_handler(constants.LB_VIRTUAL_SERVERS) @admin_utils.output_header def nsx_list_lb_virtual_servers(resource, event, trigger, **kwargs): """List LB virtual servers on NSX backend""" nsxlib = utils.get_connected_nsxlib() lb_virtual_servers = nsxlib.load_balancer.virtual_server.list() LOG.info(formatters.output_formatter( constants.LB_VIRTUAL_SERVERS, lb_virtual_servers['results'], ['display_name', 'id', 'ip_address', 'pool_id'])) return bool(lb_virtual_servers) @admin_utils.list_handler(constants.LB_POOLS) @admin_utils.output_header def nsx_list_lb_pools(resource, event, trigger, **kwargs): nsxlib = utils.get_connected_nsxlib() lb_pools = nsxlib.load_balancer.pool.list() LOG.info(formatters.output_formatter( constants.LB_POOLS, lb_pools['results'], ['display_name', 'id', 'active_monitor_ids', 'members'])) return bool(lb_pools) @admin_utils.list_handler(constants.LB_MONITORS) @admin_utils.output_header def nsx_list_lb_monitors(resource, event, trigger, 
**kwargs): nsxlib = utils.get_connected_nsxlib() lb_monitors = nsxlib.load_balancer.monitor.list() LOG.info(formatters.output_formatter( constants.LB_MONITORS, lb_monitors['results'], ['display_name', 'id', 'resource_type'])) return bool(lb_monitors) @admin_utils.output_header def nsx_update_router_lb_advertisement(resource, event, trigger, **kwargs): """The implementation of the VIP advertisement changed. This utility will update existing LB/routers """ nsxlib = utils.get_connected_nsxlib() # Get the list of neutron routers used by LB lb_services = nsxlib.load_balancer.service.list()['results'] lb_routers = [] for lb_srv in lb_services: for tag in lb_srv.get('tags', []): if tag['scope'] == 'os-neutron-router-id': lb_routers.append(tag['tag']) lb_routers = set(lb_routers) LOG.info("Going to update LB advertisement on %(num)s router(s): " "%(routers)s", {'num': len(lb_routers), 'routers': lb_routers}) context = neutron_context.get_admin_context() with utils.NsxV3PluginWrapper() as plugin: for rtr_id in lb_routers: nsx_router_id = nsx_db.get_nsx_router_id(context.session, rtr_id) if not nsx_router_id: LOG.error("Router %s NSX Id was not found.", rtr_id) continue try: # disable the global vip advertisement flag plugin.nsxlib.logical_router.update_advertisement( nsx_router_id, advertise_lb_vip=False) # Add an advertisement rule for the external network router = plugin.get_router(context, rtr_id) lb_utils.update_router_lb_vip_advertisement( context, plugin, router, nsx_router_id) except Exception as e: LOG.error("Failed updating router %(id)s: %(e)s", {'id': rtr_id, 'e': e}) LOG.info("Done.") registry.subscribe(nsx_update_router_lb_advertisement, constants.LB_ADVERTISEMENT, shell.Operations.NSX_UPDATE.value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxv3/resources/metadata_proxy.py0000644000175000017500000002256100000000000031744 
0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re import netaddr from neutron_lib.callbacks import registry from neutron_lib import constants as const from oslo_config import cfg from oslo_log import log as logging from vmware_nsx.common import config # noqa from vmware_nsx.common import utils as nsx_utils from vmware_nsx.dhcp_meta import rpc as nsx_rpc from vmware_nsx.plugins.nsx_v3 import availability_zones as nsx_az from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import formatters from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils import vmware_nsx.shell.resources as shell from vmware_nsxlib.v3 import exceptions as nsx_exc from vmware_nsxlib.v3 import nsx_constants LOG = logging.getLogger(__name__) neutron_client = utils.NeutronDbClient() def _is_metadata_network(network): # If a Neutron network has only one subnet with 169.254.169.252/30 CIDR, # then it is an internal metadata network. 
if len(network['subnets']) == 1: subnet = neutron_client.get_subnet(None, network['subnets'][0]) if subnet['cidr'] == nsx_rpc.METADATA_SUBNET_CIDR: return True return False @admin_utils.output_header def list_metadata_networks(resource, event, trigger, **kwargs): """List Metadata networks in Neutron.""" if not cfg.CONF.nsx_v3.native_metadata_route: meta_networks = [network for network in neutron_client.get_networks() if _is_metadata_network(network)] LOG.info(formatters.output_formatter(constants.METADATA_PROXY, meta_networks, ['id', 'name', 'subnets'])) else: nsxlib = utils.get_connected_nsxlib() tags = [{'scope': 'os-neutron-net-id'}] ports = nsxlib.search_by_tags(resource_type='LogicalPort', tags=tags) for port in ports['results']: if port['attachment']['attachment_type'] == 'METADATA_PROXY': net_id = None for tag in port.get('tags', []): if tag['scope'] == 'os-neutron-net-id': net_id = tag['tag'] break status = nsxlib.native_md_proxy.get_md_proxy_status( port['attachment']['id'], port['logical_switch_id']) LOG.info("Status for MD proxy on neutron network %s (logical " "switch %s) is %s", net_id, port['logical_switch_id'], status.get('proxy_status', 'Unknown')) @admin_utils.output_header def nsx_update_metadata_proxy(resource, event, trigger, **kwargs): """Update Metadata proxy for NSXv3 CrossHairs.""" nsxlib = utils.get_connected_nsxlib() nsx_version = nsxlib.get_version() if not nsx_utils.is_nsx_version_1_1_0(nsx_version): LOG.error("This utility is not available for NSX version %s", nsx_version) return metadata_proxy_uuid = None if kwargs.get('property'): properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) metadata_proxy_uuid = properties.get('metadata_proxy_uuid') if not metadata_proxy_uuid: LOG.error("metadata_proxy_uuid is not defined") return cfg.CONF.set_override('dhcp_agent_notification', False) cfg.CONF.set_override('native_dhcp_metadata', True, 'nsx_v3') cfg.CONF.set_override('metadata_proxy', metadata_proxy_uuid, 'nsx_v3') with 
utils.NsxV3PluginWrapper() as plugin: # For each Neutron network, check if it is an internal metadata # network. # If yes, delete the network and associated router interface. # Otherwise, create a logical switch port with MD-Proxy attachment. for network in neutron_client.get_networks(): if _is_metadata_network(network): # It is a metadata network, find the attached router, # remove the router interface and the network. filters = {'device_owner': const.ROUTER_INTERFACE_OWNERS, 'fixed_ips': { 'subnet_id': [network['subnets'][0]], 'ip_address': [nsx_rpc.METADATA_GATEWAY_IP]}} ports = neutron_client.get_ports(filters=filters) if not ports: continue router_id = ports[0]['device_id'] interface = {'subnet_id': network['subnets'][0]} plugin.remove_router_interface(None, router_id, interface) LOG.info("Removed metadata interface on router %s", router_id) plugin.delete_network(None, network['id']) LOG.info("Removed metadata network %s", network['id']) else: lswitch_id = neutron_client.net_id_to_lswitch_id( network['id']) if not lswitch_id: continue tags = nsxlib.build_v3_tags_payload( network, resource_type='os-neutron-net-id', project_name='admin') name = plugin._get_mdproxy_port_name(network['name'], network['id']) # check if this logical port already exists existing_ports = nsxlib.logical_port.find_by_display_name( name) if not existing_ports: # create a new port with the md-proxy nsxlib.logical_port.create( lswitch_id, metadata_proxy_uuid, tags=tags, name=name, attachment_type=nsx_constants.ATTACHMENT_MDPROXY) LOG.info("Enabled native metadata proxy for network %s", network['id']) else: # update the MDproxy of this port port = existing_ports[0] nsxlib.logical_port.update( port['id'], metadata_proxy_uuid, attachment_type=nsx_constants.ATTACHMENT_MDPROXY) LOG.info("Updated native metadata proxy for network %s", network['id']) @admin_utils.output_header def nsx_update_metadata_proxy_server_ip(resource, event, trigger, **kwargs): """Update Metadata proxy server ip on the 
nsx.""" nsxlib = utils.get_connected_nsxlib() nsx_version = nsxlib.get_version() if not nsx_utils.is_nsx_version_1_1_0(nsx_version): LOG.error("This utility is not available for NSX version %s", nsx_version) return server_ip = None az_name = nsx_az.DEFAULT_NAME if kwargs.get('property'): properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) server_ip = properties.get('server-ip') az_name = properties.get('availability-zone', az_name) if not server_ip or not netaddr.valid_ipv4(server_ip): LOG.error("Need to specify a valid server-ip parameter") return config.register_nsxv3_azs(cfg.CONF, cfg.CONF.nsx_v3.availability_zones) if (az_name != nsx_az.DEFAULT_NAME and az_name not in cfg.CONF.nsx_v3.availability_zones): LOG.error("Availability zone %s was not found in the configuration", az_name) return az = nsx_az.NsxV3AvailabilityZones().get_availability_zone(az_name) az.translate_configured_names_to_uuids(nsxlib) if (not az.metadata_proxy or not cfg.CONF.nsx_v3.native_dhcp_metadata): LOG.error("Native DHCP metadata is not enabled in the configuration " "of availability zone %s", az_name) return metadata_proxy_uuid = az._native_md_proxy_uuid try: mdproxy = nsxlib.native_md_proxy.get(metadata_proxy_uuid) except nsx_exc.ResourceNotFound: LOG.error("metadata proxy %s not found", metadata_proxy_uuid) return # update the IP in the URL url = mdproxy.get('metadata_server_url') url = re.sub(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', server_ip, url) LOG.info("Updating the URL of the metadata proxy server %(uuid)s to " "%(url)s", {'uuid': metadata_proxy_uuid, 'url': url}) nsxlib.native_md_proxy.update(metadata_proxy_uuid, server_url=url) LOG.info("Done.") registry.subscribe(list_metadata_networks, constants.METADATA_PROXY, shell.Operations.LIST.value) registry.subscribe(nsx_update_metadata_proxy, constants.METADATA_PROXY, shell.Operations.NSX_UPDATE.value) registry.subscribe(nsx_update_metadata_proxy_server_ip, constants.METADATA_PROXY, 
shell.Operations.NSX_UPDATE_IP.value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxv3/resources/networks.py0000644000175000017500000001211000000000000030564 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from vmware_nsx.db import db as nsx_db from vmware_nsx.plugins.nsx_v3 import utils as v3_utils from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import formatters from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils from vmware_nsx.shell import resources as shell from vmware_nsxlib.v3 import exceptions as nsx_exc from neutron.db import db_base_plugin_v2 from neutron_lib.callbacks import registry from neutron_lib import context as neutron_context from oslo_log import log as logging LOG = logging.getLogger(__name__) neutron_client = utils.NeutronDbClient() def get_network_nsx_id(context, neutron_id): # get the nsx switch id from the DB mapping mappings = nsx_db.get_nsx_switch_ids(context.session, neutron_id) if mappings and len(mappings) > 0: return mappings[0] @admin_utils.output_header def list_missing_networks(resource, event, trigger, **kwargs): """List neutron networks that are missing the NSX backend network """ nsxlib = 
utils.get_connected_nsxlib() plugin = db_base_plugin_v2.NeutronDbPluginV2() admin_cxt = neutron_context.get_admin_context() filters = utils.get_plugin_filters(admin_cxt) neutron_networks = plugin.get_networks(admin_cxt, filters=filters) networks = [] for net in neutron_networks: neutron_id = net['id'] # get the network nsx id from the mapping table nsx_id = get_network_nsx_id(admin_cxt, neutron_id) if not nsx_id: # skip external networks pass else: try: nsxlib.logical_switch.get(nsx_id) except nsx_exc.ResourceNotFound: networks.append({'name': net['name'], 'neutron_id': neutron_id, 'nsx_id': nsx_id}) if len(networks) > 0: title = ("Found %d internal networks missing from the NSX " "manager:") % len(networks) LOG.info(formatters.output_formatter( title, networks, ['name', 'neutron_id', 'nsx_id'])) else: LOG.info("All internal networks exist on the NSX manager") @admin_utils.output_header def list_orphaned_networks(resource, event, trigger, **kwargs): nsxlib = utils.get_connected_nsxlib() admin_cxt = neutron_context.get_admin_context() missing_networks = v3_utils.get_orphaned_networks(admin_cxt, nsxlib) LOG.info(formatters.output_formatter(constants.ORPHANED_NETWORKS, missing_networks, ['id', 'display_name'])) @admin_utils.output_header def delete_backend_network(resource, event, trigger, **kwargs): errmsg = ("Need to specify nsx-id property. 
Add --property nsx-id=") if not kwargs.get('property'): LOG.error("%s", errmsg) return properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) nsx_id = properties.get('nsx-id') if not nsx_id: LOG.error("%s", errmsg) return nsxlib = utils.get_connected_nsxlib() # check if the network exists try: nsxlib.logical_switch.get(nsx_id, silent=True) except nsx_exc.ResourceNotFound: # prevent logger from logging this exception sys.exc_clear() LOG.warning("Backend network %s was not found.", nsx_id) return # try to delete it try: nsxlib.logical_switch.delete(nsx_id) except Exception as e: LOG.error("Failed to delete backend network %(id)s : %(e)s.", { 'id': nsx_id, 'e': e}) return # Verify that the network was deleted since the backend does not always # through errors try: nsxlib.logical_switch.get(nsx_id, silent=True) except nsx_exc.ResourceNotFound: # prevent logger from logging this exception sys.exc_clear() LOG.info("Backend network %s was deleted.", nsx_id) else: LOG.error("Failed to delete backend network %s.", nsx_id) registry.subscribe(list_missing_networks, constants.NETWORKS, shell.Operations.LIST_MISMATCHES.value) registry.subscribe(list_orphaned_networks, constants.ORPHANED_NETWORKS, shell.Operations.LIST.value) registry.subscribe(delete_backend_network, constants.ORPHANED_NETWORKS, shell.Operations.NSX_CLEAN.value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxv3/resources/ports.py0000644000175000017500000003016100000000000030065 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging from vmware_nsx.common import utils as nsx_utils from vmware_nsx.db import db as nsx_db from vmware_nsx.dvs import dvs from vmware_nsx.plugins.nsx_v3 import plugin from vmware_nsx.plugins.nsx_v3 import utils as plugin_utils from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import formatters from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils as v3_utils from vmware_nsx.shell import resources as shell from vmware_nsxlib.v3 import exceptions as nsx_exc from vmware_nsxlib.v3 import nsx_constants as nsxlib_consts from vmware_nsxlib.v3 import resources from vmware_nsxlib.v3 import security from neutron.db import allowedaddresspairs_db as addr_pair_db from neutron.db import db_base_plugin_v2 from neutron.db import l3_db from neutron.db import portsecurity_db from neutron_lib.callbacks import registry from neutron_lib import constants as const from neutron_lib import context as neutron_context from neutron_lib.plugins import constants as plugin_constants from neutron_lib.plugins import directory LOG = logging.getLogger(__name__) class PortsPlugin(db_base_plugin_v2.NeutronDbPluginV2, portsecurity_db.PortSecurityDbMixin, addr_pair_db.AllowedAddressPairsMixin): def __enter__(self): directory.add_plugin(plugin_constants.CORE, self) return self def __exit__(self, exc_type, exc_value, traceback): directory.add_plugin(plugin_constants.CORE, None) def get_network_nsx_id(session, neutron_id): # get the nsx 
switch id from the DB mapping mappings = nsx_db.get_nsx_switch_ids(session, neutron_id) if not mappings or len(mappings) == 0: LOG.debug("Unable to find NSX mappings for neutron " "network %s.", neutron_id) # fallback in case we didn't find the id in the db mapping # This should not happen, but added here in case the network was # created before this code was added. return neutron_id else: return mappings[0] @admin_utils.output_header def list_missing_ports(resource, event, trigger, **kwargs): """List neutron ports that are missing the NSX backend port And ports with wrong switch profiles or bindings """ admin_cxt = neutron_context.get_admin_context() filters = v3_utils.get_plugin_filters(admin_cxt) nsxlib = v3_utils.get_connected_nsxlib() with v3_utils.NsxV3PluginWrapper() as plugin: problems = plugin_utils.get_mismatch_logical_ports( admin_cxt, nsxlib, plugin, filters) if len(problems) > 0: title = ("Found internal ports misconfiguration on the " "NSX manager:") LOG.info(formatters.output_formatter( title, problems, ['neutron_id', 'nsx_id', 'error'])) else: LOG.info("All internal ports verified on the NSX manager") def get_vm_network_device(vm_mng, vm_moref, mac_address): """Return the network device with MAC 'mac_address'. 
This code was inspired by Nova vif.get_network_device """ hardware_devices = vm_mng.get_vm_interfaces_info(vm_moref) if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice": hardware_devices = hardware_devices.VirtualDevice for device in hardware_devices: if hasattr(device, 'macAddress'): if device.macAddress == mac_address: return device def migrate_compute_ports_vms(resource, event, trigger, **kwargs): """Update the VMs ports on the backend after migrating nsx-v -> nsx-v3 After using api_replay to migrate the neutron data from NSX-V to NSX-T we need to update the VM ports to use OpaqueNetwork instead of DistributedVirtualPortgroup """ # Connect to the DVS manager, using the configuration parameters try: vm_mng = dvs.VMManager() except Exception as e: LOG.error("Cannot connect to the DVS: Please update the [dvs] " "section in the nsx.ini file: %s", e) return port_filters = {} if kwargs.get('property'): properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) project = properties.get('project-id') if project: port_filters['project_id'] = [project] net_name = properties.get('net-name', 'VM Network') LOG.info("Common network name for migration %s", net_name) host_moref = properties.get('host-moref') # TODO(garyk): We can explore the option of passing the cluster # moref then this will remove the need for the host-moref and the # resource pool moref. 
respool_moref = properties.get('respool-moref') datastore_moref = properties.get('datastore-moref') if not host_moref: LOG.error("Unable to migrate with no host") return # Go over all the ports from the plugin admin_cxt = neutron_context.get_admin_context() with PortsPlugin() as plugin: neutron_ports = plugin.get_ports(admin_cxt, filters=port_filters) for port in neutron_ports: # skip non compute ports if (not port.get('device_owner').startswith( const.DEVICE_OWNER_COMPUTE_PREFIX)): continue device_id = port.get('device_id') # get the vm moref & spec from the DVS vm_moref = vm_mng.get_vm_moref_obj(device_id) vm_spec = vm_mng.get_vm_spec(vm_moref) if not vm_spec: LOG.error("Failed to get the spec of vm %s", device_id) continue # Go over the VM interfaces and check if it should be updated update_spec = False for prop in vm_spec.propSet: if (prop.name == 'network' and hasattr(prop.val, 'ManagedObjectReference')): for net in prop.val.ManagedObjectReference: if (net._type == 'DistributedVirtualPortgroup' or net._type == 'Network'): update_spec = True if not update_spec: LOG.info("No need to update the spec of vm %s", device_id) continue device = get_vm_network_device(vm_mng, vm_moref, port['mac_address']) if device is None: LOG.warning("No device with MAC address %s exists on the VM", port['mac_address']) continue # Update interface to be common network devices = [vm_mng.update_vm_network(device, name=net_name)] LOG.info("Update instance %s to common network", device_id) vm_mng.update_vm_interface(vm_moref, devices=devices) LOG.info("Migrate instance %s to host %s", device_id, host_moref) vm_mng.relocate_vm(vm_moref, host_moref=host_moref, datastore_moref=datastore_moref, respool_moref=respool_moref) LOG.info("Update instance %s to opaque network", device_id) device = get_vm_network_device(vm_mng, vm_moref, port['mac_address']) vif_info = {'nsx_id': get_network_nsx_id(admin_cxt.session, port['network_id']), 'iface_id': port['id']} devices = 
[vm_mng.update_vm_opaque_spec(vif_info, device)] vm_mng.update_vm_interface(vm_moref, devices=devices) LOG.info("Instance %s successfully migrated!", device_id) def migrate_exclude_ports(resource, event, trigger, **kwargs): _nsx_client = v3_utils.get_nsxv3_client() nsxlib = v3_utils.get_connected_nsxlib() version = nsxlib.get_version() if not nsx_utils.is_nsx_version_2_0_0(version): LOG.info("Migration only supported from 2.0 onwards") LOG.info("Version is %s", version) return admin_cxt = neutron_context.get_admin_context() plugin = PortsPlugin() _port_client = resources.LogicalPort(_nsx_client) exclude_list = nsxlib.firewall_section.get_excludelist() for member in exclude_list['members']: if member['target_type'] == 'LogicalPort': port_id = member['target_id'] # Get port try: nsx_port = _port_client.get(port_id) except nsx_exc.ResourceNotFound: LOG.info("Port %s not found", port_id) continue # Validate its a neutron port is_neutron_port = False for tag in nsx_port.get('tags', []): if tag['scope'] == 'os-neutron-port-id': is_neutron_port = True neutron_port_id = tag['tag'] break if not is_neutron_port: LOG.info("Port %s is not a neutron port", port_id) continue # Check if this port exists in the DB try: plugin.get_port(admin_cxt, neutron_port_id) except Exception: LOG.info("Port %s is not defined in DB", neutron_port_id) continue # Update tag for the port tags_update = [{'scope': security.PORT_SG_SCOPE, 'tag': nsxlib_consts.EXCLUDE_PORT}] _port_client.update(port_id, None, tags_update=tags_update) # Remove port from the exclude list nsxlib.firewall_section.remove_member_from_fw_exclude_list( port_id, nsxlib_consts.TARGET_TYPE_LOGICAL_PORT) LOG.info("Port %s successfully updated", port_id) def tag_default_ports(resource, event, trigger, **kwargs): nsxlib = v3_utils.get_connected_nsxlib() admin_cxt = neutron_context.get_admin_context() filters = v3_utils.get_plugin_filters(admin_cxt) # the plugin creation below will create the NS group and update the default # OS 
section to have the correct applied to group with v3_utils.NsxV3PluginWrapper() as _plugin: neutron_ports = _plugin.get_ports(admin_cxt, filters=filters) for port in neutron_ports: neutron_id = port['id'] # get the network nsx id from the mapping table nsx_id = plugin_utils.get_port_nsx_id(admin_cxt.session, neutron_id) if not nsx_id: continue device_owner = port['device_owner'] if (device_owner == l3_db.DEVICE_OWNER_ROUTER_INTF or device_owner == const.DEVICE_OWNER_DHCP): continue ps = _plugin._get_port_security_binding(admin_cxt, neutron_id) if not ps: continue try: nsx_port = nsxlib.logical_port.get(nsx_id) except nsx_exc.ResourceNotFound: continue tags_update = nsx_port['tags'] tags_update += [{'scope': security.PORT_SG_SCOPE, 'tag': plugin.NSX_V3_DEFAULT_SECTION}] nsxlib.logical_port.update(nsx_id, None, tags_update=tags_update) registry.subscribe(list_missing_ports, constants.PORTS, shell.Operations.LIST_MISMATCHES.value) registry.subscribe(migrate_compute_ports_vms, constants.PORTS, shell.Operations.NSX_MIGRATE_V_V3.value) registry.subscribe(migrate_exclude_ports, constants.PORTS, shell.Operations.NSX_MIGRATE_EXCLUDE_PORTS.value) registry.subscribe(tag_default_ports, constants.PORTS, shell.Operations.NSX_TAG_DEFAULT.value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxv3/resources/routers.py0000644000175000017500000003371200000000000030426 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from neutron.db import db_base_plugin_v2 from neutron.db import l3_db from neutron_lib.callbacks import registry from neutron_lib import context as neutron_context from oslo_log import log as logging from vmware_nsx.common import utils as nsx_utils from vmware_nsx.db import db as nsx_db from vmware_nsx.db import nsx_models from vmware_nsx.plugins.nsx_v3 import utils as v3_utils from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import formatters from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils from vmware_nsx.shell import resources as shell from vmware_nsxlib.v3 import exceptions as nsx_exc from vmware_nsxlib.v3 import nsx_constants LOG = logging.getLogger(__name__) neutron_client = utils.NeutronDbClient() class RoutersPlugin(db_base_plugin_v2.NeutronDbPluginV2, l3_db.L3_NAT_db_mixin): pass @admin_utils.output_header def list_missing_routers(resource, event, trigger, **kwargs): """List neutron routers that are missing the NSX backend router """ nsxlib = utils.get_connected_nsxlib() plugin = RoutersPlugin() admin_cxt = neutron_context.get_admin_context() filters = utils.get_plugin_filters(admin_cxt) neutron_routers = plugin.get_routers(admin_cxt, filters=filters) routers = [] for router in neutron_routers: neutron_id = router['id'] # get the router nsx id from the mapping table nsx_id = nsx_db.get_nsx_router_id(admin_cxt.session, neutron_id) if not nsx_id: routers.append({'name': router['name'], 'neutron_id': 
neutron_id, 'nsx_id': None}) else: try: nsxlib.logical_router.get(nsx_id) except nsx_exc.ResourceNotFound: routers.append({'name': router['name'], 'neutron_id': neutron_id, 'nsx_id': nsx_id}) if len(routers) > 0: title = ("Found %d routers missing from the NSX " "manager:") % len(routers) LOG.info(formatters.output_formatter( title, routers, ['name', 'neutron_id', 'nsx_id'])) else: LOG.info("All routers exist on the NSX manager") @admin_utils.output_header def update_nat_rules(resource, event, trigger, **kwargs): """Update all routers NAT rules to not bypass the firewall""" # This feature is supported only since nsx version 2 nsxlib = utils.get_connected_nsxlib() version = nsxlib.get_version() if not nsx_utils.is_nsx_version_2_0_0(version): LOG.info("NAT rules update only supported from 2.0 onwards") LOG.info("Version is %s", version) return # Go over all neutron routers plugin = RoutersPlugin() admin_cxt = neutron_context.get_admin_context() filters = utils.get_plugin_filters(admin_cxt) neutron_routers = plugin.get_routers(admin_cxt, filters=filters) num_of_updates = 0 for router in neutron_routers: neutron_id = router['id'] # get the router nsx id from the mapping table nsx_id = nsx_db.get_nsx_router_id(admin_cxt.session, neutron_id) if nsx_id: # get all NAT rules: rules = nsxlib.logical_router.list_nat_rules(nsx_id)['results'] for rule in rules: if rule['action'] not in ["NO_SNAT", "NO_DNAT", "NO_NAT"]: if 'nat_pass' not in rule or rule['nat_pass']: nsxlib.logical_router.update_nat_rule( nsx_id, rule['id'], nat_pass=False) num_of_updates = num_of_updates + 1 if num_of_updates: LOG.info("Done updating %s NAT rules", num_of_updates) else: LOG.info("Did not find any NAT rule to update") @admin_utils.output_header def update_enable_standby_relocation(resource, event, trigger, **kwargs): """Enable standby relocation on all routers """ # This feature is supported only since nsx version 2.4 nsxlib = utils.get_connected_nsxlib() version = nsxlib.get_version() if not 
nsx_utils.is_nsx_version_2_4_0(version): LOG.info("Standby relocation update is only supported from 2.4 " "onwards") LOG.info("Version is %s", version) return # Go over all neutron routers plugin = RoutersPlugin() admin_cxt = neutron_context.get_admin_context() filters = utils.get_plugin_filters(admin_cxt) neutron_routers = plugin.get_routers(admin_cxt, filters=filters) for router in neutron_routers: neutron_id = router['id'] # get the router nsx id from the mapping table nsx_id = nsx_db.get_nsx_router_id(admin_cxt.session, neutron_id) try: nsxlib.logical_router.update(lrouter_id=nsx_id, enable_standby_relocation=True) except Exception as e: # This may fail if the service router is not created LOG.warning("Router %s cannot enable standby relocation: %s", neutron_id, e) else: LOG.info("Router %s was enabled with standby relocation", neutron_id) LOG.info("Done") @admin_utils.output_header def list_orphaned_routers(resource, event, trigger, **kwargs): nsxlib = utils.get_connected_nsxlib() admin_cxt = neutron_context.get_admin_context() missing_routers = v3_utils.get_orphaned_routers(admin_cxt, nsxlib) LOG.info(formatters.output_formatter(constants.ORPHANED_ROUTERS, missing_routers, ['id', 'display_name'])) @admin_utils.output_header def delete_backend_router(resource, event, trigger, **kwargs): nsxlib = utils.get_connected_nsxlib() errmsg = ("Need to specify nsx-id property. 
Add --property nsx-id=") if not kwargs.get('property'): LOG.error("%s", errmsg) return properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) nsx_id = properties.get('nsx-id') if not nsx_id: LOG.error("%s", errmsg) return # check if the router exists try: nsxlib.logical_router.get(nsx_id, silent=True) except nsx_exc.ResourceNotFound: # prevent logger from logging this exception sys.exc_clear() LOG.warning("Backend router %s was not found.", nsx_id) return # try to delete it success, error = v3_utils.delete_orphaned_router(nsxlib, nsx_id) if not success: LOG.error("Failed to delete backend router %(id)s : %(e)s.", { 'id': nsx_id, 'e': error}) return # Verify that the router was deleted since the backend does not always # throws errors try: nsxlib.logical_router.get(nsx_id, silent=True) except nsx_exc.ResourceNotFound: # prevent logger from logging this exception sys.exc_clear() LOG.info("Backend router %s was deleted.", nsx_id) else: LOG.error("Failed to delete backend router %s.", nsx_id) @admin_utils.output_header def update_dhcp_relay(resource, event, trigger, **kwargs): """Update all routers dhcp relay service by the current configuration""" nsxlib = utils.get_connected_nsxlib() if not nsxlib.feature_supported(nsx_constants.FEATURE_DHCP_RELAY): version = nsxlib.get_version() LOG.error("DHCP relay is not supported by NSX version %s", version) return admin_cxt = neutron_context.get_admin_context() filters = utils.get_plugin_filters(admin_cxt) with utils.NsxV3PluginWrapper() as plugin: # Make sure FWaaS was initialized plugin.init_fwaas_for_admin_utils() # get all neutron routers and interfaces ports routers = plugin.get_routers(admin_cxt, filters=filters) for router in routers: LOG.info("Updating router %s", router['id']) port_filters = {'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF], 'device_id': [router['id']]} ports = plugin.get_ports(admin_cxt, filters=port_filters) for port in ports: # get the backend router port by the tag nsx_port_id = 
nsxlib.get_id_by_resource_and_tag( 'LogicalRouterDownLinkPort', 'os-neutron-rport-id', port['id']) if not nsx_port_id: LOG.warning("Couldn't find nsx router port for interface " "%s", port['id']) continue # get the network of this port network_id = port['network_id'] # check the relay service on the az of the network az = plugin.get_network_az_by_net_id(admin_cxt, network_id) nsxlib.logical_router_port.update( nsx_port_id, relay_service_uuid=az.dhcp_relay_service) # if FWaaS is enables, also update the firewall rules try: plugin.update_router_firewall(admin_cxt, router['id']) except Exception as e: LOG.warning("Updating router firewall was skipped because of " "an error %s", e) LOG.info("Done.") @admin_utils.output_header def update_tier0(resource, event, trigger, **kwargs): """Replace old tier0 with a new one on the neutron DB and NSX backend""" errmsg = ("Need to specify old and new tier0 ID. Add --property " "old-tier0= --property new-tier0=") if not kwargs.get('property'): LOG.error("%s", errmsg) return properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) old_tier0 = properties.get('old-tier0') new_tier0 = properties.get('new-tier0') if not old_tier0 or not new_tier0: LOG.error("%s", errmsg) return # Verify the id of the new tier0 (old one might not exist any more) nsxlib = utils.get_connected_nsxlib() try: tier0_obj = nsxlib.logical_router.get(new_tier0) except Exception: LOG.error("Tier0 logical router %s was not found", new_tier0) return if tier0_obj.get('router_type') != 'TIER0': LOG.error("Logical router %s is not a tier0 router", new_tier0) return # update all neutron DB entries old_tier0_networks = [] admin_cxt = neutron_context.get_admin_context() with admin_cxt.session.begin(subtransactions=True): bindings = admin_cxt.session.query( nsx_models.TzNetworkBinding).filter_by(phy_uuid=old_tier0).all() for bind in bindings: old_tier0_networks.append(bind.network_id) bind.phy_uuid = new_tier0 if not old_tier0_networks: LOG.info("Did not find 
any provider networks using tier0 %s", old_tier0) return LOG.info("Updated provider networks in DB: %s", old_tier0_networks) # Update tier1 routers GW to point to the new tier0 in the backend plugin = RoutersPlugin() filters = utils.get_plugin_filters(admin_cxt) neutron_routers = plugin.get_routers(admin_cxt, filters=filters) for router in neutron_routers: router_gw_net = (router.get('external_gateway_info') and router['external_gateway_info'].get('network_id')) if router_gw_net and router_gw_net in old_tier0_networks: nsx_router_id = nsx_db.get_nsx_router_id( admin_cxt.session, router['id']) try: nsxlib.router.remove_router_link_port(nsx_router_id) except Exception as e: LOG.info("Could not delete router %s linked port: %s", router['id'], e) tags = nsxlib.build_v3_tags_payload( router, resource_type='os-neutron-rport', project_name=admin_cxt.tenant_name) try: nsxlib.router.add_router_link_port(nsx_router_id, new_tier0, tags=tags) except Exception as e: LOG.error("Failed to create router %s linked port: %s", router['id'], e) else: LOG.info("Updated router %s uplink port", router['id']) LOG.info("Done.") registry.subscribe(list_missing_routers, constants.ROUTERS, shell.Operations.LIST_MISMATCHES.value) registry.subscribe(update_nat_rules, constants.ROUTERS, shell.Operations.NSX_UPDATE_RULES.value) registry.subscribe(list_orphaned_routers, constants.ORPHANED_ROUTERS, shell.Operations.LIST.value) registry.subscribe(delete_backend_router, constants.ORPHANED_ROUTERS, shell.Operations.NSX_CLEAN.value) registry.subscribe(update_dhcp_relay, constants.ROUTERS, shell.Operations.NSX_UPDATE_DHCP_RELAY.value) registry.subscribe(update_enable_standby_relocation, constants.ROUTERS, shell.Operations.NSX_ENABLE_STANDBY_RELOCATION.value) registry.subscribe(update_tier0, constants.ROUTERS, shell.Operations.UPDATE_TIER0.value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 
vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxv3/resources/securitygroups.py0000644000175000017500000005225500000000000032035 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.db import securitygroups_db from neutron.extensions import securitygroup as ext_sg from neutron_lib.callbacks import registry from neutron_lib import context as neutron_context from neutron_lib.db import api as db_api from oslo_log import log as logging from vmware_nsx.common import nsx_constants from vmware_nsx.db import db as nsx_db from vmware_nsx.db import nsx_models from vmware_nsx.extensions import providersecuritygroup as provider_sg from vmware_nsx.extensions import securitygrouplogging as sg_logging from vmware_nsx.plugins.nsx_v3 import plugin as v3_plugin from vmware_nsx.plugins.nsx_v3 import utils as plugin_utils from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import formatters from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils as v3_utils from vmware_nsx.shell import resources as shell from vmware_nsxlib.v3 import exceptions as nsx_lib_exc from vmware_nsxlib.v3 import nsx_constants as consts from vmware_nsxlib.v3 import security LOG = logging.getLogger(__name__) class NeutronSecurityGroupApi(securitygroups_db.SecurityGroupDbMixin): def __init__(self): 
super(NeutronSecurityGroupApi, self) self.context = neutron_context.get_admin_context() self.filters = v3_utils.get_plugin_filters(self.context) def get_security_groups(self): return super(NeutronSecurityGroupApi, self).get_security_groups(self.context, filters=self.filters) def get_security_group(self, sg_id): return super(NeutronSecurityGroupApi, self).get_security_group(self.context, sg_id) def create_security_group(self, sg, default_sg=False): return super(NeutronSecurityGroupApi, self).create_security_group(self.context, sg, default_sg=default_sg) def delete_security_group(self, sg_id): return super(NeutronSecurityGroupApi, self).delete_security_group(self.context, sg_id) def get_nsgroup_id(self, sg_id): return nsx_db.get_nsx_security_group_id( self.context.session, sg_id) def get_port_security_groups(self, port_id): secgroups_bindings = self._get_port_security_group_bindings( self.context, {'port_id': [port_id]}) return [b['security_group_id'] for b in secgroups_bindings] def get_ports_in_security_group(self, security_group_id): secgroups_bindings = self._get_port_security_group_bindings( self.context, {'security_group_id': [security_group_id]}) return [b['port_id'] for b in secgroups_bindings] def delete_security_group_section_mapping(self, sg_id): with db_api.CONTEXT_WRITER.using(self.context): fw_mapping = self.context.session.query( nsx_models.NeutronNsxFirewallSectionMapping).filter_by( neutron_id=sg_id).one_or_none() if fw_mapping: self.context.session.delete(fw_mapping) def delete_security_group_backend_mapping(self, sg_id): with db_api.CONTEXT_WRITER.using(self.context): sg_mapping = self.context.session.query( nsx_models.NeutronNsxSecurityGroupMapping).filter_by( neutron_id=sg_id).one_or_none() if sg_mapping: self.context.session.delete(sg_mapping) def get_logical_port_id(self, port_id): mapping = self.context.session.query( nsx_models.NeutronNsxPortMapping).filter_by( neutron_id=port_id).one_or_none() if mapping: return mapping.nsx_id neutron_sg = 
NeutronSecurityGroupApi() neutron_db = v3_utils.NeutronDbClient() def _log_info(resource, data, attrs=['display_name', 'id']): LOG.info(formatters.output_formatter(resource, data, attrs)) @admin_utils.list_handler(constants.SECURITY_GROUPS) @admin_utils.output_header def list_security_groups_mappings(resource, event, trigger, **kwargs): """List neutron security groups""" sg_mappings = plugin_utils.get_security_groups_mappings(neutron_sg.context) _log_info(constants.SECURITY_GROUPS, sg_mappings, attrs=['name', 'id', 'section-id', 'nsx-securitygroup-id']) return bool(sg_mappings) @admin_utils.list_handler(constants.FIREWALL_SECTIONS) @admin_utils.output_header def nsx_list_dfw_sections(resource, event, trigger, **kwargs): """List NSX backend firewall sections""" nsxlib = v3_utils.get_connected_nsxlib() fw_sections = nsxlib.firewall_section.list() _log_info(constants.FIREWALL_SECTIONS, fw_sections) return bool(fw_sections) @admin_utils.list_handler(constants.FIREWALL_NSX_GROUPS) @admin_utils.output_header def nsx_list_security_groups(resource, event, trigger, **kwargs): """List NSX backend security groups""" nsxlib = v3_utils.get_connected_nsxlib() nsx_secgroups = nsxlib.ns_group.list() _log_info(constants.FIREWALL_NSX_GROUPS, nsx_secgroups) return bool(nsx_secgroups) def _find_missing_security_groups(): nsxlib = v3_utils.get_connected_nsxlib() nsx_secgroups = nsxlib.ns_group.list() sg_mappings = plugin_utils.get_security_groups_mappings(neutron_sg.context) missing_secgroups = {} for sg_db in sg_mappings: for nsx_sg in nsx_secgroups: if nsx_sg['id'] == sg_db['nsx-securitygroup-id']: break else: missing_secgroups[sg_db['id']] = sg_db return missing_secgroups @admin_utils.list_mismatches_handler(constants.FIREWALL_NSX_GROUPS) @admin_utils.output_header def list_missing_security_groups(resource, event, trigger, **kwargs): """List security groups with sections missing on the NSX backend""" sgs_with_missing_nsx_group = _find_missing_security_groups() 
missing_securitgroups_info = [ {'securitygroup-name': sg['name'], 'securitygroup-id': sg['id'], 'nsx-securitygroup-id': sg['nsx-securitygroup-id']} for sg in sgs_with_missing_nsx_group.values()] _log_info(constants.FIREWALL_NSX_GROUPS, missing_securitgroups_info, attrs=['securitygroup-name', 'securitygroup-id', 'nsx-securitygroup-id']) return bool(missing_securitgroups_info) def _find_missing_sections(): nsxlib = v3_utils.get_connected_nsxlib() fw_sections = nsxlib.firewall_section.list() sg_mappings = plugin_utils.get_security_groups_mappings(neutron_sg.context) missing_sections = {} for sg_db in sg_mappings: for fw_section in fw_sections: if fw_section['id'] == sg_db['section-id']: break else: missing_sections[sg_db['id']] = sg_db return missing_sections @admin_utils.list_mismatches_handler(constants.FIREWALL_SECTIONS) @admin_utils.output_header def list_missing_firewall_sections(resource, event, trigger, **kwargs): """List security groups with missing sections on the NSX backend""" sgs_with_missing_section = _find_missing_sections() missing_sections_info = [{'securitygroup-name': sg['name'], 'securitygroup-id': sg['id'], 'section-id': sg['section-id']} for sg in sgs_with_missing_section.values()] _log_info(constants.FIREWALL_SECTIONS, missing_sections_info, attrs=['securitygroup-name', 'securitygroup-id', 'section-id']) return bool(missing_sections_info) @admin_utils.fix_mismatches_handler(constants.SECURITY_GROUPS) @admin_utils.output_header def fix_security_groups(resource, event, trigger, **kwargs): """Fix mismatch security groups by recreating missing sections & NS groups on the NSX backend """ context_ = neutron_context.get_admin_context() inconsistent_secgroups = _find_missing_sections() inconsistent_secgroups.update(_find_missing_security_groups()) nsxlib = v3_utils.get_connected_nsxlib() with v3_utils.NsxV3PluginWrapper() as plugin: for sg_id, sg in inconsistent_secgroups.items(): secgroup = plugin.get_security_group(context_, sg_id) try: # FIXME(roeyc): 
try..except clause should be removed once the # api will return 404 response code instead 400 for trying to # delete a non-existing firewall section. nsxlib.firewall_section.delete(sg['section-id']) except Exception: pass nsxlib.ns_group.delete(sg['nsx-securitygroup-id']) neutron_sg.delete_security_group_section_mapping(sg_id) neutron_sg.delete_security_group_backend_mapping(sg_id) nsgroup, fw_section = ( plugin._create_security_group_backend_resources(secgroup)) nsx_db.save_sg_mappings( context_, sg_id, nsgroup['id'], fw_section['id']) # If version > 1.1 then we use dynamic criteria tags, and the port # should already have them. if not nsxlib.feature_supported(consts.FEATURE_DYNAMIC_CRITERIA): members = [] for port_id in neutron_sg.get_ports_in_security_group(sg_id): lport_id = neutron_sg.get_logical_port_id(port_id) members.append(lport_id) nsxlib.ns_group.add_members( nsgroup['id'], consts.TARGET_TYPE_LOGICAL_PORT, members) for rule in secgroup['security_group_rules']: rule_mapping = (context_.session.query( nsx_models.NeutronNsxRuleMapping).filter_by( neutron_id=rule['id']).one()) with context_.session.begin(subtransactions=True): context_.session.delete(rule_mapping) action = (consts.FW_ACTION_DROP if secgroup.get(provider_sg.PROVIDER) else consts.FW_ACTION_ALLOW) rules = plugin._create_firewall_rules( context_, fw_section['id'], nsgroup['id'], secgroup.get(sg_logging.LOGGING, False), action, secgroup['security_group_rules']) plugin.save_security_group_rule_mappings(context_, rules['rules']) def _update_ports_dynamic_criteria_tags(): nsxlib = v3_utils.get_connected_nsxlib() for port in neutron_db.get_ports(): secgroups = neutron_sg.get_port_security_groups(port['id']) # Nothing to do with ports that are not associated with any sec-group. 
if not secgroups: continue _, lport_id = neutron_db.get_lswitch_and_lport_id(port['id']) criteria_tags = nsxlib.ns_group.get_lport_tags(secgroups) nsxlib.logical_port.update( lport_id, False, tags_update=criteria_tags) def _update_security_group_dynamic_criteria(): nsxlib = v3_utils.get_connected_nsxlib() secgroups = neutron_sg.get_security_groups() for sg in secgroups: nsgroup_id = neutron_sg.get_nsgroup_id(sg['id']) membership_criteria = nsxlib.ns_group.get_port_tag_expression( security.PORT_SG_SCOPE, sg['id']) try: # We want to add the dynamic criteria and remove all direct members # they will be added by the manager using the new criteria. nsxlib.ns_group.update(nsgroup_id, membership_criteria=membership_criteria, members=[]) except Exception as e: LOG.warning("Failed to update membership criteria for nsgroup " "%(nsgroup_id)s, request to backend returned " "with error: %(error)s", {'nsgroup_id': nsgroup_id, 'error': str(e)}) @admin_utils.output_header def migrate_nsgroups_to_dynamic_criteria(resource, event, trigger, **kwargs): """Update NSX security groups dynamic criteria for NSXv3 CrossHairs""" nsxlib = v3_utils.get_connected_nsxlib() if not nsxlib.feature_supported(consts.FEATURE_DYNAMIC_CRITERIA): LOG.error("Dynamic criteria grouping feature isn't supported by " "this NSX version.") return # First, we add the criteria tags for all ports. _update_ports_dynamic_criteria_tags() # Update security-groups with dynamic criteria and remove direct members. 
_update_security_group_dynamic_criteria() def list_orphaned_sections(resource, event, trigger, **kwargs): """List orphaned firewall sections""" nsxlib = v3_utils.get_connected_nsxlib() orphaned_sections = plugin_utils.get_orphaned_firewall_sections( neutron_sg.context, nsxlib) _log_info(constants.ORPHANED_FIREWALL_SECTIONS, orphaned_sections, attrs=['id', 'display_name']) def list_orphaned_section_rules(resource, event, trigger, **kwargs): """List orphaned firewall section rules""" nsxlib = v3_utils.get_connected_nsxlib() orphaned_rules = plugin_utils.get_orphaned_firewall_section_rules( neutron_sg.context, nsxlib) _log_info("orphaned-firewall-section-rules", orphaned_rules, attrs=['security-group-name', 'security-group-id', 'section-id', 'rule-id']) def clean_orphaned_sections(resource, event, trigger, **kwargs): """Delete orphaned firewall sections from the NSX backend""" nsxlib = v3_utils.get_connected_nsxlib() orphaned_sections = plugin_utils.get_orphaned_firewall_sections( neutron_sg.context, nsxlib) if not orphaned_sections: LOG.info("No orphaned nsx sections were found.") for sec in orphaned_sections: try: nsxlib.firewall_section.delete(sec['id']) except Exception as e: LOG.error("Failed to delete backend firewall section %(id)s : " "%(e)s.", {'id': sec['id'], 'e': e}) else: LOG.info("Backend firewall section %s was deleted.", sec['id']) def clean_orphaned_section_rules(resource, event, trigger, **kwargs): """Delete orphaned firewall section rules from the NSX backend""" nsxlib = v3_utils.get_connected_nsxlib() orphaned_rules = plugin_utils.get_orphaned_firewall_section_rules( neutron_sg.context, nsxlib) if not orphaned_rules: LOG.info("No orphaned nsx rules were found.") for rule in orphaned_rules: try: nsxlib.firewall_section.delete_rule( rule['section-id'], rule['rule-id']) except Exception as e: LOG.error("Failed to delete backend firewall section %(sect)s " "rule %(rule)s: %(e)s.", {'sect': rule['section-id'], 'rule': rule['rule-id'], 'e': e}) else: 
LOG.info("Backend firewall rule %s was deleted.", rule['rule-id']) def update_security_groups_logging(resource, event, trigger, **kwargs): """Update allowed traffic logging for all neutron security group rules""" errmsg = ("Need to specify log-allowed-traffic property. Add --property " "log-allowed-traffic=true/false") if not kwargs.get('property'): LOG.error("%s", errmsg) return properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) log_allowed_str = properties.get('log-allowed-traffic') if not log_allowed_str or log_allowed_str.lower() not in ['true', 'false']: LOG.error("%s", errmsg) return log_allowed = log_allowed_str.lower() == 'true' context = neutron_context.get_admin_context() nsxlib = v3_utils.get_connected_nsxlib() with v3_utils.NsxV3PluginWrapper() as plugin: secgroups = plugin.get_security_groups(context, fields=['id', sg_logging.LOGGING]) LOG.info("Going to update logging of %s sections", len(secgroups)) for sg in [sg for sg in secgroups if sg.get(sg_logging.LOGGING) is False]: nsgroup_id, section_id = nsx_db.get_sg_mappings( context.session, sg['id']) if section_id: try: nsxlib.firewall_section.set_rule_logging( section_id, logging=log_allowed) except nsx_lib_exc.ManagerError: LOG.error("Failed to update firewall rule logging " "for rule in section %s", section_id) def reuse_default_section(resource, event, trigger, **kwargs): """Reuse existing NSX default section & NS group that might already exist on the NSX from a previous installation. """ # first check if the backend has a default OS section nsxlib = v3_utils.get_connected_nsxlib() fw_sections = nsxlib.firewall_section.list() section_name = v3_plugin.NSX_V3_FW_DEFAULT_SECTION section_id = None for section in fw_sections: if section['display_name'] == section_name: if section_id is not None: # Multiple sections already exist! LOG.error("Multiple default OS NSX sections already exist. 
" "Please delete unused ones") return False section_id = section['id'] if not section_id: LOG.error("No OS NSX section found") return False # Get existing default NS group from the NSX ns_groups = nsxlib.ns_group.find_by_display_name( v3_plugin.NSX_V3_FW_DEFAULT_NS_GROUP) if len(ns_groups) > 1: LOG.error("Multiple default OS NS groups already exist. " "Please delete unused ones") return False if not ns_groups: LOG.error("No OS NS group found") return False nsgroup_id = ns_groups[0]['id'] # Reuse this section by adding it to the DB mapping context = neutron_context.get_admin_context() # Add global SG to the neutron DB try: neutron_sg.get_security_group(plugin_utils.NSX_V3_OS_DFW_UUID) except ext_sg.SecurityGroupNotFound: sec_group = {'security_group': {'id': plugin_utils.NSX_V3_OS_DFW_UUID, 'tenant_id': nsx_constants.INTERNAL_V3_TENANT_ID, 'name': 'NSX Internal', 'description': ''}} neutron_sg.create_security_group( sec_group, default_sg=True) # Get existing mapping from the DB db_nsgroup_id, db_section_id = nsx_db.get_sg_mappings( context.session, plugin_utils.NSX_V3_OS_DFW_UUID) if db_nsgroup_id or db_section_id: if db_nsgroup_id == nsgroup_id and db_section_id == section_id: LOG.info('Neutron DB is already configured correctly with section ' '%s and NS group %s', section_id, nsgroup_id) return True else: LOG.info('Deleting old DB mappings for section %s and NS group %s', db_section_id, db_nsgroup_id) nsx_db.delete_sg_mappings( context, plugin_utils.NSX_V3_OS_DFW_UUID, db_nsgroup_id, db_section_id) # Add mappings to the neutron DB LOG.info('Creating new DB mappings for section %s and NS group %s', section_id, nsgroup_id) nsx_db.save_sg_mappings( context, plugin_utils.NSX_V3_OS_DFW_UUID, nsgroup_id, section_id) # The DB mappings were changed. # The user must restart neutron to avoid failures. 
LOG.info("Please restart neutron service") return True registry.subscribe(update_security_groups_logging, constants.SECURITY_GROUPS, shell.Operations.UPDATE_LOGGING.value) registry.subscribe(migrate_nsgroups_to_dynamic_criteria, constants.FIREWALL_NSX_GROUPS, shell.Operations.MIGRATE_TO_DYNAMIC_CRITERIA.value) registry.subscribe(fix_security_groups, constants.FIREWALL_SECTIONS, shell.Operations.NSX_UPDATE.value) registry.subscribe(list_orphaned_sections, constants.ORPHANED_FIREWALL_SECTIONS, shell.Operations.NSX_LIST.value) registry.subscribe(list_orphaned_section_rules, constants.ORPHANED_FIREWALL_SECTIONS, shell.Operations.NSX_LIST.value) registry.subscribe(clean_orphaned_sections, constants.ORPHANED_FIREWALL_SECTIONS, shell.Operations.NSX_CLEAN.value) registry.subscribe(clean_orphaned_section_rules, constants.ORPHANED_FIREWALL_SECTIONS, shell.Operations.NSX_CLEAN.value) registry.subscribe(reuse_default_section, constants.FIREWALL_SECTIONS, shell.Operations.REUSE.value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/plugins/nsxv3/resources/utils.py0000644000175000017500000001563400000000000030066 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from neutron.db import db_base_plugin_v2 from neutron.db import l3_dvr_db # noqa from neutron import manager from neutron_lib import context from neutron_lib.plugins import constants as const from neutron_lib.plugins import directory from vmware_nsx.common import config from vmware_nsx.db import db as nsx_db from vmware_nsx.extensions import projectpluginmap from vmware_nsx.plugins.nsx_v3 import plugin from vmware_nsx.plugins.nsx_v3 import utils as v3_utils from vmware_nsx.services.fwaas.nsx_v3 import fwaas_callbacks_v2 from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsxlib.v3 import nsx_constants _NSXLIB = None def get_nsxv3_client(nsx_username=None, nsx_password=None, use_basic_auth=False, plugin_conf=None): return get_connected_nsxlib(nsx_username, nsx_password, use_basic_auth, plugin_conf).client def get_connected_nsxlib(nsx_username=None, nsx_password=None, use_basic_auth=False, plugin_conf=None): global _NSXLIB # for non-default agruments, initiate new lib if nsx_username or use_basic_auth: return v3_utils.get_nsxlib_wrapper(nsx_username, nsx_password, use_basic_auth, plugin_conf) if _NSXLIB is None: _NSXLIB = v3_utils.get_nsxlib_wrapper(plugin_conf=plugin_conf) return _NSXLIB def get_plugin_filters(context): return admin_utils.get_plugin_filters( context, projectpluginmap.NsxPlugins.NSX_T) class NeutronDbClient(db_base_plugin_v2.NeutronDbPluginV2): def __init__(self): super(NeutronDbClient, self).__init__() self.context = context.get_admin_context() self.filters = get_plugin_filters(self.context) def _update_filters(self, requested_filters): filters = self.filters.copy() if requested_filters: filters.update(requested_filters) return filters def get_ports(self, filters=None, fields=None): filters = self._update_filters(filters) return super(NeutronDbClient, self).get_ports( self.context, filters=filters, fields=fields) def get_networks(self, filters=None, fields=None): filters = 
self._update_filters(filters) return super(NeutronDbClient, self).get_networks( self.context, filters=filters, fields=fields) def get_network(self, context, network_id): if not context: context = self.context return super(NeutronDbClient, self).get_network(context, network_id) def get_subnet(self, context, subnet_id): if not context: context = self.context return super(NeutronDbClient, self).get_subnet(context, subnet_id) def get_lswitch_and_lport_id(self, port_id): return nsx_db.get_nsx_switch_and_port_id(self.context.session, port_id) def net_id_to_lswitch_id(self, net_id): lswitch_ids = nsx_db.get_nsx_switch_ids(self.context.session, net_id) return lswitch_ids[0] if lswitch_ids else None def add_dhcp_service_binding(self, network_id, port_id, server_id): return nsx_db.add_neutron_nsx_service_binding( self.context.session, network_id, port_id, nsx_constants.SERVICE_DHCP, server_id) def add_dhcp_static_binding(self, port_id, subnet_id, ip_address, server_id, binding_id): return nsx_db.add_neutron_nsx_dhcp_binding( self.context.session, port_id, subnet_id, ip_address, server_id, binding_id) class NsxV3PluginWrapper(plugin.NsxV3Plugin): def __init__(self): # initialize the availability zones config.register_nsxv3_azs(cfg.CONF, cfg.CONF.nsx_v3.availability_zones) super(NsxV3PluginWrapper, self).__init__() self.context = context.get_admin_context() def __enter__(self): directory.add_plugin(const.CORE, self) return self def __exit__(self, exc_type, exc_value, traceback): directory.add_plugin(const.CORE, None) def _init_fwaas_plugin(self, provider, callbacks_class, plugin_callbacks): fwaas_plugin_class = manager.NeutronManager.load_class_for_provider( 'neutron.service_plugins', provider) fwaas_plugin = fwaas_plugin_class() self.fwaas_callbacks = callbacks_class(False) # override the fwplugin_rpc since there is no RPC support in adminutils if plugin_callbacks: self.fwaas_callbacks.fwplugin_rpc = plugin_callbacks(fwaas_plugin) self.init_is_complete = True def 
init_fwaas_for_admin_utils(self): # initialize the FWaaS plugin and callbacks self.fwaas_callbacks = None # This is an ugly patch to find out if it is v1 or v2 service_plugins = cfg.CONF.service_plugins for srv_plugin in service_plugins: if 'firewall' in srv_plugin or 'fwaas' in srv_plugin: if 'v2' in srv_plugin: # FWaaS V2 self._init_fwaas_plugin( 'firewall_v2', fwaas_callbacks_v2.Nsxv3FwaasCallbacksV2, None) return def _init_dhcp_metadata(self): pass def _extend_get_network_dict_provider(self, context, net): self._extend_network_dict_provider(context, net) # skip getting the Qos policy ID because get_object calls # plugin init again on admin-util environment def _extend_get_port_dict_binding(self, context, port): self._extend_port_dict_binding(context, port) # skip getting the Qos policy ID because get_object calls # plugin init again on admin-util environment def delete_network(self, context, network_id): if not context: context = self.context return super(NsxV3PluginWrapper, self).delete_network( context, network_id) def remove_router_interface(self, context, router_id, interface): if not context: context = self.context return super(NsxV3PluginWrapper, self).remove_router_interface( context, router_id, interface) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/admin/version.py0000644000175000017500000000120700000000000023626 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. __version__ = '0.1' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/commands.py0000644000175000017500000000506000000000000022653 0ustar00coreycorey00000000000000# Copyright 2014 VMware, Inc. # # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from neutronclient.neutron import v2_0 as client from vmware_nsx._i18n import _ LSN_PATH = '/lsns' def print_report(write_func, report): write_func(_("\nService type = %s\n") % report['report']['type']) services = ','.join(report['report']['services']) ports = ','.join(report['report']['ports']) write_func(_("Service uuids = %s\n") % services) write_func(_("Port uuids = %s\n\n") % ports) class NetworkReport(client.NeutronCommand): """Retrieve network migration report.""" def get_parser(self, prog_name): parser = super(NetworkReport, self).get_parser(prog_name) parser.add_argument('network', metavar='network', help=_('ID or name of network to run report on')) return parser def run(self, parsed_args): net = parsed_args.network net_id = client.find_resourceid_by_name_or_id(self.app.client, 'network', net) res = self.app.client.get("%s/%s" % (LSN_PATH, net_id)) if res: self.app.stdout.write(_('Migration report is:\n')) print_report(self.app.stdout.write, res['lsn']) class NetworkMigrate(client.NeutronCommand): """Perform 
network migration.""" def get_parser(self, prog_name): parser = super(NetworkMigrate, self).get_parser(prog_name) parser.add_argument('network', metavar='network', help=_('ID or name of network to migrate')) return parser def run(self, parsed_args): net = parsed_args.network net_id = client.find_resourceid_by_name_or_id(self.app.client, 'network', net) body = {'network': net_id} res = self.app.client.post(LSN_PATH, body={'lsn': body}) if res: self.app.stdout.write(_('Migration has been successful:\n')) print_report(self.app.stdout.write, res['lsn']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/hk_trigger.sh0000644000175000017500000000225400000000000023163 0ustar00coreycorey00000000000000#!/bin/bash # Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# # # Trigger execution of NSX plugin's housekeeper # NEUTRON_ENDPOINT=`openstack endpoint list | awk '/network/{print $14}'` if [ -z "$NEUTRON_ENDPOINT" ]; then echo "Couldn't locate Neutron endpoint" exit 1 fi AUTH_TOKEN=`openstack token issue | awk '/ id /{print $4}'` if [ -z "$AUTH_TOKEN" ]; then echo "Couldn't acquire authentication token" exit 1 fi curl -X PUT -s -H "X-Auth-Token: $AUTH_TOKEN" -H 'Content-Type: application/json' -H 'Accept: application/json' -d '{"housekeeper": {}}' ${NEUTRON_ENDPOINT}/v2.0/housekeepers/all exit 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/nsx_instance_if_migrate.py0000644000175000017500000002054700000000000025743 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import getopt import logging import re import sys import xml.etree.ElementTree as et from keystoneauth1 import identity from keystoneauth1 import session import libvirt from neutronclient.v2_0 import client import nova.conf CONF = nova.conf.CONF logging.basicConfig(level=logging.INFO) LOG = logging.getLogger(__name__) def usage(): print("python nsx_instance_if_migrate.py --username= " "--password= --project= " "--auth-url= " "[--project-domain-id=] " "[--user-domain-id=] " "[--machine-type=] " "[--nsx-bridge=]\n\n" "Convert libvirt interface definitions on a KVM host, to NSX " "managed vSwitch definitions\n\n" " username: Admin user's username\n" " password: Admin user's password\n" " keystone auth URL: URL to keystone's authentication service\n" " project domain: Keystone project domain\n" " user domain: Keystone user domain\n" " migrated machine type: Overwrites libvirt's machine type\n" " log file: Output log of the command execution\n" " NSX managed vSwitch: vSwitch on host, managed by NSX\n\n") sys.exit() def get_opts(): opts = {} o = [] p = re.compile('^-+') try: o, a = getopt.getopt(sys.argv[1:], 'h', ['help', 'username=', 'password=', 'project=', 'project-domain-id=', 'user-domain-id=', 'auth-url=', 'machine-type=', 'logfile=', 'nsx-bridge=']) except getopt.GetoptError as err: LOG.error(err) usage() for opt, val in o: if opt in ('h', 'help'): usage() else: opts[p.sub('', opt)] = val for mandatory_key in ['username', 'password', 'project', 'auth-url']: if opts.get(mandatory_key) is None: LOG.error("%s must be specified!", mandatory_key) usage() return opts def xmltag_text_get(obj, tag_name): tag_obj = obj.find(tag_name) if tag_obj is not None: return tag_obj.text def xmltag_attr_get(obj, tag, attr): tag_obj = obj.find(tag) if tag_obj is not None: return tag_obj.get(attr) def xmltag_set(elem, tag, **kwargs): sub_elem = elem.find(tag) if sub_elem is None: sub_elem = et.SubElement(elem, tag) for attr in kwargs.keys(): sub_elem.set(attr, kwargs.get(attr)) return 
sub_elem def iface_migrate(neutron, instance_name, iface, nsx_switch): iface.set('type', 'bridge') xmltag_set(iface, 'source', bridge=nsx_switch) virt_port = xmltag_set(iface, 'virtualport', type='openvswitch') instance_mac = xmltag_attr_get(iface, 'mac', 'address') if instance_mac is None: LOG.error("Couldn't find MAC address for instance %s", instance_name) return ports = neutron.list_ports(fields=['id'], mac_address=instance_mac) if len(ports['ports']) != 1: LOG.error('For instance %(vm)s, invalid ports received from neutron: ' '%(ports)s', {'vm': instance_name, 'ports': ports}) return neutron_port_id = ports['ports'][0]['id'] xmltag_set(virt_port, 'parameters', interfaceid=neutron_port_id) xmltag_set(iface, 'driver', name='qemu') tap_dev = xmltag_attr_get(iface, 'target', 'dev') if tap_dev is None: LOG.error("For instance %(vm)s, couldn't find tap device for " "interface", instance_name) # remove script tag if found script_tag = iface.find('script') if script_tag is not None: iface.remove(script_tag) def is_valid_os_data(libvirt_conn, os_type, os_arch, os_machine): caps_xml = libvirt_conn.getCapabilities() caps_root = et.fromstring(caps_xml) for guest_tag in caps_root.findall('guest'): if (xmltag_text_get(guest_tag, 'os_type') == os_type and xmltag_attr_get(guest_tag, 'arch', 'name') == os_arch): for machine_tag in guest_tag.find('arch').findall('machine'): if machine_tag.text == os_machine: return True return False def instance_migrate(libvirt_conn, neutron, instance, machine_type, nsx_switch): xml = instance.XMLDesc() root = et.fromstring(xml) instance_name = xmltag_text_get(root, 'name') if instance_name is None: LOG.error("Couldn't find instance name in XML") return instance_uuid = xmltag_text_get(root, 'uuid') if instance_uuid is None: LOG.error("Couldn't find UUID for instance %s", instance_name) return # Validate that os is supported by hypervisor os_tag = root.find('os') if os_tag is None: LOG.error("Couldn't find OS tag for instance %s", instance_name) 
return type_tag = os_tag.find('type') if not is_valid_os_data(libvirt_conn, type_tag.text, type_tag.get('arch'), type_tag.get('machine')): LOG.error("Instance %s OS data is invalid or not supported by " "hypervisor", instance_name) return if machine_type is not None: type_tag.set('machine', machine_type) devs = root.find('devices') ifaces = devs.findall('interface') if not ifaces: LOG.error('No interfaces to migrate for instance %s', instance_name) for iface in ifaces: iface_migrate(neutron, instance_name, iface, nsx_switch) instance.undefine() libvirt_conn.defineXML(et.tostring(root)) LOG.info('Migrated instance %(vm)s (%(uuid)s) successfully!', {'vm': instance_name, 'uuid': instance_uuid}) def main(): opts = get_opts() if opts.get('logfile'): f_handler = logging.FileHandler(opts.get('logfile')) f_formatter = logging.Formatter( '%(asctime)s %(levelname)s %(message)s') f_handler.setFormatter(f_formatter) LOG.addHandler(f_handler) conn = libvirt.open('qemu:///system') if conn is None: LOG.error('Failed to connect to libvirt') exit(1) auth = identity.Password(username=opts['username'], password=opts['password'], project_name=opts['project'], project_domain_id=opts.get('project-domain-id', 'default'), user_domain_id=opts.get('user-domain-id', 'default'), auth_url=opts['auth-url']) if auth is None: LOG.error('Failed to authenticate with keystone') exit(1) sess = session.Session(auth=auth) if sess is None: LOG.error('Failed to create keystone session') exit(1) neutron = client.Client(session=sess) if neutron is None: LOG.error('Failed to create neutron session') exit(1) instances = conn.listAllDomains() if not instances: LOG.error('No instances to migrate') for instance in instances: try: instance_migrate(conn, neutron, instance, opts.get('machine-type'), opts.get('nsx-bridge', CONF.neutron.ovs_bridge)) except Exception as e: LOG.error('Failed to migrate instance with exception %s', e) if __name__ == "__main__": main() 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/nsxadmin.py0000644000175000017500000001426300000000000022700 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Purpose of this script is to build a framework which can be leveraged to build utilities to help the on-field ops in system debugging. TODO: Use Cliff https://pypi.org/project/cliff TODO: Define commands instead of -r -o like get-security-groups, delete-security-groups, nsx neutron nsxv3 can be options TODO: Autocomplete command line args """ import sys from neutron.common import config as neutron_config from neutron.conf import common as neutron_common_config from neutron_lib.callbacks import registry from oslo_config import cfg from oslo_log import _options from oslo_log import log as logging import requests from vmware_nsx.common import config # noqa from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin import version from vmware_nsx.shell import resources # Suppress the Insecure request warning requests.packages.urllib3.disable_warnings() LOG = logging.getLogger(__name__) def _init_cfg(): # NOTE(gangila): neutron.common.config registers some options by default # which are then shown in the help message. 
We don't need them # so we unregister these options cfg.CONF.unregister_opts(_options.common_cli_opts) cfg.CONF.unregister_opts(_options.logging_cli_opts) cfg.CONF.unregister_opts(neutron_common_config.core_cli_opts) # register must come after above unregister to avoid duplicates cfg.CONF.register_cli_opts(resources.cli_opts) # Init the neutron config neutron_config.init(args=['--config-file', constants.NEUTRON_CONF, '--config-file', constants.NSX_INI]) cfg.CONF(args=sys.argv[1:], project='NSX', prog='Admin Utility', version=version.__version__, usage='nsxadmin -r -o ', default_config_files=[constants.NEUTRON_CONF, constants.NSX_INI]) def _validate_resource_choice(resource, nsx_plugin): if nsx_plugin == 'nsxv' and resource not in resources.nsxv_resources: LOG.error('Supported list of NSX-V resources: %s', resources.nsxv_resources_names) sys.exit(1) elif nsx_plugin == 'nsxv3'and resource not in resources.nsxv3_resources: LOG.error('Supported list of NSX-V3 resources: %s', resources.nsxv3_resources_names) sys.exit(1) elif nsx_plugin == 'nsxtvd'and resource not in resources.nsxtvd_resources: LOG.error('Supported list of NSX-TVD resources: %s', resources.nsxtvd_resources_names) sys.exit(1) elif nsx_plugin == 'nsxp'and resource not in resources.nsxp_resources: LOG.error('Supported list of NSX-P resources: %s', resources.nsxp_resources_names) sys.exit(1) def _validate_op_choice(choice, nsx_plugin): if nsx_plugin == 'nsxv': supported_resource_ops = \ resources.nsxv_resources[cfg.CONF.resource].supported_ops if choice not in supported_resource_ops: LOG.error('Supported list of operations for the NSX-V ' 'resource %s', supported_resource_ops) sys.exit(1) elif nsx_plugin == 'nsxv3': supported_resource_ops = \ resources.nsxv3_resources[cfg.CONF.resource].supported_ops if choice not in supported_resource_ops: LOG.error('Supported list of operations for the NSX-V3 ' 'resource %s', supported_resource_ops) sys.exit(1) elif nsx_plugin == 'nsxtvd': supported_resource_ops = \ 
resources.nsxtvd_resources[cfg.CONF.resource].supported_ops if choice not in supported_resource_ops: LOG.error('Supported list of operations for the NSX-TVD ' 'resource %s', supported_resource_ops) sys.exit(1) elif nsx_plugin == 'nsxp': supported_resource_ops = \ resources.nsxp_resources[cfg.CONF.resource].supported_ops if choice not in supported_resource_ops: LOG.error('Supported list of operations for the NSX-P ' 'resource %s', supported_resource_ops) sys.exit(1) def _validate_plugin_choice(selected_plugin, nsx_plugin): if nsx_plugin == 'nsxtvd': if selected_plugin: if selected_plugin != 'nsxv' and selected_plugin != 'nsxv3': LOG.error('Illegal plugin %s. please select nsxv or nsxv3', selected_plugin) sys.exit(1) # use nsxv or nsxv3 plugins return selected_plugin else: # use the TVD pluging return nsx_plugin else: if selected_plugin: LOG.error('Cannot select plugin. The current plugin is %s', nsx_plugin) sys.exit(1) return nsx_plugin def main(argv=sys.argv[1:]): _init_cfg() nsx_plugin_in_use = resources.get_plugin() LOG.info('NSX Plugin in use: %s', nsx_plugin_in_use) # the user can select the specific plugin selected_plugin = _validate_plugin_choice(cfg.CONF.plugin, nsx_plugin_in_use) resources.init_resource_plugin( selected_plugin, resources.get_plugin_dir(selected_plugin)) _validate_resource_choice(cfg.CONF.resource, selected_plugin) _validate_op_choice(cfg.CONF.operation, selected_plugin) registry.notify(cfg.CONF.resource, cfg.CONF.operation, 'nsxadmin', force=cfg.CONF.force, property=cfg.CONF.property, verbose=cfg.CONF.verbose) if __name__ == "__main__": sys.exit(main(sys.argv[1:])) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/shell/resources.py0000644000175000017500000004345100000000000023072 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import enum import glob import importlib import os from oslo_config import cfg from oslo_log import log as logging import requests from vmware_nsx.common import config # noqa from vmware_nsx.shell.admin.plugins.common import constants # Suppress the Insecure request warning requests.packages.urllib3.disable_warnings() LOG = logging.getLogger(__name__) class Operations(enum.Enum): LIST = 'list' SET = 'set' CLEAN = 'clean' CLEAN_ALL = 'clean-all' CREATE = 'create' DELETE = 'delete' LIST_MISMATCHES = 'list-mismatches' FIX_MISMATCH = 'fix-mismatch' LIST_UNUSED = 'list-unused' NEUTRON_LIST = 'neutron-list' NEUTRON_CLEAN = 'neutron-clean' NEUTRON_UPDATE = 'neutron-update' NSX_LIST = 'nsx-list' NSX_CLEAN = 'nsx-clean' NSX_UPDATE = 'nsx-update' NSX_UPDATE_ALL = 'nsx-update-all' NSX_UPDATE_SECRET = 'nsx-update-secret' NSX_UPDATE_RULES = 'nsx-update-rules' NSX_UPDATE_DHCP_RELAY = 'nsx-update-dhcp-relay' NSX_UPDATE_STATE = 'nsx-update-state' NSX_ENABLE_STANDBY_RELOCATION = 'nsx-enable-standby-relocation' NSX_UPDATE_IP = 'nsx-update-ip' NSX_RECREATE = 'nsx-recreate' NSX_REDISTRIBURE = 'nsx-redistribute' NSX_REORDER = 'nsx-reorder' NSX_TAG_DEFAULT = 'nsx-tag-default' MIGRATE_TO_DYNAMIC_CRITERIA = 'migrate-to-dynamic-criteria' NSX_MIGRATE_V_V3 = 'nsx-migrate-v-v3' MIGRATE_TO_POLICY = 'migrate-to-policy' LIST_POLICIES = 'list-policies' UPDATE_LOGGING = 'update-logging' NSX_MIGRATE_EXCLUDE_PORTS = 'migrate-exclude-ports' MIGRATE_VDR_DHCP = 
'migrate-vdr-dhcp' STATUS = 'status' GENERATE = 'generate' IMPORT = 'import' SHOW = 'show' VALIDATE = 'validate' REUSE = 'reuse' UPDATE_TIER0 = 'update-tier0' UPDATE_FIREWALL_MATCH = 'update-nat-firewall-match' ops = [op.value for op in Operations] class Resource(object): def __init__(self, name, ops): self.name = name self.supported_ops = ops # Add supported NSX-V3 resources in this dictionary nsxv3_resources = { constants.SECURITY_GROUPS: Resource(constants.SECURITY_GROUPS, [Operations.LIST.value, Operations.FIX_MISMATCH.value, Operations.UPDATE_LOGGING.value]), constants.FIREWALL_SECTIONS: Resource(constants.FIREWALL_SECTIONS, [Operations.LIST.value, Operations.LIST_MISMATCHES.value, Operations.REUSE.value]), constants.FIREWALL_NSX_GROUPS: Resource( constants.FIREWALL_NSX_GROUPS, [ Operations.LIST.value, Operations.LIST_MISMATCHES.value, Operations.MIGRATE_TO_DYNAMIC_CRITERIA.value]), constants.ORPHANED_FIREWALL_SECTIONS: Resource( constants.ORPHANED_FIREWALL_SECTIONS, [ Operations.NSX_LIST.value, Operations.NSX_CLEAN.value]), constants.NETWORKS: Resource(constants.NETWORKS, [Operations.LIST_MISMATCHES.value]), constants.PORTS: Resource(constants.PORTS, [Operations.LIST_MISMATCHES.value, Operations.NSX_TAG_DEFAULT.value, Operations.NSX_MIGRATE_V_V3.value, Operations.NSX_MIGRATE_EXCLUDE_PORTS.value]), constants.ROUTERS: Resource( constants.ROUTERS, [ Operations.LIST_MISMATCHES.value, Operations.NSX_UPDATE_RULES.value, Operations.NSX_UPDATE_DHCP_RELAY.value, Operations.NSX_ENABLE_STANDBY_RELOCATION.value, Operations.UPDATE_TIER0.value]), constants.DHCP_BINDING: Resource(constants.DHCP_BINDING, [Operations.LIST.value, Operations.NSX_UPDATE.value, Operations.NSX_RECREATE.value]), constants.METADATA_PROXY: Resource(constants.METADATA_PROXY, [Operations.LIST.value, Operations.NSX_UPDATE.value, Operations.NSX_UPDATE_IP.value]), constants.ORPHANED_DHCP_SERVERS: Resource(constants.ORPHANED_DHCP_SERVERS, [Operations.NSX_LIST.value, Operations.NSX_CLEAN.value]), 
constants.CERTIFICATE: Resource(constants.CERTIFICATE, [Operations.GENERATE.value, Operations.SHOW.value, Operations.CLEAN.value, Operations.IMPORT.value, Operations.NSX_LIST.value]), constants.CONFIG: Resource(constants.CONFIG, [Operations.VALIDATE.value]), constants.ORPHANED_NETWORKS: Resource(constants.ORPHANED_NETWORKS, [Operations.LIST.value, Operations.NSX_CLEAN.value]), constants.ORPHANED_ROUTERS: Resource(constants.ORPHANED_ROUTERS, [Operations.LIST.value, Operations.NSX_CLEAN.value]), constants.LB_SERVICES: Resource(constants.LB_SERVICES, [Operations.LIST.value]), constants.LB_VIRTUAL_SERVERS: Resource(constants.LB_VIRTUAL_SERVERS, [Operations.LIST.value]), constants.LB_POOLS: Resource(constants.LB_POOLS, [Operations.LIST.value]), constants.LB_MONITORS: Resource(constants.LB_MONITORS, [Operations.LIST.value]), constants.RATE_LIMIT: Resource(constants.RATE_LIMIT, [Operations.SHOW.value, Operations.NSX_UPDATE.value]), constants.LB_ADVERTISEMENT: Resource(constants.LB_ADVERTISEMENT, [Operations.NSX_UPDATE.value]), constants.CLUSTER: Resource(constants.CLUSTER, [Operations.SHOW.value]) } # Add supported NSX-V resources in this dictionary nsxv_resources = { constants.EDGES: Resource(constants.EDGES, [Operations.NSX_LIST.value, Operations.NEUTRON_LIST.value, Operations.NSX_UPDATE.value, Operations.NSX_UPDATE_ALL.value]), constants.BACKUP_EDGES: Resource(constants.BACKUP_EDGES, [Operations.LIST.value, Operations.CLEAN.value, Operations.CLEAN_ALL.value, Operations.LIST_MISMATCHES.value, Operations.FIX_MISMATCH.value, Operations.NEUTRON_CLEAN.value]), constants.ORPHANED_EDGES: Resource(constants.ORPHANED_EDGES, [Operations.LIST.value, Operations.CLEAN.value]), constants.ORPHANED_BINDINGS: Resource(constants.ORPHANED_BINDINGS, [Operations.LIST.value, Operations.CLEAN.value]), constants.MISSING_EDGES: Resource(constants.MISSING_EDGES, [Operations.LIST.value]), constants.SPOOFGUARD_POLICY: Resource(constants.SPOOFGUARD_POLICY, [Operations.LIST.value, 
Operations.CLEAN.value, Operations.LIST_MISMATCHES.value, Operations.FIX_MISMATCH.value]), constants.DHCP_BINDING: Resource(constants.DHCP_BINDING, [Operations.LIST.value, Operations.NSX_UPDATE.value, Operations.NSX_REDISTRIBURE.value, Operations.NSX_RECREATE.value]), constants.NETWORKS: Resource(constants.NETWORKS, [Operations.LIST.value, Operations.NSX_UPDATE.value]), constants.MISSING_NETWORKS: Resource(constants.MISSING_NETWORKS, [Operations.LIST.value]), constants.ORPHANED_NETWORKS: Resource(constants.ORPHANED_NETWORKS, [Operations.LIST.value, Operations.NSX_CLEAN.value]), constants.NSX_PORTGROUPS: Resource(constants.NSX_PORTGROUPS, [Operations.LIST.value, Operations.NSX_CLEAN.value]), constants.SECURITY_GROUPS: Resource(constants.SECURITY_GROUPS, [Operations.LIST.value, Operations.FIX_MISMATCH.value, Operations.MIGRATE_TO_POLICY.value, Operations.LIST_POLICIES.value, Operations.UPDATE_LOGGING.value]), constants.FIREWALL_NSX_GROUPS: Resource( constants.FIREWALL_NSX_GROUPS, [Operations.LIST.value, Operations.LIST_MISMATCHES.value]), constants.FIREWALL_SECTIONS: Resource(constants.FIREWALL_SECTIONS, [Operations.LIST.value, Operations.LIST_MISMATCHES.value, Operations.NSX_UPDATE.value, Operations.NSX_REORDER.value, Operations.LIST_UNUSED.value, Operations.NSX_CLEAN.value]), constants.ORPHANED_RULES: Resource(constants.ORPHANED_RULES, [Operations.LIST.value, Operations.NSX_CLEAN.value]), constants.METADATA: Resource( constants.METADATA, [Operations.NSX_UPDATE.value, Operations.NSX_UPDATE_SECRET.value, Operations.STATUS.value]), constants.ROUTERS: Resource(constants.ROUTERS, [Operations.NSX_RECREATE.value, Operations.NSX_REDISTRIBURE.value, Operations.MIGRATE_VDR_DHCP.value]), constants.ORPHANED_VNICS: Resource(constants.ORPHANED_VNICS, [Operations.NSX_LIST.value, Operations.NSX_CLEAN.value]), constants.CONFIG: Resource(constants.CONFIG, [Operations.VALIDATE.value]), constants.BGP_GW_EDGE: Resource(constants.BGP_GW_EDGE, [Operations.CREATE.value, 
Operations.DELETE.value, Operations.LIST.value]), constants.ROUTING_REDIS_RULE: Resource(constants.ROUTING_REDIS_RULE, [Operations.CREATE.value, Operations.DELETE.value]), constants.BGP_NEIGHBOUR: Resource(constants.BGP_NEIGHBOUR, [Operations.CREATE.value, Operations.DELETE.value]), constants.NSX_MIGRATE_V_T: Resource(constants.NSX_MIGRATE_V_T, [Operations.VALIDATE.value]), } # Add supported NSX-TVD resources in this dictionary nsxtvd_resources = { constants.PROJECTS: Resource(constants.PROJECTS, [Operations.IMPORT.value, Operations.NSX_MIGRATE_V_V3.value]), } nsxp_resources = { constants.SECURITY_GROUPS: Resource(constants.SECURITY_GROUPS, [Operations.LIST.value]), constants.NETWORKS: Resource(constants.NETWORKS, [Operations.LIST.value, Operations.NSX_UPDATE_STATE.value]), constants.DHCP_BINDING: Resource(constants.DHCP_BINDING, [Operations.MIGRATE_TO_POLICY.value]), constants.ROUTERS: Resource(constants.ROUTERS, [Operations.LIST.value, Operations.UPDATE_TIER0.value, Operations.UPDATE_FIREWALL_MATCH.value]), constants.CERTIFICATE: Resource(constants.CERTIFICATE, [Operations.GENERATE.value, Operations.SHOW.value, Operations.CLEAN.value, Operations.IMPORT.value, Operations.NSX_LIST.value]), constants.SYSTEM: Resource(constants.SYSTEM, [Operations.SET.value]), } nsxv3_resources_names = list(nsxv3_resources.keys()) nsxv_resources_names = list(nsxv_resources.keys()) nsxtvd_resources_names = list(nsxtvd_resources.keys()) nsxp_resources_names = list(nsxp_resources.keys()) def get_resources(plugin_dir): modules = glob.glob(plugin_dir + "/*.py") return map(lambda module: os.path.splitext(os.path.basename(module))[0], modules) def get_plugin(): plugin = cfg.CONF.core_plugin plugin_name = '' if plugin in (constants.NSXV3_PLUGIN, constants.VMWARE_NSXV3): plugin_name = 'nsxv3' elif plugin in (constants.NSXV_PLUGIN, constants.VMWARE_NSXV): plugin_name = 'nsxv' elif plugin in (constants.NSXTVD_PLUGIN, constants.VMWARE_NSXTVD): plugin_name = 'nsxtvd' elif plugin in 
(constants.NSXP_PLUGIN, constants.VMWARE_NSXP): plugin_name = 'nsxp' return plugin_name def _get_choices(): plugin = get_plugin() if plugin == 'nsxv3': return nsxv3_resources_names elif plugin == 'nsxv': return nsxv_resources_names elif plugin == 'nsxtvd': return nsxtvd_resources_names def _get_resources(): plugin = get_plugin() if plugin == 'nsxv3': return 'NSX-V3 resources: %s' % (', '.join(nsxv3_resources_names)) elif plugin == 'nsxv': return 'NSX-V resources: %s' % (', '.join(nsxv_resources_names)) elif plugin == 'nsxtvd': return 'NSX-TVD resources: %s' % (', '.join(nsxtvd_resources_names)) cli_opts = [cfg.StrOpt('fmt', short='f', default='psql', choices=['psql', 'json'], help='Supported output formats: json, psql'), cfg.StrOpt('resource', short='r', choices=_get_choices(), help=_get_resources()), cfg.StrOpt('operation', short='o', help='Supported list of operations: {}' .format(', '.join(ops))), cfg.StrOpt('plugin', help='nsxv or nsxv3 if the tvd plugin is used'), cfg.BoolOpt('force', default=False, help='Enables \'force\' mode. No confirmations will ' 'be made before deletions.'), cfg.MultiStrOpt('property', short='p', help='Key-value pair containing the information ' 'to be updated. 
For ex: key=value.'), cfg.BoolOpt('verbose', short='v', default=False, help='Triggers detailed output for some commands') ] # Describe dependencies between admin utils resources and external libraries # that are not always installed resources_dependencies = { 'nsxv': {'gw_edges': ['neutron_dynamic_routing.extensions']}} def verify_external_dependencies(plugin_name, resource): if plugin_name in resources_dependencies: deps = resources_dependencies[plugin_name] if resource in deps: for d in deps[resource]: try: importlib.import_module(d) except ImportError: return False return True def init_resource_plugin(plugin_name, plugin_dir): plugin_resources = get_resources(plugin_dir) for resource in plugin_resources: if (resource != '__init__'): # skip unsupported resources if not verify_external_dependencies(plugin_name, resource): LOG.info("Skipping resource %s because of dependencies", resource) continue # load the resource importlib.import_module( "vmware_nsx.shell.admin.plugins." "{}.resources.".format(plugin_name) + resource) def get_plugin_dir(plugin_name): plugin_dir = (os.path.dirname(os.path.realpath(__file__)) + "/admin/plugins") return '{}/{}/resources'.format(plugin_dir, plugin_name) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2302547 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/0000755000175000017500000000000000000000000020532 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/__init__.py0000644000175000017500000000000000000000000022631 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2302547 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/functional/0000755000175000017500000000000000000000000022674 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/functional/__init__.py0000644000175000017500000000000000000000000024773 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/functional/requirements.txt0000644000175000017500000000045200000000000026161 0ustar00coreycorey00000000000000# Additional requirements for functional tests # The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. psycopg2 PyMySQL>=0.6.2 # MIT License ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2302547 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/0000755000175000017500000000000000000000000021511 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/__init__.py0000644000175000017500000000451400000000000023626 0ustar00coreycorey00000000000000# Copyright 2013 OpenStack Foundation. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import time import mock from vmware_nsx.api_client import client as nsx_client from vmware_nsx.api_client import eventlet_client from vmware_nsx import extensions import vmware_nsx.plugin as neutron_plugin from vmware_nsx.plugins.nsx_v.vshield.common import ( VcnsApiClient as vcnsapi) from vmware_nsx.plugins.nsx_v.vshield import edge_utils from vmware_nsx.plugins.nsx_v.vshield import vcns import vmware_nsx.plugins.nsx_v.vshield.vcns_driver as vcnsdriver plugin = neutron_plugin.NsxV3Plugin api_client = nsx_client.NsxApiClient evt_client = eventlet_client.EventletApiClient vcns_class = vcns.Vcns vcns_driver = vcnsdriver.VcnsDriver vcns_api_helper = vcnsapi.VcnsApiHelper edge_manage_class = edge_utils.EdgeManager STUBS_PATH = os.path.join(os.path.dirname(__file__), 'etc') NSXEXT_PATH = os.path.dirname(extensions.__file__) NSXAPI_NAME = '%s.%s' % (api_client.__module__, api_client.__name__) PLUGIN_NAME = '%s.%s' % (plugin.__module__, plugin.__name__) CLIENT_NAME = '%s.%s' % (evt_client.__module__, evt_client.__name__) VCNS_NAME = '%s.%s' % (vcns_class.__module__, vcns_class.__name__) VCNS_DRIVER_NAME = '%s.%s' % (vcns_driver.__module__, vcns_driver.__name__) VCNSAPI_NAME = '%s.%s' % (vcns_api_helper.__module__, vcns_api_helper.__name__) EDGE_MANAGE_NAME = '%s.%s' % (edge_manage_class.__module__, edge_manage_class.__name__) # Mock for the tenacity retrying sleeping method mocked_retry_sleep = mock.patch.object(time, 'sleep') mocked_retry_sleep.start() def get_fake_conf(filename): return os.path.join(STUBS_PATH, filename) def nsx_method(method_name, module_name='nsxlib'): return '%s.%s.%s' % ('vmware_nsx', module_name, method_name) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2302547 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/common_plugin/0000755000175000017500000000000000000000000024357 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/common_plugin/__init__.py0000644000175000017500000000000000000000000026456 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/common_plugin/common_v3.py0000644000175000017500000003714000000000000026636 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import decorator from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin from neutron_lib import constants class FixExternalNetBaseTest(object): """Base class providing utilities for handling tests which require updating a network to be external, which is not supported for the NSX-v3 and NSX-P plugins. 
""" def setUp(self, *args, **kwargs): self.original_subnet = self.subnet self.original_create_subnet = self._create_subnet self.original_network = self.network self.subnet_calls = [] super(FixExternalNetBaseTest, self).setUp(*args, **kwargs) def _set_net_external(self, net_id): # This action is not supported by the V3 plugin pass def _create_external_network(self): data = {'network': {'name': 'net1', 'router:external': 'True', 'tenant_id': 'tenant_one', 'provider:physical_network': 'stam'}} network_req = self.new_create_request('networks', data) network = self.deserialize(self.fmt, network_req.get_response(self.api)) return network def external_subnet(self, network=None, **kwargs): # External subnet ,ust have dhcp disabled kwargs['enable_dhcp'] = False if network: return self.original_subnet(network=network, **kwargs) ext_net = self._create_external_network() return self.original_subnet(network=ext_net, **kwargs) def create_external_subnet(self, *args, **kwargs): kwargs['enable_dhcp'] = False return super(FixExternalNetBaseTest, self)._create_subnet( *args, **kwargs) def no_dhcp_subnet(self, *args, **kwargs): if 'enable_dhcp' in kwargs: return self.original_subnet(*args, **kwargs) return self.original_subnet(*args, enable_dhcp=False, **kwargs) def external_subnet_by_list(self, *args, **kwargs): if len(self.subnet_calls) > 0: result = self.subnet_calls[0](*args, **kwargs) del self.subnet_calls[0] else: # back to normal self.subnet = self.original_subnet result = self.subnet(*args, **kwargs) return result @contextlib.contextmanager def floatingip_with_assoc(self, port_id=None, fmt=None, fixed_ip=None, public_cidr='11.0.0.0/24', set_context=False, tenant_id=None, **kwargs): # Override super implementation to avoid changing the network to # external after creation with self._create_l3_ext_network() as ext_net,\ self.subnet(network=ext_net, cidr=public_cidr, set_context=set_context, tenant_id=tenant_id, enable_dhcp=False) as public_sub: private_port = None if port_id: 
private_port = self._show('ports', port_id) with test_plugin.optional_ctx( private_port, self.port, set_context=set_context, tenant_id=tenant_id) as private_port: with self.router(set_context=set_context, tenant_id=tenant_id) as r: sid = private_port['port']['fixed_ips'][0]['subnet_id'] private_sub = {'subnet': {'id': sid}} floatingip = None self._add_external_gateway_to_router( r['router']['id'], public_sub['subnet']['network_id']) self._router_interface_action( 'add', r['router']['id'], private_sub['subnet']['id'], None) floatingip = self._make_floatingip( fmt or self.fmt, public_sub['subnet']['network_id'], port_id=private_port['port']['id'], fixed_ip=fixed_ip, tenant_id=tenant_id, set_context=set_context, **kwargs) yield floatingip if floatingip: self._delete('floatingips', floatingip['floatingip']['id']) @contextlib.contextmanager def floatingip_no_assoc(self, private_sub, fmt=None, set_context=False, flavor_id=None, **kwargs): # override super code to create an external subnet in advanced with self.external_subnet(cidr='12.0.0.0/24') as public_sub: with self.floatingip_no_assoc_with_public_sub( private_sub, fmt, set_context, public_sub, flavor_id, **kwargs) as (f, r): # Yield only the floating ip object yield f # Override subnet/network creation in some tests to create external # networks immediately instead of updating it post creation, which the # v3 plugin does not support @decorator.decorator def with_external_subnet(f, *args, **kwargs): obj = args[0] obj.subnet = obj.external_subnet result = f(*args, **kwargs) obj.subnet = obj.original_subnet return result @decorator.decorator def with_disable_dhcp(f, *args, **kwargs): obj = args[0] obj.force_disable_dhcp = True result = f(*args, **kwargs) obj.force_disable_dhcp = False return result @decorator.decorator def with_disable_dhcp_once(f, *args, **kwargs): obj = args[0] obj.force_disable_dhcp = True obj.force_disable_dhcp_once = True result = f(*args, **kwargs) obj.force_disable_dhcp = False 
obj.force_disable_dhcp_once = False return result def init_subnet_calls(self, n): self.subnet_calls = [] for i in range(0, n - 1): self.subnet_calls.append(self.subnet) self.subnet_calls.append(self.external_subnet) def call_with_subnet_calls(self, f, *args, **kwargs): self.subnet = self.external_subnet_by_list result = f(*args, **kwargs) self.subnet = self.original_subnet return result @decorator.decorator def with_external_subnet_once(f, *args, **kwargs): obj = args[0] init_subnet_calls(obj, 1) return call_with_subnet_calls(obj, f, *args, **kwargs) @decorator.decorator def with_external_subnet_second_time(f, *args, **kwargs): obj = args[0] init_subnet_calls(obj, 2) return call_with_subnet_calls(obj, f, *args, **kwargs) @decorator.decorator def with_external_subnet_third_time(f, *args, **kwargs): obj = args[0] init_subnet_calls(obj, 3) return call_with_subnet_calls(obj, f, *args, **kwargs) @decorator.decorator def with_external_network(f, *args, **kwargs): obj = args[0] obj.network = obj.external_network obj.subnet = obj.external_subnet obj._create_subnet = obj.create_external_subnet result = f(*args, **kwargs) obj._create_subnet = obj.original_create_subnet obj.subnet = obj.original_subnet obj.network = obj.original_network return result # Override subnet creation in some tests to create a subnet with dhcp # disabled @decorator.decorator def with_no_dhcp_subnet(f, *args, **kwargs): obj = args[0] obj.subnet = obj.no_dhcp_subnet result = f(*args, **kwargs) obj.subnet = obj.original_subnet return result # TODO(annak): remove this when DHCPv6 is supported @decorator.decorator def with_force_slaac(f, *args, **kwargs): obj = args[0] obj.force_slaac = True result = f(*args, **kwargs) obj.force_slaac = False return result class NsxV3SubnetMixin(object): def setUp(self, *args, **kwargs): super(NsxV3SubnetMixin, self).setUp(*args, **kwargs) self.force_slaac = False self.force_disable_dhcp = False self.force_disable_dhcp_once = False def _test_create_subnet(self, 
network=None, expected=None, **kwargs): # Until DHCPv6 is supported, switch all test to slaac-only if (self.force_slaac and 'ipv6_ra_mode' in kwargs and 'ipv6_address_mode' in kwargs): kwargs['ipv6_ra_mode'] = constants.IPV6_SLAAC kwargs['ipv6_address_mode'] = constants.IPV6_SLAAC return super(NsxV3SubnetMixin, self)._test_create_subnet(network, expected, **kwargs) def _create_subnet(self, fmt, net_id, cidr, expected_res_status=None, **kwargs): if self.force_disable_dhcp: kwargs['enable_dhcp'] = False if self.force_disable_dhcp_once: self.force_disable_dhcp = False return super(NsxV3SubnetMixin, self)._create_subnet( fmt, net_id, cidr, expected_res_status, **kwargs) class NsxV3TestSubnets(NsxV3SubnetMixin, test_plugin.TestSubnetsV2): @with_disable_dhcp def test_list_subnets_filtering_by_project_id(self): super(NsxV3TestSubnets, self).test_list_subnets_filtering_by_project_id() @with_disable_dhcp def test_list_subnets_filtering_by_cidr_used_on_create(self): super(NsxV3TestSubnets, self).test_list_subnets_filtering_by_cidr_used_on_create() @with_disable_dhcp def test_list_subnets(self): super(NsxV3TestSubnets, self).test_list_subnets() @with_disable_dhcp def test_list_subnets_with_parameter(self): super(NsxV3TestSubnets, self).test_list_subnets_with_parameter() def test_create_subnet_ipv6_pd_gw_values(self): self.skipTest('Test not suited to the plugin DHCP code') def test_create_subnet_ipv6_slaac_with_port_not_found(self): self.skipTest('Test not suited to the plugin DHCP code') def test_bulk_create_subnet_ipv6_auto_addr_with_port_on_network(self): self.skipTest('No Multiple v6 subnets support yet') def test_create_subnet_dhcpv6_stateless_with_ip_already_allocated(self): self.skipTest('Test not suited to the plugin DHCP code') def test_create_subnet_ipv6_slaac_with_dhcp_port_on_network(self): self.skipTest('Test not suited to the plugin DHCP code') def test_create_subnet_dhcpv6_stateless_with_port_on_network(self): self.skipTest('Test not suited to the plugin DHCP 
code') def test_delete_subnet_port_exists_owned_by_network(self): self.skipTest('Test not suited to the plugin DHCP code') def test_create_subnets_bulk_native_ipv6(self): self.skipTest('Multiple IPv6 subnets on one network is not supported') @with_disable_dhcp def test_update_subnet_inconsistent_ipv6_hostroute_dst_v4(self): super(NsxV3TestSubnets, self).test_update_subnet_inconsistent_ipv6_hostroute_dst_v4() @with_disable_dhcp def test_create_two_subnets(self): super(NsxV3TestSubnets, self).test_create_two_subnets() @with_disable_dhcp def test_create_subnets_bulk_emulated(self): super(NsxV3TestSubnets, self).test_create_subnets_bulk_emulated() @with_disable_dhcp def test_create_subnets_bulk_native(self): super(NsxV3TestSubnets, self).test_create_subnets_bulk_native() @with_disable_dhcp def test_get_subnets_count(self): super(NsxV3TestSubnets, self).test_get_subnets_count() @with_disable_dhcp def test_get_subnets_count_filter_by_project_id(self): super(NsxV3TestSubnets, self).test_get_subnets_count_filter_by_project_id() @with_disable_dhcp def test_get_subnets_count_filter_by_unknown_filter(self): super(NsxV3TestSubnets, self).test_get_subnets_count_filter_by_unknown_filter() @with_disable_dhcp def test_delete_subnet_dhcp_port_associated_with_other_subnets(self): super(NsxV3TestSubnets, self).test_get_subnets_count_filter_by_unknown_filter() @with_disable_dhcp def test_delete_subnet_with_other_subnet_on_network_still_in_use(self): super(NsxV3TestSubnets, self).\ test_delete_subnet_with_other_subnet_on_network_still_in_use() @with_force_slaac def test_create_subnet_ipv6_gw_values(self): super(NsxV3TestSubnets, self).test_create_subnet_ipv6_gw_values() @with_force_slaac def test_create_subnet_ipv6_out_of_cidr_global(self): super(NsxV3TestSubnets, self).test_create_subnet_ipv6_out_of_cidr_global() @with_disable_dhcp def test_update_subnet_inconsistent_ipv6_gatewayv4(self): super(NsxV3TestSubnets, self).test_update_subnet_inconsistent_ipv6_gatewayv4() @with_disable_dhcp 
def test_update_subnet_inconsistent_ipv6_hostroute_np_v4(self): super(NsxV3TestSubnets, self).test_update_subnet_inconsistent_ipv6_hostroute_np_v4() def test_subnet_update_ipv4_and_ipv6_pd_v6stateless_subnets(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_subnet_update_ipv4_and_ipv6_pd_slaac_subnets(self): self.skipTest('Multiple fixed ips on a port are not supported') class NsxV3TestPorts(test_plugin.TestPortsV2): def test_create_port_with_ipv6_dhcp_stateful_subnet_in_fixed_ips(self): self.skipTest('No DHCP v6 Support yet') def test_update_port_update_ip_address_only(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_update_port_with_new_ipv6_slaac_subnet_in_fixed_ips(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_update_port_mac_v6_slaac(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_requested_invalid_fixed_ips(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_requested_subnet_id_v4_and_v6_slaac(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_range_allocation(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_create_port_anticipating_allocation(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_update_port_add_additional_ip(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_delete_network_port_exists_owned_by_network_race(self): self.skipTest('Skip need to address in future') def test_delete_network_port_exists_owned_by_network_port_not_found(self): self.skipTest('Skip need to address in future') def test_delete_network_port_exists_owned_by_network(self): self.skipTest('Skip need to address in future') def test_duplicate_mac_generation(self): self.skipTest('No DHCP v6 Support yet') @with_disable_dhcp def test_update_port_update_ip(self): return super(NsxV3TestPorts, 
self).test_update_port_update_ip() def test_create_router_port_ipv4_and_ipv6_slaac_no_fixed_ips(self): self.skipTest('No DHCP v6 Support yet') def test_ip_allocation_for_ipv6_2_subnet_slaac_mode(self): self.skipTest('Only one ipv6 subnet per network is supported') def test_create_port_with_multiple_ipv4_and_ipv6_subnets(self): self.skipTest('Only one ipv6 subnet per network is supported') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/common_plugin/test_housekeeper.py0000644000175000017500000001354700000000000030321 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from neutron.tests import base from neutron_lib import exceptions as n_exc from vmware_nsx.plugins.common.housekeeper import base_job from vmware_nsx.plugins.common.housekeeper import housekeeper class TestJob1(base_job.BaseJob): def __init__(self, global_readonly, readonly_jobs): super(TestJob1, self).__init__(global_readonly, readonly_jobs) def get_name(self): return 'test_job1' def get_project_plugin(self, plugin): return 'Dummy' def get_description(self): return 'test' def run(self, context, readonly=False): pass class TestJob2(TestJob1): def get_name(self): return 'test_job2' class TestHousekeeper(base.BaseTestCase): def setUp(self): self.jobs = ['test_job1', 'test_job2'] self.readonly_jobs = ['test_job1'] self.readonly = False self.housekeeper = housekeeper.NsxHousekeeper( hk_ns='stevedore.test.extension', hk_jobs=self.jobs, hk_readonly=self.readonly, hk_readonly_jobs=self.readonly_jobs) self.job1 = TestJob1(self.readonly, self.readonly_jobs) self.job2 = TestJob2(self.readonly, self.readonly_jobs) self.housekeeper.jobs = {'test_job1': self.job1, 'test_job2': self.job2} self.context = mock.Mock() self.context.session = mock.Mock() super(TestHousekeeper, self).setUp() def test_run_job_readonly(self): with mock.patch.object(self.job1, 'run') as run1,\ mock.patch.object(self.job2, 'run') as run2: self.housekeeper.run(self.context, 'test_job1', readonly=True) run1.assert_called_with(mock.ANY, readonly=True) self.housekeeper.run(self.context, 'test_job2', readonly=True) run2.assert_called_with(mock.ANY, readonly=True) def test_run_job_readwrite(self): with mock.patch.object(self.job1, 'run') as run1,\ mock.patch.object(self.job2, 'run') as run2: # job1 is configured as a readonly job so this should fail self.assertRaises( n_exc.ObjectNotFound, self.housekeeper.run, self.context, 'test_job1', readonly=False) self.assertFalse(run1.called) # job2 should run self.housekeeper.run(self.context, 'test_job2', readonly=False) run2.assert_called_with(mock.ANY, 
readonly=False) def test_run_all_readonly(self): with mock.patch.object(self.job1, 'run') as run1,\ mock.patch.object(self.job2, 'run') as run2: self.housekeeper.run(self.context, 'all', readonly=True) run1.assert_called_with(mock.ANY, readonly=True) run2.assert_called_with(mock.ANY, readonly=True) def test_run_all_readwrite(self): with mock.patch.object(self.job1, 'run') as run1,\ mock.patch.object(self.job2, 'run') as run2: self.housekeeper.run(self.context, 'all', readonly=False) # job1 is configured as a readonly job so it was not called self.assertFalse(run1.called) # job2 should run run2.assert_called_with(mock.ANY, readonly=False) class TestHousekeeperReadOnly(TestHousekeeper): def setUp(self): super(TestHousekeeperReadOnly, self).setUp() self.housekeeper.global_readonly = True def test_run_job_readonly(self): with mock.patch.object(self.job1, 'run') as run1,\ mock.patch.object(self.job2, 'run') as run2: self.housekeeper.run(self.context, 'test_job1', readonly=True) run1.assert_called_with(mock.ANY, readonly=True) self.housekeeper.run(self.context, 'test_job2', readonly=True) run2.assert_called_with(mock.ANY, readonly=True) def test_run_job_readwrite(self): with mock.patch.object(self.job1, 'run') as run1,\ mock.patch.object(self.job2, 'run') as run2: # job1 is configured as a readonly job so this should fail self.assertRaises( n_exc.ObjectNotFound, self.housekeeper.run, self.context, 'test_job1', readonly=False) self.assertFalse(run1.called) # global readonly flag so job2 should also fail self.assertRaises( n_exc.ObjectNotFound, self.housekeeper.run, self.context, 'test_job2', readonly=False) self.assertFalse(run2.called) def test_run_all_readonly(self): with mock.patch.object(self.job1, 'run') as run1,\ mock.patch.object(self.job2, 'run') as run2: self.housekeeper.run(self.context, 'all', readonly=True) run1.assert_called_with(mock.ANY, readonly=True) run2.assert_called_with(mock.ANY, readonly=True) def test_run_all_readwrite(self): with 
mock.patch.object(self.job1, 'run') as run1,\ mock.patch.object(self.job2, 'run') as run2: # global readonly flag so 'all' should fail self.assertRaises( n_exc.ObjectNotFound, self.housekeeper.run, self.context, 'all', readonly=False) self.assertFalse(run1.called) self.assertFalse(run2.called) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2302547 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/db/0000755000175000017500000000000000000000000022076 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/db/__init__.py0000644000175000017500000000000000000000000024175 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/db/test_migrations.py0000644000175000017500000001004600000000000025664 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from neutron.db.migration.alembic_migrations import external from neutron.db.migration import cli as migration from neutron.tests.functional.db import test_migrations from neutron.tests.unit import testlib_api from vmware_nsx.db.migration import alembic_migrations from vmware_nsx.db.migration.models import head #TODO(abhiraut): Remove this list from here once *aaS repos forms its # own list. # Add *aaS tables to EXTERNAL_TABLES since they should not be # tested. LBAAS_TABLES = { 'nsxv_edge_monitor_mappings', 'nsxv_edge_pool_mappings', 'nsxv_edge_vip_mappings', # LBaaS v2 tables 'lbaas_healthmonitors', 'lbaas_l7policies', 'lbaas_l7rules', 'lbaas_listeners', 'lbaas_loadbalancer_statistics', 'lbaas_loadbalanceragentbindings', 'lbaas_loadbalancers', 'lbaas_members', 'lbaas_pools', 'lbaas_sessionpersistences', 'lbaas_sni', } L2GW_TABLES = { 'l2gw_alembic_version', 'physical_locators', 'physical_switches', 'physical_ports', 'logical_switches', 'ucast_macs_locals', 'ucast_macs_remotes', 'vlan_bindings', 'l2gatewayconnections', 'l2gatewayinterfaces', 'l2gatewaydevices', 'l2gateways', 'pending_ucast_macs_remotes' } SFC_TABLES = { 'sfc_flow_classifier_l7_parameters', 'sfc_flow_classifiers', 'sfc_port_chain_parameters', 'sfc_service_function_params', 'sfc_port_pair_group_params', 'sfc_chain_classifier_associations', 'sfc_port_pairs', 'sfc_chain_group_associations', 'sfc_port_pair_groups', 'sfc_port_chains', 'sfc_uuid_intid_associations', 'sfc_path_port_associations', 'sfc_portpair_details', 'sfc_path_nodes', } TAAS_TABLES = { 'tap_services', 'tap_flows', 'tap_id_associations', } FWAAS_TABLES = { 'firewall_router_associations', 'cisco_firewall_associations', } VPNAAS_TABLES = { 'vpn_endpoint_groups', 'vpn_endpoints', } # EXTERNAL_TABLES should contain all names of tables that are not related to # current repo. 
EXTERNAL_TABLES = (set(external.TABLES) | LBAAS_TABLES | VPNAAS_TABLES | L2GW_TABLES | SFC_TABLES | TAAS_TABLES | FWAAS_TABLES) class _TestModelsMigrationsFoo(test_migrations._TestModelsMigrations): def db_sync(self, engine): cfg.CONF.set_override('connection', engine.url, group='database') for conf in migration.get_alembic_configs(): self.alembic_config = conf self.alembic_config.neutron_config = cfg.CONF migration.do_alembic_command(conf, 'upgrade', 'heads') def get_metadata(self): return head.get_metadata() def include_object(self, object_, name, type_, reflected, compare_to): if type_ == 'table' and (name.startswith('alembic') or name == alembic_migrations.VERSION_TABLE or name in EXTERNAL_TABLES): return False if type_ == 'index' and reflected and name.startswith("idx_autoinc_"): return False return True class TestModelsMigrationsMysql(testlib_api.MySQLTestCaseMixin, _TestModelsMigrationsFoo, testlib_api.SqlTestCaseLight): pass class TestModelsMigrationsPsql(testlib_api.PostgreSQLTestCaseMixin, _TestModelsMigrationsFoo, testlib_api.SqlTestCaseLight): pass ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2302547 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/dvs/0000755000175000017500000000000000000000000022305 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/dvs/__init__.py0000644000175000017500000000000000000000000024404 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/dvs/test_plugin.py0000644000175000017500000005174400000000000025227 0ustar00coreycorey00000000000000# Copyright (c) 2014 VMware. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from neutron_lib import context from oslo_config import cfg from oslo_utils import uuidutils from neutron.tests import base import neutron.tests.unit.db.test_db_base_plugin_v2 as test_plugin from neutron_lib.api.definitions import portbindings from neutron_lib import exceptions as exp from neutron_lib.plugins import directory from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.db import db as nsx_db from vmware_nsx.dvs import dvs from vmware_nsx.dvs import dvs_utils PLUGIN_NAME = 'vmware_nsx.plugin.NsxDvsPlugin' class fake_session(object): def __init__(self, *ret): self._vim = mock.Mock() def invoke_api(self, *args, **kwargs): pass def wait_for_task(self, task): pass def vim(self): return self._vim class DvsTestCase(base.BaseTestCase): @mock.patch.object(dvs_utils, 'dvs_create_session', return_value=fake_session()) @mock.patch.object(dvs.SingleDvsManager, '_get_dvs_moref_by_name', return_value=mock.MagicMock()) def setUp(self, mock_moref, mock_session): super(DvsTestCase, self).setUp() cfg.CONF.set_override('dvs_name', 'fake_dvs', group='dvs') self._dvs = dvs.SingleDvsManager() self.assertEqual(mock_moref.return_value, self._dvs._dvs_moref) mock_moref.assert_called_once_with(mock_session.return_value, 'fake_dvs') @mock.patch.object(dvs_utils, 'dvs_create_session', return_value=fake_session()) def test_dvs_not_found(self, mock_session): self.assertRaises(nsx_exc.DvsNotFound, dvs.SingleDvsManager) @mock.patch.object(dvs.DvsManager, '_get_port_group_spec', return_value='fake-spec') def test_add_port_group(self, fake_get_spec): 
self._dvs.add_port_group('fake-uuid', vlan_tag=7) fake_get_spec.assert_called_once_with('fake-uuid', 7, trunk_mode=False) @mock.patch.object(dvs.DvsManager, '_get_port_group_spec', return_value='fake-spec') def test_add_port_group_with_exception(self, fake_get_spec): with ( mock.patch.object(self._dvs.dvs._session, 'wait_for_task', side_effect=exp.NeutronException()) ): self.assertRaises(exp.NeutronException, self._dvs.add_port_group, 'fake-uuid', 7, trunk_mode=False) fake_get_spec.assert_called_once_with('fake-uuid', 7, trunk_mode=False) @mock.patch.object(dvs.DvsManager, '_net_id_to_moref', return_value='fake-moref') def test_delete_port_group(self, fake_get_moref): self._dvs.delete_port_group('fake-uuid') fake_get_moref.assert_called_once_with(mock.ANY, 'fake-uuid') @mock.patch.object(dvs.DvsManager, '_net_id_to_moref', return_value='fake-moref') def test_delete_port_group_with_exception(self, fake_get_moref): with ( mock.patch.object(self._dvs.dvs._session, 'wait_for_task', side_effect=exp.NeutronException()) ): self.assertRaises(exp.NeutronException, self._dvs.delete_port_group, 'fake-uuid') fake_get_moref.assert_called_once_with(mock.ANY, 'fake-uuid') @mock.patch.object(dvs.DvsManager, '_update_vxlan_port_groups_config') @mock.patch.object(dvs.DvsManager, '_get_port_group_spec', return_value='fake-spec') @mock.patch.object(dvs.DvsManager, '_net_id_to_moref', return_value='fake-moref') def test_update_vxlan_net_group_conf(self, fake_get_moref, fake_get_spec, fake_update_vxlan): net_id = 'vxlan-uuid' vlan = 7 self._dvs.add_port_group(net_id, vlan) self._dvs.net_id_to_moref(net_id) fake_get_moref.assert_called_once_with(mock.ANY, net_id) fake_get_spec.assert_called_once_with(net_id, vlan, trunk_mode=False) @mock.patch.object(dvs.DvsManager, '_update_net_port_groups_config') @mock.patch.object(dvs.DvsManager, '_get_port_group_spec', return_value='fake-spec') @mock.patch.object(dvs.DvsManager, '_net_id_to_moref', return_value='dvportgroup-fake-moref') def 
test_update_flat_net_conf(self, fake_get_moref, fake_get_spec, fake_update_net): net_id = 'flat-uuid' vlan = 7 self._dvs.add_port_group(net_id, vlan) self._dvs.net_id_to_moref(net_id) fake_get_moref.assert_called_once_with(mock.ANY, net_id) fake_get_spec.assert_called_once_with(net_id, vlan, trunk_mode=False) class NeutronSimpleDvsTestCase(test_plugin.NeutronDbPluginV2TestCase): @mock.patch.object(dvs_utils, 'dvs_create_session', return_value=fake_session()) @mock.patch.object(dvs.SingleDvsManager, '_get_dvs_moref_by_name', return_value=mock.MagicMock()) def setUp(self, mock_moref, mock_session, plugin=PLUGIN_NAME, ext_mgr=None, service_plugins=None, **kwargs): # Ensure that DVS is enabled cfg.CONF.set_override('host_ip', 'fake_ip', group='dvs') cfg.CONF.set_override('host_username', 'fake_user', group='dvs') cfg.CONF.set_override('host_password', 'fake_password', group='dvs') cfg.CONF.set_override('dvs_name', 'fake_dvs', group='dvs') super(NeutronSimpleDvsTestCase, self).setUp(plugin=plugin) self._plugin = directory.get_plugin() class NeutronSimpleDvsTest(NeutronSimpleDvsTestCase): def _create_and_delete_dvs_network(self, network_type='flat', vlan_tag=0, trunk_mode=False): params = {'provider:network_type': network_type, 'provider:physical_network': 'fake-moid', 'name': 'fake-name'} if network_type == 'vlan': params['provider:segmentation_id'] = vlan_tag if trunk_mode: params['vlan_transparent'] = True params['arg_list'] = tuple(params.keys()) with mock.patch.object(self._plugin._dvs, 'add_port_group') as mock_add,\ mock.patch.object(self._plugin._dvs, 'delete_port_group') as mock_delete,\ mock.patch.object(dvs.DvsManager, 'add_port_group') as mock_dvs_add,\ mock.patch.object(dvs.DvsManager, 'delete_port_group'),\ mock.patch.object(dvs.DvsManager, 'get_dvs_moref_by_name', return_value=mock.MagicMock() ) as mock_dvs_moref,\ mock.patch.object(dvs.DvsManager, '_get_trunk_vlan_spec') as mock_trunk_vlan: with self.network(**params) as network: ctx = 
context.get_admin_context() id = network['network']['id'] dvs_id = '%s-%s' % (network['network']['name'], id) binding = nsx_db.get_network_bindings(ctx.session, id) self.assertIsNotNone(binding) if network_type == 'flat': self.assertEqual('flat', binding[0].binding_type) self.assertEqual(0, binding[0].vlan_id) self.assertEqual('fake-moid', binding[0].phy_uuid) elif network_type == 'vlan': self.assertEqual('vlan', binding[0].binding_type) self.assertEqual(vlan_tag, binding[0].vlan_id) self.assertEqual('fake-moid', binding[0].phy_uuid) elif network_type == 'portgroup': self.assertEqual('portgroup', binding[0].binding_type) self.assertEqual(0, binding[0].vlan_id) self.assertEqual('fake-moid', binding[0].phy_uuid) else: self.fail() if network_type != 'portgroup': mock_dvs_add.assert_called_once_with( mock_dvs_moref.return_value, dvs_id, vlan_tag, trunk_mode=trunk_mode) else: mock_add.call_count = 0 mock_delete.call_count = 0 if trunk_mode: mock_trunk_vlan.called_once_with(start=0, end=4094) else: mock_trunk_vlan.call_count = 0 def test_create_and_delete_dvs_network_tag(self): self._create_and_delete_dvs_network(network_type='vlan', vlan_tag=7) def test_create_and_delete_dvs_network_flat(self): self._create_and_delete_dvs_network() def test_create_and_delete_dvs_network_flat_vlan_transparent(self): self._create_and_delete_dvs_network(trunk_mode=True) @mock.patch.object(dvs.DvsManager, 'get_port_group_info') @mock.patch.object(dvs.DvsManager, '_net_id_to_moref') def test_create_and_delete_dvs_network_portgroup(self, fake_moref, fake_pg_info): fake_pg_info.return_value = {'name': 'fake-name'}, fake_moref self._create_and_delete_dvs_network(network_type='portgroup') self.assertTrue(fake_pg_info.call_count) @mock.patch.object(dvs.DvsManager, 'get_port_group_info') @mock.patch.object(dvs.DvsManager, '_net_id_to_moref') def test_create_and_delete_dvs_network_portgroup_vlan(self, fake_get_moref, fake_pg_info): fake_pg_info.return_value = {'name': 'fake-name'}, fake_get_moref 
self._create_and_delete_dvs_network(network_type='portgroup', vlan_tag=7) self.assertTrue(fake_pg_info.call_count) def test_create_dvs_vlan_network_no_physical_network(self): params = {'provider:network_type': 'vlan', 'provider:segmentation_id': 10, 'admin_state_up': True, 'name': 'fake-name', 'tenant_id': 'fake_tenant', 'shared': False, 'port_security_enabled': False} params['arg_list'] = tuple(params.keys()) ctx = context.get_admin_context() with mock.patch.object(self._plugin._dvs, 'add_port_group'),\ mock.patch.object(dvs.DvsManager, 'add_port_group'),\ mock.patch.object(dvs.DvsManager, 'get_dvs_moref_by_name', return_value=mock.MagicMock()): network = self._plugin.create_network(ctx, {'network': params}) # Should work and take the default dvs self.assertIn('id', network) def test_create_dvs_pg_network_no_physical_network(self): params = {'provider:network_type': 'portgroup', 'provider:segmentation_id': 10, 'admin_state_up': True, 'name': 'fake-name', 'tenant_id': 'fake_tenant', 'shared': False, 'port_security_enabled': False} params['arg_list'] = tuple(params.keys()) ctx = context.get_admin_context() with mock.patch.object(self._plugin._dvs, 'add_port_group'),\ mock.patch.object(dvs.DvsManager, 'add_port_group'),\ mock.patch.object(dvs.DvsManager, 'get_dvs_moref_by_name', return_value=mock.MagicMock()): self.assertRaises(exp.InvalidInput, self._plugin.create_network, ctx, {'network': params}) def test_create_and_delete_dvs_port(self): params = {'provider:network_type': 'vlan', 'provider:physical_network': 'dvs', 'provider:segmentation_id': 7} params['arg_list'] = tuple(params.keys()) with mock.patch.object(self._plugin._dvs, 'add_port_group'),\ mock.patch.object(self._plugin._dvs, 'delete_port_group'),\ mock.patch.object(dvs.DvsManager, 'get_dvs_moref_by_name'),\ mock.patch.object(dvs.DvsManager, 'add_port_group'),\ mock.patch.object(dvs.DvsManager, 'delete_port_group'): with self.network(**params) as network,\ self.subnet(network) as subnet,\ 
self.port(subnet) as port: self.assertEqual('dvs', port['port'][portbindings.VIF_TYPE]) port_status = port['port']['status'] self.assertEqual(port_status, 'ACTIVE') def test_create_dvs_port_vlan_no_port_security(self): params = {'provider:network_type': 'vlan', 'provider:physical_network': 'dvs', 'provider:segmentation_id': 7} params['arg_list'] = tuple(params.keys()) with mock.patch.object(self._plugin._dvs, 'add_port_group'),\ mock.patch.object(self._plugin._dvs, 'delete_port_group'),\ mock.patch.object(dvs.DvsManager, 'get_dvs_moref_by_name'),\ mock.patch.object(dvs.DvsManager, 'add_port_group'),\ mock.patch.object(dvs.DvsManager, 'delete_port_group'): with self.network(**params) as network,\ self.subnet(network) as subnet,\ self.port(subnet) as port: self.assertEqual('dvs', port['port'][portbindings.VIF_TYPE]) port_security = port['port']['port_security_enabled'] security_groups = port['port']['security_groups'] self.assertEqual(port_security, False) self.assertEqual(security_groups, []) def test_update_dvs_port_vlan_no_port_security(self): params = {'provider:network_type': 'vlan', 'provider:physical_network': 'dvs', 'provider:segmentation_id': 7} params['arg_list'] = tuple(params.keys()) with mock.patch.object(self._plugin._dvs, 'add_port_group'),\ mock.patch.object(self._plugin._dvs, 'delete_port_group'),\ mock.patch.object(dvs.DvsManager, 'get_dvs_moref_by_name'),\ mock.patch.object(dvs.DvsManager, 'add_port_group'),\ mock.patch.object(dvs.DvsManager, 'delete_port_group'): with self.network(**params) as network,\ self.subnet(network) as subnet,\ self.port(subnet) as port: self.assertEqual('dvs', port['port'][portbindings.VIF_TYPE]) data = {'port': {'port_security_enabled': True}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize('json', req.get_response(self.api)) self.assertIn('NeutronError', res) def test_create_router_only_dvs_backend(self): data = {'router': {'tenant_id': 'whatever'}} data['router']['name'] = 
'router1' data['router']['external_gateway_info'] = {'network_id': 'whatever'} self.assertRaises(exp.BadRequest, self._plugin.create_router, context.get_admin_context(), data) def test_dvs_get_id(self): id = uuidutils.generate_uuid() net = {'name': '', 'id': id} expected = id self.assertEqual(expected, self._plugin._dvs_get_id(net)) net = {'name': 'pele', 'id': id} expected = '%s-%s' % ('pele', id) self.assertEqual(expected, self._plugin._dvs_get_id(net)) name = 'X' * 500 net = {'name': name, 'id': id} expected = '%s-%s' % (name[:43], id) self.assertEqual(expected, self._plugin._dvs_get_id(net)) def test_update_dvs_network(self): """Test update of a DVS network """ params = {'provider:network_type': 'flat', 'admin_state_up': True, 'name': 'test_net', 'tenant_id': 'fake_tenant', 'shared': False, 'port_security_enabled': False} with mock.patch.object(self._plugin._dvs, 'add_port_group'): ctx = context.get_admin_context() # create the initial network network = self._plugin.create_network(ctx, {'network': params}) id = network['id'] # update the different attributes of the DVS network # cannot update the provider type self.assertRaises( exp.InvalidInput, self._plugin.update_network, ctx, id, {'network': {'provider:network_type': 'vlan'}}) # update the Shared attribute self.assertEqual(False, network['shared']) updated_net = self._plugin.update_network( ctx, id, {'network': {'shared': True}}) self.assertEqual(True, updated_net['shared']) # Update the description attribute self.assertIsNone(network['description']) updated_net = self._plugin.update_network( ctx, id, {'network': {'description': 'test'}}) self.assertEqual('test', updated_net['description']) # update the port security attribute self.assertEqual(False, network['port_security_enabled']) updated_net = self._plugin.update_network( ctx, id, {'network': {'port_security_enabled': True}}) self.assertEqual(True, updated_net['port_security_enabled']) @mock.patch.object(dvs.DvsManager, 'get_port_group_info') 
@mock.patch.object(dvs.DvsManager, '_net_id_to_moref') def test_create_and_delete_portgroup_network_invalid_name(self, fake_get_moref, fake_pg_info): fake_pg_info.return_value = {'name': 'invalid-name'}, fake_get_moref data = {'network': {'provider:network_type': 'portgroup', 'name': 'fake-name', 'admin_state_up': True}} self.assertRaises(exp.BadRequest, self._plugin.create_network, context.get_admin_context(), data) def test_create_vlan_network_fail_duplicate_dvs(self): params = {'provider:network_type': 'vlan', 'admin_state_up': True, 'name': 'test_net', 'tenant_id': 'fake_tenant', 'shared': False, 'provider:physical_network': 'fake-moid', 'provider:segmentation_id': 7, 'port_security_enabled': False} with mock.patch.object(self._plugin._dvs, 'add_port_group'),\ mock.patch.object(dvs.DvsManager, 'add_port_group'),\ mock.patch.object(dvs.DvsManager, 'get_dvs_moref_by_name', return_value=mock.MagicMock()): ctx = context.get_admin_context() self._plugin.create_network(ctx, {'network': params}) self.assertRaises(exp.InvalidInput, self._plugin.create_network, ctx, {'network': params}) def test_create_external_network_fail(self): params = {'provider:network_type': 'vlan', 'admin_state_up': True, 'name': 'test_net', 'tenant_id': 'fake_tenant', 'router:external': True, 'shared': False, 'provider:physical_network': 'fake-moid', 'provider:segmentation_id': 7, 'port_security_enabled': False} with mock.patch.object(self._plugin._dvs, 'add_port_group'),\ mock.patch.object(dvs.DvsManager, 'add_port_group'),\ mock.patch.object(dvs.DvsManager, 'get_dvs_moref_by_name', return_value=mock.MagicMock()): ctx = context.get_admin_context() self.assertRaises(exp.InvalidInput, self._plugin.create_network, ctx, {'network': params}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/dvs/test_utils.py0000644000175000017500000000474100000000000025064 0ustar00coreycorey00000000000000# 
Copyright (c) 2014 VMware. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from oslo_config import cfg from oslo_vmware import api from neutron.tests import base from vmware_nsx.dvs import dvs_utils class DvsUtilsTestCase(base.BaseTestCase): def test_default_configuration(self): self.assertFalse(dvs_utils.dvs_is_enabled()) def _dvs_fake_cfg_set(self): cfg.CONF.set_override('host_ip', 'fake_host_ip', group='dvs') cfg.CONF.set_override('host_username', 'fake_host_user_name', group='dvs') cfg.CONF.set_override('host_password', 'fake_host_password', group='dvs') cfg.CONF.set_override('dvs_name', 'fake_dvs', group='dvs') cfg.CONF.set_override('host_port', '443', group='dvs') cfg.CONF.set_override('ca_file', 'cacert', group='dvs') cfg.CONF.set_override('insecure', False, group='dvs') def test_dvs_set(self): self._dvs_fake_cfg_set() self.assertTrue(dvs_utils.dvs_is_enabled()) @mock.patch.object(api.VMwareAPISession, '__init__', return_value=None) def test_dvs_create_session(self, fake_init): dvs_utils.dvs_create_session() fake_init.assert_called_once_with(cfg.CONF.dvs.host_ip, cfg.CONF.dvs.host_username, cfg.CONF.dvs.host_password, cfg.CONF.dvs.api_retry_count, cfg.CONF.dvs.task_poll_interval, port=cfg.CONF.dvs.host_port, cacert=cfg.CONF.dvs.ca_file, insecure=cfg.CONF.dvs.insecure) def test_dvs_name_get(self): cfg.CONF.set_override('dvs_name', 'fake-dvs', group='dvs') self.assertEqual('fake-dvs', dvs_utils.dvs_name_get()) 
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2342546 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/etc/0000755000175000017500000000000000000000000022264 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/etc/fake_get_gwservice.json0000644000175000017500000000056600000000000027011 0ustar00coreycorey00000000000000{ "display_name": "%(display_name)s", "_href": "/ws.v1/gateway-service/%(uuid)s", "tags": %(tags_json)s, "_schema": "/ws.v1/schema/L2GatewayServiceConfig", "gateways": [ { "transport_node_uuid": "%(transport_node_uuid)s", "type": "L2Gateway", "device_id": "%(device_id)s" } ], "type": "L2GatewayServiceConfig", "uuid": "%(uuid)s" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/etc/fake_get_lqueue.json0000644000175000017500000000053500000000000026307 0ustar00coreycorey00000000000000{ "display_name": "%(display_name)s", "uuid": "%(uuid)s", "type": "LogicalSwitchConfig", "_schema": "/ws.v1/schema/LogicalQueueConfig", "dscp": "%(dscp)s", "max_bandwidth_rate": "%(max_bandwidth_rate)s", "min_bandwidth_rate": "%(min_bandwidth_rate)s", "qos_marking": "%(qos_marking)s", "_href": "/ws.v1/lqueue/%(uuid)s" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/etc/fake_get_lrouter.json0000644000175000017500000000162400000000000026503 0ustar00coreycorey00000000000000{ "display_name": "%(display_name)s", %(distributed_json)s "uuid": "%(uuid)s", "tags": %(tags_json)s, "routing_config": { "type": "SingleDefaultRouteImplicitRoutingConfig", "_schema": "/ws.v1/schema/SingleDefaultRouteImplicitRoutingConfig", "default_route_next_hop": { "type": "RouterNextHop", "_schema": 
"/ws.v1/schema/RouterNextHop", "gateway_ip_address": "%(default_next_hop)s" } }, "_schema": "/ws.v1/schema/LogicalRouterConfig", "_relations": { "LogicalRouterStatus": { "_href": "/ws.v1/lrouter/%(uuid)s/status", "lport_admin_up_count": %(lport_count)d, "_schema": "/ws.v1/schema/LogicalRouterStatus", "lport_count": %(lport_count)d, "fabric_status": %(status)s, "type": "LogicalRouterStatus", "lport_link_up_count": %(lport_count)d } }, "type": "LogicalRouterConfig", "_href": "/ws.v1/lrouter/%(uuid)s" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/etc/fake_get_lrouter_lport.json0000644000175000017500000000065100000000000027722 0ustar00coreycorey00000000000000{ "display_name": "%(display_name)s", "admin_status_enabled": "%(admin_status_enabled)s", "_href": "/ws.v1/lrouter/%(lr_uuid)s/lport/%(uuid)s", "tags": [{"scope": "q_port_id", "tag": "%(neutron_port_id)s"}, {"scope": "os_tid", "tag": "%(tenant_id)s"}], "ip_addresses": %(ip_addresses_json)s, "_schema": "/ws.v1/schema/LogicalRouterPortConfig", "type": "LogicalRouterPortConfig", "uuid": "%(uuid)s" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/etc/fake_get_lrouter_lport_att.json0000644000175000017500000000034500000000000030572 0ustar00coreycorey00000000000000{ "LogicalPortAttachment": { %(peer_port_href_field)s %(peer_port_uuid_field)s %(l3_gateway_service_uuid_field)s %(vlan_id)s "type": "%(type)s", "schema": "/ws.v1/schema/%(type)s" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/etc/fake_get_lrouter_nat.json0000644000175000017500000000017700000000000027347 0ustar00coreycorey00000000000000{ "_href": "/ws.v1/lrouter/%(lr_uuid)s/nat/%(uuid)s", "type": "%(type)s", "match": %(match_json)s, 
"uuid": "%(uuid)s" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/etc/fake_get_lswitch.json0000644000175000017500000000070600000000000026464 0ustar00coreycorey00000000000000{"display_name": "%(display_name)s", "_href": "/ws.v1/lswitch/%(uuid)s", "_schema": "/ws.v1/schema/LogicalSwitchConfig", "_relations": {"LogicalSwitchStatus": {"fabric_status": %(status)s, "type": "LogicalSwitchStatus", "lport_count": %(lport_count)d, "_href": "/ws.v1/lswitch/%(uuid)s/status", "_schema": "/ws.v1/schema/LogicalSwitchStatus"}}, "type": "LogicalSwitchConfig", "tags": %(tags_json)s, "uuid": "%(uuid)s"} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/etc/fake_get_lswitch_lport.json0000644000175000017500000000170600000000000027705 0ustar00coreycorey00000000000000{"display_name": "%(display_name)s", "_relations": {"LogicalPortStatus": {"type": "LogicalSwitchPortStatus", "admin_status_enabled": true, "fabric_status_up": %(status)s, "link_status_up": %(status)s, "_href": "/ws.v1/lswitch/%(ls_uuid)s/lport/%(uuid)s/status", "_schema": "/ws.v1/schema/LogicalSwitchPortStatus"}, "LogicalSwitchConfig": {"uuid": "%(ls_uuid)s"}, "LogicalPortAttachment": { "type": "%(att_type)s", %(att_info_json)s "schema": "/ws.v1/schema/%(att_type)s" } }, "tags": [{"scope": "q_port_id", "tag": "%(neutron_port_id)s"}, {"scope": "vm_id", "tag": "%(neutron_device_id)s"}, {"scope": "os_tid", "tag": "%(tenant_id)s"}], "uuid": "%(uuid)s", "admin_status_enabled": "%(admin_status_enabled)s", "type": "LogicalSwitchPortConfig", "_schema": "/ws.v1/schema/LogicalSwitchPortConfig", "_href": "/ws.v1/lswitch/%(ls_uuid)s/lport/%(uuid)s" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 
vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/etc/fake_get_lswitch_lport_att.json0000644000175000017500000000016500000000000030553 0ustar00coreycorey00000000000000{ "LogicalPortAttachment": { "type": "%(att_type)s", "schema": "/ws.v1/schema/%(att_type)s" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/etc/fake_get_lswitch_lport_status.json0000644000175000017500000000127300000000000031307 0ustar00coreycorey00000000000000{"_href": "/ws.v1/lswitch/%(ls_uuid)s/lport/%(uuid)s", "lswitch": {"display_name": "%(ls_name)s", "uuid": "%(ls_uuid)s", "tags": [ {"scope": "os_tid", "tag": "%(ls_tenant_id)s"} ], "type": "LogicalSwitchConfig", "_schema": "/ws.v1/schema/LogicalSwitchConfig", "port_isolation_enabled": false, "transport_zones": [ {"zone_uuid": "%(ls_zone_uuid)s", "transport_type": "stt"} ], "_href": "/ws.v1/lswitch/%(ls_uuid)s"}, "link_status_up": false, "_schema": "/ws.v1/schema/LogicalSwitchPortStatus", "admin_status_enabled": true, "fabric_status_up": true, "link_status_up": true, "type": "LogicalSwitchPortStatus" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/etc/fake_get_security_profile.json0000644000175000017500000000064600000000000030401 0ustar00coreycorey00000000000000{ "display_name": "%(display_name)s", "_href": "/ws.v1/security-profile/%(uuid)s", "tags": [{"scope": "os_tid", "tag": "%(tenant_id)s"}, {"scope": "nova_spid", "tag": "%(nova_spid)s"}], "logical_port_egress_rules": %(logical_port_egress_rules_json)s, "_schema": "/ws.v1/schema/SecurityProfileConfig", "logical_port_ingress_rules": %(logical_port_ingress_rules_json)s, "uuid": "%(uuid)s" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 
vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/etc/fake_post_gwservice.json0000644000175000017500000000046300000000000027213 0ustar00coreycorey00000000000000{ "display_name": "%(display_name)s", "tags": [{"scope": "os_tid", "tag": "%(tenant_id)s"}], "gateways": [ { "transport_node_uuid": "%(transport_node_uuid)s", "device_id": "%(device_id)s", "type": "L2Gateway" } ], "type": "L2GatewayServiceConfig", "uuid": "%(uuid)s" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/etc/fake_post_lqueue.json0000644000175000017500000000053500000000000026515 0ustar00coreycorey00000000000000{ "display_name": "%(display_name)s", "uuid": "%(uuid)s", "type": "LogicalSwitchConfig", "_schema": "/ws.v1/schema/LogicalQueueConfig", "dscp": "%(dscp)s", "max_bandwidth_rate": "%(max_bandwidth_rate)s", "min_bandwidth_rate": "%(min_bandwidth_rate)s", "qos_marking": "%(qos_marking)s", "_href": "/ws.v1/lqueue/%(uuid)s" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/etc/fake_post_lrouter.json0000644000175000017500000000114300000000000026705 0ustar00coreycorey00000000000000{ "display_name": "%(display_name)s", %(distributed_json)s "uuid": "%(uuid)s", "tags": [ { "scope": "os_tid", "tag": "%(tenant_id)s" } ], "routing_config": { "type": "SingleDefaultRouteImplicitRoutingConfig", "_schema": "/ws.v1/schema/SingleDefaultRouteImplicitRoutingConfig", "default_route_next_hop": { "type": "RouterNextHop", "_schema": "/ws.v1/schema/RouterNextHop", "gateway_ip_address": "%(default_next_hop)s" } }, "_schema": "/ws.v1/schema/LogicalRouterConfig", "type": "LogicalRouterConfig", "_href": "/ws.v1/lrouter/%(uuid)s" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 
vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/etc/fake_post_lrouter_lport.json0000644000175000017500000000050000000000000030121 0ustar00coreycorey00000000000000{ "display_name": "%(display_name)s", "_href": "/ws.v1/lrouter/%(lr_uuid)s/lport/%(uuid)s", "_schema": "/ws.v1/schema/LogicalRouterPortConfig", "mac_address": "00:00:00:00:00:00", "admin_status_enabled": true, "ip_addresses": %(ip_addresses_json)s, "type": "LogicalRouterPortConfig", "uuid": "%(uuid)s" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/etc/fake_post_lrouter_nat.json0000644000175000017500000000017700000000000027555 0ustar00coreycorey00000000000000{ "_href": "/ws.v1/lrouter/%(lr_uuid)s/nat/%(uuid)s", "type": "%(type)s", "match": %(match_json)s, "uuid": "%(uuid)s" }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/etc/fake_post_lswitch.json0000644000175000017500000000057100000000000026672 0ustar00coreycorey00000000000000{ "display_name": "%(display_name)s", "uuid": "%(uuid)s", "tags": [{"scope": "os_tid", "tag": "%(tenant_id)s"}], "type": "LogicalSwitchConfig", "_schema": "/ws.v1/schema/LogicalSwitchConfig", "port_isolation_enabled": false, "transport_zones": [ {"zone_uuid": "%(zone_uuid)s", "transport_type": "stt"}], "_href": "/ws.v1/lswitch/%(uuid)s" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/etc/fake_post_lswitch_lport.json0000644000175000017500000000100600000000000030104 0ustar00coreycorey00000000000000{ "display_name": "%(uuid)s", "_href": "/ws.v1/lswitch/%(ls_uuid)s/lport/%(uuid)s", "security_profiles": [], "tags": [{"scope": "q_port_id", "tag": "%(neutron_port_id)s"}, {"scope": "vm_id", "tag": "%(neutron_device_id)s"}, {"scope": "os_tid", "tag": "%(tenant_id)s"}], 
"portno": 1, "queue_uuid": null, "_schema": "/ws.v1/schema/LogicalSwitchPortConfig", "mirror_targets": [], "allowed_address_pairs": [], "admin_status_enabled": true, "type": "LogicalSwitchPortConfig", "uuid": "%(uuid)s" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/etc/fake_post_security_profile.json0000644000175000017500000000054500000000000030605 0ustar00coreycorey00000000000000{ "display_name": "%(display_name)s", "_href": "/ws.v1/security-profile/%(uuid)s", "tags": [{"scope": "os_tid", "tag": "%(tenant_id)s"}, {"scope": "nova_spid", "tag": "%(nova_spid)s"}], "logical_port_egress_rules": [], "_schema": "/ws.v1/schema/SecurityProfileConfig", "logical_port_ingress_rules": [], "uuid": "%(uuid)s" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/etc/fake_put_lrouter_lport_att.json0000644000175000017500000000046500000000000030626 0ustar00coreycorey00000000000000{ "LogicalPortAttachment": { %(peer_port_href_field)s %(peer_port_uuid_field)s %(l3_gateway_service_uuid_field)s %(vlan_id_field)s "_href": "/ws.v1/lrouter/%(lr_uuid)s/lport/%(lp_uuid)s/attachment", "type": "%(type)s", "schema": "/ws.v1/schema/%(type)s" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/etc/fake_put_lswitch_lport_att.json0000644000175000017500000000041600000000000030603 0ustar00coreycorey00000000000000{ "LogicalPortAttachment": { %(peer_port_href_field)s %(peer_port_uuid_field)s %(vif_uuid_field)s "_href": "/ws.v1/lswitch/%(ls_uuid)s/lport/%(lp_uuid)s/attachment", "type": "%(type)s", "schema": "/ws.v1/schema/%(type)s" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 
vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/etc/neutron.conf.test0000644000175000017500000000070500000000000025605 0ustar00coreycorey00000000000000[DEFAULT] # Show debugging output in logs (sets DEBUG log level output) debug = False # Address to bind the API server bind_host = 0.0.0.0 # Port the bind the API server to bind_port = 9696 # MISSING Path to the extensions # api_extensions_path = # Paste configuration file api_paste_config = api-paste.ini.test # The messaging module to use, defaults to kombu. rpc_backend = fake lock_path = $state_path/lock [database] connection = 'sqlite://' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/etc/nsx.ini.agentless.test0000644000175000017500000000055200000000000026541 0ustar00coreycorey00000000000000[DEFAULT] default_tz_uuid = fake_tz_uuid nova_zone_id = whatever nsx_controllers = fake_1, fake_2 nsx_user = foo nsx_password = bar default_l3_gw_service_uuid = whatever default_l2_gw_service_uuid = whatever default_service_cluster_uuid = whatever nsx_default_interface_name = whatever http_timeout = 13 redirects = 12 retries = 11 [NSX] agent_mode = agentless ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/etc/nsx.ini.basic.test0000644000175000017500000000014500000000000025633 0ustar00coreycorey00000000000000[DEFAULT] default_tz_uuid = fake_tz_uuid nsx_controllers=fake_1,fake_2 nsx_user=foo nsx_password=bar ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/etc/nsx.ini.combined.test0000644000175000017500000000055100000000000026333 0ustar00coreycorey00000000000000[DEFAULT] default_tz_uuid = fake_tz_uuid nova_zone_id = whatever nsx_controllers = fake_1, fake_2 nsx_user = foo nsx_password = bar 
default_l3_gw_service_uuid = whatever default_l2_gw_service_uuid = whatever default_service_cluster_uuid = whatever nsx_default_interface_name = whatever http_timeout = 13 redirects = 12 retries = 11 [NSX] agent_mode = combined ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/etc/nsx.ini.full.test0000644000175000017500000000044400000000000025516 0ustar00coreycorey00000000000000[DEFAULT] default_tz_uuid = fake_tz_uuid nova_zone_id = whatever nsx_controllers = fake_1, fake_2 nsx_user = foo nsx_password = bar default_l3_gw_service_uuid = whatever default_l2_gw_service_uuid = whatever nsx_default_interface_name = whatever http_timeout = 13 redirects = 12 retries = 11 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/etc/nsx.ini.test0000644000175000017500000000047500000000000024561 0ustar00coreycorey00000000000000[DEFAULT] default_tz_uuid = fake_tz_uuid nsx_controllers=fake_1, fake_2 nsx_user=foo nsx_password=bar default_l3_gw_service_uuid = whatever default_l2_gw_service_uuid = whatever [nsxv] manager_uri = https://fake_manager user = fake_user password = fake_password vdn_scope_id = fake_vdn_scope_id dvs_id = fake_dvs_id ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/etc/nvp.ini.full.test0000644000175000017500000000044100000000000025506 0ustar00coreycorey00000000000000[DEFAULT] default_tz_uuid = fake_tz_uuid nova_zone_id = whatever nvp_controllers = fake_1, fake_2 nvp_user = foo nvp_password = bar default_l3_gw_service_uuid = whatever default_l2_gw_service_uuid = whatever nsx_default_interface_name = whatever http_timeout = 3 redirects = 2 retries = 2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/etc/vcns.ini.test0000644000175000017500000000035600000000000024720 0ustar00coreycorey00000000000000[nsxv] manager_uri = https://fake-host user = fake-user passwordd = fake-password datacenter_moid = fake-moid resource_pool_id = fake-resgroup datastore_id = fake-datastore external_network = fake-ext-net task_status_check_interval = 100 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2342546 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/extension_drivers/0000755000175000017500000000000000000000000025263 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/extension_drivers/__init__.py0000644000175000017500000000000000000000000027362 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/extension_drivers/test_dns_integration.py0000644000175000017500000001264400000000000032072 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.api.definitions import dns from neutron_lib import context from neutron_lib.plugins import directory from oslo_config import cfg from vmware_nsx.extension_drivers import dns_integration from vmware_nsx.tests.unit.nsx_v import test_plugin as test_v_plugin from vmware_nsx.tests.unit.nsx_v3 import test_plugin as test_v3_plugin NETWORK_DOMAIN_NAME = 'net-domain.com.' NEW_NETWORK_DOMAIN_NAME = 'new-net-domain.com.' PORT_DNS_NAME = 'port-dns-name' NEW_PORT_DNS_NAME = 'new-port-dns-name' class NsxDNSIntegrationTestCase(object): _domain = 'domain.com.' dns_integration.DNS_DRIVER = None def test_create_network_dns_domain(self): with self.network(dns_domain=NETWORK_DOMAIN_NAME, arg_list=(dns.DNSDOMAIN,)) as network: self.assertEqual(NETWORK_DOMAIN_NAME, network['network'][dns.DNSDOMAIN]) def test_update_network_dns_domain(self): with self.network(dns_domain=NETWORK_DOMAIN_NAME, arg_list=(dns.DNSDOMAIN,)) as network: update_data = {'network': {dns.DNSDOMAIN: NEW_NETWORK_DOMAIN_NAME}} updated_network = directory.get_plugin().update_network( context.get_admin_context(), network['network']['id'], update_data) self.assertEqual(NEW_NETWORK_DOMAIN_NAME, updated_network[dns.DNSDOMAIN]) def test_create_port_dns_name(self): with self.port(dns_name=PORT_DNS_NAME, arg_list=(dns.DNSNAME,)) as port: port_data = port['port'] dns_assignment = port_data[dns.DNSASSIGNMENT][0] self.assertEqual(PORT_DNS_NAME, port_data[dns.DNSNAME]) self.assertEqual(PORT_DNS_NAME, dns_assignment['hostname']) self.assertEqual(port_data['fixed_ips'][0]['ip_address'], dns_assignment['ip_address']) self.assertEqual(PORT_DNS_NAME + '.' 
+ self._domain, dns_assignment['fqdn']) def test_update_port_dns_name_ip(self): with self.subnet(cidr='10.0.0.0/24') as subnet: fixed_ips = [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.3'}] with self.port(subnet=subnet, fixed_ips=fixed_ips, dns_name=PORT_DNS_NAME, arg_list=(dns.DNSNAME,)) as port: update_data = {'port': { dns.DNSNAME: NEW_PORT_DNS_NAME, 'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.4'}]}} updated_port = directory.get_plugin().update_port( context.get_admin_context(), port['port']['id'], update_data) dns_assignment = updated_port[dns.DNSASSIGNMENT][0] self.assertEqual(NEW_PORT_DNS_NAME, updated_port[dns.DNSNAME]) self.assertEqual(NEW_PORT_DNS_NAME, dns_assignment['hostname']) self.assertEqual(updated_port['fixed_ips'][0]['ip_address'], dns_assignment['ip_address']) self.assertEqual(NEW_PORT_DNS_NAME + '.' + self._domain, dns_assignment['fqdn']) class NsxVDNSIntegrationTestCase(NsxDNSIntegrationTestCase, test_v_plugin.NsxVPluginV2TestCase): def setUp(self): cfg.CONF.set_override('nsx_extension_drivers', ['vmware_nsxv_dns']) cfg.CONF.set_override('dns_domain', self._domain) super(NsxVDNSIntegrationTestCase, self).setUp() class NsxV3DNSIntegrationTestCase(NsxDNSIntegrationTestCase, test_v3_plugin.NsxV3PluginTestCaseMixin): def setUp(self): cfg.CONF.set_override('nsx_extension_drivers', ['vmware_nsxv3_dns']) cfg.CONF.set_override('dns_domain', self._domain, 'nsx_v3') super(NsxV3DNSIntegrationTestCase, self).setUp() def test_create_port_dns_domain_name(self): with self.network(dns_domain=NETWORK_DOMAIN_NAME, arg_list=(dns.DNSDOMAIN,)) as network,\ self.subnet(network=network, cidr='10.0.0.0/24') as subnet,\ self.port(subnet=subnet, dns_name=PORT_DNS_NAME, arg_list=(dns.DNSNAME,)) as port: port_data = port['port'] dns_assignment = port_data[dns.DNSASSIGNMENT][0] self.assertEqual(PORT_DNS_NAME, port_data[dns.DNSNAME]) self.assertEqual(PORT_DNS_NAME, dns_assignment['hostname']) 
self.assertEqual(port_data['fixed_ips'][0]['ip_address'], dns_assignment['ip_address']) self.assertEqual(PORT_DNS_NAME + '.' + NETWORK_DOMAIN_NAME, dns_assignment['fqdn']) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2342546 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/extensions/0000755000175000017500000000000000000000000023710 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/extensions/__init__.py0000644000175000017500000000000000000000000026007 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/extensions/test_addresspairs.py0000644000175000017500000002207400000000000030012 0ustar00coreycorey00000000000000# Copyright (c) 2014 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron_lib.api.definitions import allowedaddresspairs as addr_apidef from neutron_lib.api.definitions import port_security as psec from oslo_config import cfg from neutron.tests.unit.db import test_allowedaddresspairs_db as ext_pairs from vmware_nsx.tests.unit.nsx_v import test_plugin as test_nsx_v_plugin from vmware_nsx.tests.unit.nsx_v3 import test_constants as v3_constants from vmware_nsx.tests.unit.nsx_v3 import test_plugin as test_v3_plugin class TestAllowedAddressPairsNSXv2(test_v3_plugin.NsxV3PluginTestCaseMixin, ext_pairs.TestAllowedAddressPairs): # TODO(arosen): move to ext_pairs.TestAllowedAddressPairs once all # plugins do this correctly. def test_create_port_no_allowed_address_pairs(self): with self.network() as net: res = self._create_port(self.fmt, net['network']['id']) port = self.deserialize(self.fmt, res) self.assertEqual(port['port'][addr_apidef.ADDRESS_PAIRS], []) self._delete('ports', port['port']['id']) def test_create_port_security_false_allowed_address_pairs(self): self.skipTest('TBD') class TestAllowedAddressPairsNSXv3(test_v3_plugin.NsxV3PluginTestCaseMixin, ext_pairs.TestAllowedAddressPairs): def setUp(self, plugin=v3_constants.PLUGIN_NAME, ext_mgr=None, service_plugins=None): super(TestAllowedAddressPairsNSXv3, self).setUp( plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins) def test_create_bad_address_pairs_with_cidr(self): address_pairs = [{'mac_address': '00:00:00:00:00:01', 'ip_address': '10.0.0.1/24'}] self._create_port_with_address_pairs(address_pairs, 400) def test_create_port_allowed_address_pairs_v6(self): with self.network() as net: address_pairs = [{'ip_address': '1001::12'}] res = self._create_port(self.fmt, net['network']['id'], arg_list=(addr_apidef.ADDRESS_PAIRS,), allowed_address_pairs=address_pairs) port = self.deserialize(self.fmt, res) address_pairs[0]['mac_address'] = port['port']['mac_address'] self.assertEqual(port['port'][addr_apidef.ADDRESS_PAIRS], address_pairs) self._delete('ports', 
port['port']['id']) def test_update_add_bad_address_pairs_with_cidr(self): with self.network() as net: res = self._create_port(self.fmt, net['network']['id']) port = self.deserialize(self.fmt, res) address_pairs = [{'mac_address': '00:00:00:00:00:01', 'ip_address': '10.0.0.1/24'}] update_port = {'port': {addr_apidef.ADDRESS_PAIRS: address_pairs}} req = self.new_update_request('ports', update_port, port['port']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, 400) self._delete('ports', port['port']['id']) def test_create_port_security_false_allowed_address_pairs(self): self.skipTest('TBD') class TestAllowedAddressPairsNSXv(test_nsx_v_plugin.NsxVPluginV2TestCase, ext_pairs.TestAllowedAddressPairs): def setUp(self, plugin='vmware_nsx.plugin.NsxVPlugin', ext_mgr=None, service_plugins=None): super(TestAllowedAddressPairsNSXv, self).setUp( plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins) def test_create_port_security_false_allowed_address_pairs(self): self.skipTest('TBD') def test_update_port_security_off_address_pairs(self): self.skipTest('Not supported') def test_create_overlap_with_fixed_ip(self): address_pairs = [{'ip_address': '10.0.0.2'}] with self.network() as network: with self.subnet(network=network, cidr='10.0.0.0/24', enable_dhcp=False) as subnet: fixed_ips = [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.2'}] res = self._create_port(self.fmt, network['network']['id'], arg_list=(addr_apidef.ADDRESS_PAIRS, 'fixed_ips'), allowed_address_pairs=address_pairs, fixed_ips=fixed_ips) self.assertEqual(res.status_int, 201) port = self.deserialize(self.fmt, res) self._delete('ports', port['port']['id']) def test_create_port_allowed_address_pairs(self): with self.network() as net: address_pairs = [{'ip_address': '10.0.0.1'}] res = self._create_port(self.fmt, net['network']['id'], arg_list=(addr_apidef.ADDRESS_PAIRS,), allowed_address_pairs=address_pairs) port = self.deserialize(self.fmt, res) 
address_pairs[0]['mac_address'] = port['port']['mac_address'] self.assertEqual(port['port'][addr_apidef.ADDRESS_PAIRS], address_pairs) self._delete('ports', port['port']['id']) def _test_create_port_remove_allowed_address_pairs(self, update_value): with self.network() as net: address_pairs = [{'ip_address': '10.0.0.1'}] res = self._create_port(self.fmt, net['network']['id'], arg_list=(addr_apidef.ADDRESS_PAIRS,), allowed_address_pairs=address_pairs) port = self.deserialize(self.fmt, res) update_port = {'port': {addr_apidef.ADDRESS_PAIRS: []}} req = self.new_update_request('ports', update_port, port['port']['id']) port = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(port['port'][addr_apidef.ADDRESS_PAIRS], []) self._delete('ports', port['port']['id']) def test_update_add_address_pairs(self): with self.network() as net: res = self._create_port(self.fmt, net['network']['id']) port = self.deserialize(self.fmt, res) address_pairs = [{'ip_address': '10.0.0.1'}] update_port = {'port': {addr_apidef.ADDRESS_PAIRS: address_pairs}} req = self.new_update_request('ports', update_port, port['port']['id']) port = self.deserialize(self.fmt, req.get_response(self.api)) address_pairs[0]['mac_address'] = port['port']['mac_address'] self.assertEqual(port['port'][addr_apidef.ADDRESS_PAIRS], address_pairs) self._delete('ports', port['port']['id']) def test_mac_configuration(self): address_pairs = [{'mac_address': '00:00:00:00:00:01', 'ip_address': '10.0.0.1'}] self._create_port_with_address_pairs(address_pairs, 400) def test_equal_to_max_allowed_address_pair(self): cfg.CONF.set_default('max_allowed_address_pair', 3) address_pairs = [{'ip_address': '10.0.0.1'}, {'ip_address': '10.0.0.2'}, {'ip_address': '10.0.0.3'}] self._create_port_with_address_pairs(address_pairs, 201) def test_create_port_security_true_allowed_address_pairs(self): if self._skip_port_security: self.skipTest("Plugin does not implement port-security extension") with self.network() as net: 
address_pairs = [{'ip_address': '10.0.0.1'}] res = self._create_port(self.fmt, net['network']['id'], arg_list=('port_security_enabled', addr_apidef.ADDRESS_PAIRS,), port_security_enabled=True, allowed_address_pairs=address_pairs) port = self.deserialize(self.fmt, res) self.assertTrue(port['port'][psec.PORTSECURITY]) address_pairs[0]['mac_address'] = port['port']['mac_address'] self.assertEqual(port['port'][addr_apidef.ADDRESS_PAIRS], address_pairs) self._delete('ports', port['port']['id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/extensions/test_dhcp_mtu.py0000644000175000017500000001755200000000000027136 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from neutron.tests.unit.db import test_db_base_plugin_v2 as test_db from neutron_lib.db import api as db_api from vmware_nsx.db import nsxv_db from vmware_nsx.extensions import dhcp_mtu as ext_dhcp_mtu from vmware_nsx.plugins.nsx_v.vshield import edge_utils from vmware_nsx.tests.unit.nsx_v import test_plugin from vmware_nsx.tests.unit.nsx_v.vshield import fake_vcns PLUGIN_NAME = 'vmware_nsx.plugin.NsxVPlugin' class DhcpMtuExtensionManager(object): def get_resources(self): return [] def get_actions(self): return [] def get_request_extensions(self): return [] def get_extended_resources(self, version): return ext_dhcp_mtu.get_extended_resources(version) class DhcpMtuExtensionTestCase(test_plugin.NsxVPluginV2TestCase): """Test API extension dhcp-mtu attribute of subnets.""" @mock.patch.object(edge_utils.EdgeManager, '_deploy_edge') def setUp(self, plugin=PLUGIN_NAME): ext_mgr = DhcpMtuExtensionManager() # This feature is enabled only since 6.2.3 with mock.patch.object(fake_vcns.FakeVcns, 'get_version', return_value="6.2.3"): super(DhcpMtuExtensionTestCase, self).setUp(ext_mgr=ext_mgr) def _create_subnet_with_dhcp_mtu(self, dhcp_mtu): with self.network() as net: tenant_id = net['network']['tenant_id'] net_id = net['network']['id'] data = {'subnet': {'network_id': net_id, 'cidr': '10.0.0.0/24', 'ip_version': 4, 'name': 'test-mtu-subnet', 'tenant_id': tenant_id, 'dhcp_mtu': dhcp_mtu}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) return res def test_subnet_create_with_dhcp_mtu(self): for mtu in (68, 2000, 65535): res = self._create_subnet_with_dhcp_mtu(mtu) sub = self.deserialize(self.fmt, res) self.assertEqual(mtu, sub['subnet']['dhcp_mtu']) def test_subnet_create_with_invalid_dhcp_mtu_fail(self): res = self._create_subnet_with_dhcp_mtu(67) self.assertEqual(400, res.status_int) res = self._create_subnet_with_dhcp_mtu(100000) self.assertEqual(400, res.status_int) def test_subnet_update_with_dhcp_mtu(self): res = 
self._create_subnet_with_dhcp_mtu(2000) sub = self.deserialize(self.fmt, res) data = {'subnet': {'dhcp_mtu': 3000}} req = self.new_update_request('subnets', data, sub['subnet']['id']) updated_sub = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(3000, updated_sub['subnet']['dhcp_mtu']) def _create_subnet_with_dhcp_mtu_and_dns(self, dhcp_mtu, dns_search_domain): with self.network() as net: tenant_id = net['network']['tenant_id'] net_id = net['network']['id'] data = {'subnet': {'network_id': net_id, 'cidr': '10.0.0.0/24', 'ip_version': 4, 'name': 'test-mtu-subnet', 'tenant_id': tenant_id, 'dhcp_mtu': dhcp_mtu, 'dns_search_domain': dns_search_domain}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) return res def test_subnet_create_with_dhcp_mtu_and_dns(self): res = self._create_subnet_with_dhcp_mtu_and_dns(2000, 'vmware.com') sub = self.deserialize(self.fmt, res) self.assertEqual(2000, sub['subnet']['dhcp_mtu']) self.assertEqual('vmware.com', sub['subnet']['dns_search_domain']) def test_subnet_update_with_dhcp_mtu_and_dns(self): res = self._create_subnet_with_dhcp_mtu_and_dns(2000, 'vmware.com') sub = self.deserialize(self.fmt, res) data = {'subnet': {'dhcp_mtu': 3000, 'dns_search_domain': 'eng.vmware.com'}} req = self.new_update_request('subnets', data, sub['subnet']['id']) updated_sub = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(3000, updated_sub['subnet']['dhcp_mtu']) self.assertEqual('eng.vmware.com', updated_sub['subnet']['dns_search_domain']) class DhcpMtuDBTestCase(test_db.NeutronDbPluginV2TestCase): def setUp(self): super(DhcpMtuDBTestCase, self).setUp() self.session = db_api.get_writer_session() def test_get_nsxv_subnet_ext_attributes_no_dhcp_mtu(self): with self.subnet() as sub: sub_binding = nsxv_db.get_nsxv_subnet_ext_attributes( session=self.session, subnet_id=sub['subnet']['id']) self.assertIsNone(sub_binding) def 
test_add_nsxv_subnet_ext_attributes_dhcp_mtu(self): with self.subnet() as sub: nsxv_db.add_nsxv_subnet_ext_attributes( session=self.session, subnet_id=sub['subnet']['id'], dhcp_mtu=2000) sub_binding = nsxv_db.get_nsxv_subnet_ext_attributes( session=self.session, subnet_id=sub['subnet']['id']) self.assertEqual(2000, sub_binding.dhcp_mtu) self.assertEqual(sub['subnet']['id'], sub_binding.subnet_id) def test_update_nsxv_subnet_ext_attributes_dhcp_mtu(self): with self.subnet() as sub: nsxv_db.add_nsxv_subnet_ext_attributes( session=self.session, subnet_id=sub['subnet']['id'], dhcp_mtu=2000) sub_binding = nsxv_db.update_nsxv_subnet_ext_attributes( session=self.session, subnet_id=sub['subnet']['id'], dhcp_mtu=3000) self.assertEqual(3000, sub_binding.dhcp_mtu) def test_add_nsxv_subnet_ext_attributes_dhcp_mtu_and_dns(self): with self.subnet() as sub: nsxv_db.add_nsxv_subnet_ext_attributes( session=self.session, subnet_id=sub['subnet']['id'], dhcp_mtu=2000, dns_search_domain='eng.vmware.com') sub_binding = nsxv_db.get_nsxv_subnet_ext_attributes( session=self.session, subnet_id=sub['subnet']['id']) self.assertEqual(2000, sub_binding.dhcp_mtu) self.assertEqual('eng.vmware.com', sub_binding.dns_search_domain) self.assertEqual(sub['subnet']['id'], sub_binding.subnet_id) def test_update_nsxv_subnet_ext_attributes_dhcp_mtu_and_dns(self): with self.subnet() as sub: nsxv_db.add_nsxv_subnet_ext_attributes( session=self.session, subnet_id=sub['subnet']['id'], dhcp_mtu=2000, dns_search_domain='eng.vmware.com') sub_binding = nsxv_db.update_nsxv_subnet_ext_attributes( session=self.session, subnet_id=sub['subnet']['id'], dhcp_mtu=3000, dns_search_domain='nsx.vmware.com') self.assertEqual(3000, sub_binding.dhcp_mtu) self.assertEqual('nsx.vmware.com', sub_binding.dns_search_domain) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 
vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/extensions/test_dns_search_domain.py0000644000175000017500000001154600000000000030770 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.tests.unit.db import test_db_base_plugin_v2 as test_db from neutron_lib.db import api as db_api from vmware_nsx.db import nsxv_db from vmware_nsx.extensions import dns_search_domain as ext_dns_search_domain from vmware_nsx.plugins.nsx_v.vshield import edge_utils from vmware_nsx.tests.unit.nsx_v import test_plugin PLUGIN_NAME = 'vmware_nsx.plugin.NsxVPlugin' class DnsSearchDomainExtensionManager(object): def get_resources(self): return [] def get_actions(self): return [] def get_request_extensions(self): return [] def get_extended_resources(self, version): return ext_dns_search_domain.get_extended_resources(version) class DnsSearchDomainExtensionTestCase(test_plugin.NsxVPluginV2TestCase): """Test API extension dns-search-domain attribute.""" @mock.patch.object(edge_utils.EdgeManager, '_deploy_edge') def setUp(self, plugin=PLUGIN_NAME): ext_mgr = DnsSearchDomainExtensionManager() super(DnsSearchDomainExtensionTestCase, self).setUp(ext_mgr=ext_mgr) def _create_subnet_with_dns_search_domain(self, dns_search_domain): with self.network() as net: tenant_id = net['network']['tenant_id'] net_id = net['network']['id'] data = {'subnet': {'network_id': net_id, 'cidr': '10.0.0.0/24', 'ip_version': 4, 'name': 
'test-dns-search-domain-subnet', 'tenant_id': tenant_id, 'dns_search_domain': dns_search_domain}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) return res def test_subnet_create_with_dns_search_domain(self): res = self._create_subnet_with_dns_search_domain('vmware.com') sub = self.deserialize(self.fmt, res) self.assertEqual('vmware.com', sub['subnet']['dns_search_domain']) def test_subnet_create_with_invalid_dns_search_domain_fail(self): res = self._create_subnet_with_dns_search_domain('vmw@re.com') self.assertEqual(400, res.status_int) def test_subnet_update_with_dns_search_domain(self): res = self._create_subnet_with_dns_search_domain('vmware.com') sub = self.deserialize(self.fmt, res) data = {'subnet': {'dns_search_domain': 'eng.vmware.com'}} req = self.new_update_request('subnets', data, sub['subnet']['id']) updated_sub = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual('eng.vmware.com', updated_sub['subnet']['dns_search_domain']) class DnsSearchDomainDBTestCase(test_db.NeutronDbPluginV2TestCase): def setUp(self): super(DnsSearchDomainDBTestCase, self).setUp() self.session = db_api.get_writer_session() def test_get_nsxv_subnet_ext_attributes_no_dns_search_domain(self): with self.subnet() as sub: sub_binding = nsxv_db.get_nsxv_subnet_ext_attributes( session=self.session, subnet_id=sub['subnet']['id']) self.assertIsNone(sub_binding) def test_add_nsxv_subnet_ext_attributes_dns_search_domain(self): with self.subnet() as sub: nsxv_db.add_nsxv_subnet_ext_attributes( session=self.session, subnet_id=sub['subnet']['id'], dns_search_domain='eng.vmware.com') sub_binding = nsxv_db.get_nsxv_subnet_ext_attributes( session=self.session, subnet_id=sub['subnet']['id']) self.assertEqual('eng.vmware.com', sub_binding.dns_search_domain) self.assertEqual(sub['subnet']['id'], sub_binding.subnet_id) def test_update_nsxv_subnet_ext_attributes_dns_search_domain(self): with self.subnet() as sub: 
nsxv_db.add_nsxv_subnet_ext_attributes( session=self.session, subnet_id=sub['subnet']['id'], dns_search_domain='eng.vmware.com') sub_binding = nsxv_db.update_nsxv_subnet_ext_attributes( session=self.session, subnet_id=sub['subnet']['id'], dns_search_domain='nsx.vmware.com') self.assertEqual('nsx.vmware.com', sub_binding.dns_search_domain) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/extensions/test_maclearning.py0000644000175000017500000001232600000000000027605 0ustar00coreycorey00000000000000# Copyright (c) 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from neutron.extensions import agent from neutron_lib import context from oslo_config import cfg from vmware_nsx.tests import unit as vmware from vmware_nsx.tests.unit.nsx_v3 import test_plugin as test_nsxv3 from vmware_nsx.tests.unit import test_utils class MacLearningExtensionManager(object): def get_resources(self): return agent.Agent.get_resources() def get_actions(self): return [] def get_request_extensions(self): return [] class MacLearningDBTestCase(test_nsxv3.NsxV3PluginTestCaseMixin): fmt = 'json' def setUp(self): test_utils.override_nsx_ini_full_test() cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH) ext_mgr = MacLearningExtensionManager() cfg.CONF.set_override('metadata_mode', None, 'NSX') super(MacLearningDBTestCase, self).setUp(plugin=vmware.PLUGIN_NAME, ext_mgr=ext_mgr) self.adminContext = context.get_admin_context() def test_create_with_mac_learning(self): with self.port(arg_list=('mac_learning_enabled', 'port_security_enabled'), mac_learning_enabled=True, port_security_enabled=False) as port: # Validate create operation response self.assertEqual(True, port['port']['mac_learning_enabled']) # Verify that db operation successfully set mac learning state req = self.new_show_request('ports', port['port']['id'], self.fmt) sport = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(True, sport['port']['mac_learning_enabled']) def test_create_and_show_port_without_mac_learning(self): with self.port() as port: req = self.new_show_request('ports', port['port']['id'], self.fmt) sport = self.deserialize(self.fmt, req.get_response(self.api)) self.assertNotIn('mac_learning_enabled', sport['port']) def test_update_port_with_mac_learning(self): with self.port(arg_list=('mac_learning_enabled', 'port_security_enabled'), mac_learning_enabled=False, port_security_enabled=False) as port: data = {'port': {'mac_learning_enabled': True}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, 
req.get_response(self.api)) self.assertEqual(True, res['port']['mac_learning_enabled']) def test_update_preexisting_port_with_mac_learning(self): with self.port(arg_list=('port_security_enabled',), port_security_enabled=False) as port: req = self.new_show_request('ports', port['port']['id'], self.fmt) sport = self.deserialize(self.fmt, req.get_response(self.api)) self.assertNotIn('mac_learning_enabled', sport['port']) data = {'port': {'mac_learning_enabled': True}} req = self.new_update_request('ports', data, port['port']['id']) # Validate update operation response res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(True, res['port']['mac_learning_enabled']) # Verify that db operation successfully updated mac learning state req = self.new_show_request('ports', port['port']['id'], self.fmt) sport = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(True, sport['port']['mac_learning_enabled']) def test_list_ports(self): # for this test we need to enable overlapping ips cfg.CONF.set_default('allow_overlapping_ips', True) no_mac_learning_p = (lambda: self.port(arg_list=('mac_learning_enabled', 'port_security_enabled'), mac_learning_enabled=True, port_security_enabled=False)) with no_mac_learning_p(), no_mac_learning_p(), no_mac_learning_p(): for port in self._list('ports')['ports']: self.assertEqual(True, port['mac_learning_enabled']) def test_show_port(self): with self.port(arg_list=('mac_learning_enabled', 'port_security_enabled'), mac_learning_enabled=True, port_security_enabled=False) as p: port_res = self._show('ports', p['port']['id'])['port'] self.assertEqual(True, port_res['mac_learning_enabled']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/extensions/test_metadata.py0000644000175000017500000003672600000000000027117 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. 
# All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import netaddr from neutron_lib import constants from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from oslo_config import cfg import webob.exc from vmware_nsx.api_client import exception as api_exc from vmware_nsx.common import config class MetaDataTestCase(object): def _metadata_setup(self, mode=config.MetadataModes.DIRECT, on_demand=False): cfg.CONF.set_override('metadata_mode', mode, self.plugin.cfg_group) if hasattr(getattr(cfg.CONF, self.plugin.cfg_group), 'metadata_on_demand'): cfg.CONF.set_override('metadata_on_demand', on_demand, self.plugin.cfg_group) def _metadata_teardown(self): cfg.CONF.set_override('metadata_mode', None, self.plugin.cfg_group) if hasattr(getattr(cfg.CONF, self.plugin.cfg_group), 'metadata_on_demand'): cfg.CONF.set_override('metadata_on_demand', False, self.plugin.cfg_group) def _check_metadata(self, expected_subnets, expected_ports): subnets = self._list('subnets')['subnets'] self.assertEqual(len(subnets), expected_subnets) meta_net_id, meta_sub_id = None, None meta_cidr = netaddr.IPNetwork('169.254.0.0/16') for subnet in subnets: cidr = netaddr.IPNetwork(subnet['cidr']) if meta_cidr == cidr or meta_cidr in cidr.supernet(16): meta_sub_id = subnet['id'] meta_net_id = subnet['network_id'] break ports = self._list( 'ports', query_params='network_id=%s' % meta_net_id)['ports'] self.assertEqual(len(ports), expected_ports) meta_port_id = 
ports[0]['id'] if ports else None return meta_net_id, meta_sub_id, meta_port_id def test_router_add_interface_subnet_with_metadata_access(self): self._metadata_setup() self.test_router_add_interface_subnet() self._metadata_teardown() def test_router_add_interface_port_with_metadata_access(self): self._metadata_setup() self.test_router_add_interface_port() self._metadata_teardown() def test_router_add_interface_dupsubnet_returns_400_with_metadata(self): self._metadata_setup() self.test_router_add_interface_dup_subnet1_returns_400() self._metadata_teardown() def test_router_add_interface_overlapped_cidr_returns_400_with(self): self._metadata_setup() self.test_router_add_interface_overlapped_cidr_returns_400() self._metadata_teardown() def test_router_remove_interface_inuse_returns_409_with_metadata(self): self._metadata_setup() self.test_router_remove_interface_inuse_returns_409() self._metadata_teardown() def test_router_remove_iface_wrong_sub_returns_400_with_metadata(self): self._metadata_setup() self.test_router_remove_interface_wrong_subnet_returns_400() self._metadata_teardown() def test_router_delete_with_metadata_access(self): self._metadata_setup() self.test_router_delete() self._metadata_teardown() def test_router_delete_with_port_existed_returns_409_with_metadata(self): self._metadata_setup() self.test_router_delete_with_port_existed_returns_409() self._metadata_teardown() def test_delete_port_with_metadata(self): self._metadata_setup(config.MetadataModes.INDIRECT) with self.subnet() as s: with self.port(subnet=s, fixed_ips=[], device_id='1234', device_owner=constants.DEVICE_OWNER_DHCP) as port: self._delete('ports', port['port']['id']) self._metadata_teardown() def test_metadatata_network_created_with_router_interface_add(self): self._metadata_setup() with mock.patch.object(self._plugin_class, 'schedule_network') as f: with self.router() as r: with self.subnet() as s: self._router_interface_action('add', r['router']['id'], s['subnet']['id'], None) r_ports 
= self._list('ports')['ports'] self.assertEqual(len(r_ports), 2) ips = [] for port in r_ports: ips.extend([netaddr.IPAddress(fixed_ip['ip_address']) for fixed_ip in port['fixed_ips']]) meta_cidr = netaddr.IPNetwork('169.254.0.0/16') self.assertTrue(any([ip in meta_cidr for ip in ips])) # Needed to avoid 409. self._router_interface_action('remove', r['router']['id'], s['subnet']['id'], None) # Verify that there has been a schedule_network all for the # metadata network expected_net_name = 'meta-%s' % r['router']['id'] found = False for call in f.call_args_list: # The network data are the last of the positional arguments net_dict = call[0][-1] if net_dict['name'] == expected_net_name: self.assertFalse(net_dict['port_security_enabled']) self.assertFalse(net_dict['shared']) self.assertFalse(net_dict['tenant_id']) found = True break else: self.fail("Expected schedule_network call for metadata " "network %s not found" % expected_net_name) self.assertTrue(found) self._metadata_teardown() def test_metadata_network_create_rollback_on_create_subnet_failure(self): self._metadata_setup() with self.router() as r: with self.subnet() as s: # Raise a NeutronException (eg: NotFound). with mock.patch.object(self._plugin_class, 'create_subnet', side_effect=n_exc.NotFound): self._router_interface_action( 'add', r['router']['id'], s['subnet']['id'], None) # Ensure metadata network was removed. nets = self._list('networks')['networks'] self.assertEqual(len(nets), 1) # Needed to avoid 409. self._router_interface_action('remove', r['router']['id'], s['subnet']['id'], None) self._metadata_teardown() def test_metadata_network_create_rollback_on_add_rtr_iface_failure(self): self._metadata_setup() with self.router() as r: with self.subnet() as s: # Save function being mocked. real_func = self._plugin_class.add_router_interface plugin_instance = directory.get_plugin() # Raise a NeutronException when adding metadata subnet # to router. 
def side_effect(*args): if args[-1]['subnet_id'] == s['subnet']['id']: # Do the real thing. return real_func(plugin_instance, *args) # Otherwise raise. raise api_exc.NsxApiException() with mock.patch.object(self._plugin_class, 'add_router_interface', side_effect=side_effect): self._router_interface_action( 'add', r['router']['id'], s['subnet']['id'], None) # Ensure metadata network was removed. nets = self._list('networks')['networks'] self.assertEqual(len(nets), 1) # Needed to avoid 409. self._router_interface_action('remove', r['router']['id'], s['subnet']['id'], None) self._metadata_teardown() def test_metadata_network_removed_with_router_interface_remove(self): self._metadata_setup() with self.router() as r: with self.subnet() as s: self._router_interface_action('add', r['router']['id'], s['subnet']['id'], None) meta_net_id, meta_sub_id, meta_port_id = self._check_metadata( expected_subnets=2, expected_ports=1) self._router_interface_action('remove', r['router']['id'], s['subnet']['id'], None) self._show('networks', meta_net_id, webob.exc.HTTPNotFound.code) self._show('ports', meta_port_id, webob.exc.HTTPNotFound.code) self._show('subnets', meta_sub_id, webob.exc.HTTPNotFound.code) self._metadata_teardown() def test_metadata_network_remove_rollback_on_failure(self): self._metadata_setup() with self.router() as r: with self.subnet() as s: self._router_interface_action('add', r['router']['id'], s['subnet']['id'], None) networks = self._list('networks')['networks'] for network in networks: if network['id'] != s['subnet']['network_id']: meta_net_id = network['id'] ports = self._list( 'ports', query_params='network_id=%s' % meta_net_id)['ports'] meta_port_id = ports[0]['id'] # Save function being mocked. real_func = self._plugin_class.remove_router_interface plugin_instance = directory.get_plugin() # Raise a NeutronException when removing metadata subnet # from router. def side_effect(*args): if args[-1].get('subnet_id') == s['subnet']['id']: # Do the real thing. 
return real_func(plugin_instance, *args) # Otherwise raise. raise api_exc.NsxApiException() with mock.patch.object(self._plugin_class, 'remove_router_interface', side_effect=side_effect): self._router_interface_action('remove', r['router']['id'], s['subnet']['id'], None) # Metadata network and subnet should still be there. self._show('networks', meta_net_id, webob.exc.HTTPOk.code) self._show('ports', meta_port_id, webob.exc.HTTPOk.code) self._metadata_teardown() def test_metadata_network_with_update_subnet_dhcp_enable(self): self._metadata_setup(on_demand=True) with self.router() as r: # Create a DHCP-disabled subnet. with self.subnet(enable_dhcp=False) as s: self._router_interface_action('add', r['router']['id'], s['subnet']['id'], None) meta_net_id, meta_sub_id, meta_port_id = self._check_metadata( expected_subnets=2, expected_ports=1) # Update subnet to DHCP-enabled. data = {'subnet': {'enable_dhcp': True}} req = self.new_update_request('subnets', data, s['subnet']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(True, res['subnet']['enable_dhcp']) self._check_metadata(expected_subnets=1, expected_ports=0) self._show('networks', meta_net_id, webob.exc.HTTPNotFound.code) self._show('ports', meta_port_id, webob.exc.HTTPNotFound.code) self._show('subnets', meta_sub_id, webob.exc.HTTPNotFound.code) self._metadata_teardown() def test_metadata_network_with_update_subnet_dhcp_disable(self): self._metadata_setup(on_demand=True) with self.router() as r: # Create a DHCP-enabled subnet. with self.subnet(enable_dhcp=True) as s: self._router_interface_action('add', r['router']['id'], s['subnet']['id'], None) self._check_metadata(expected_subnets=1, expected_ports=0) # Update subnet to DHCP-disabled. 
data = {'subnet': {'enable_dhcp': False}} req = self.new_update_request('subnets', data, s['subnet']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(False, res['subnet']['enable_dhcp']) meta_net_id, meta_sub_id, meta_port_id = self._check_metadata( expected_subnets=2, expected_ports=1) self._show('networks', meta_net_id, webob.exc.HTTPOk.code) self._show('ports', meta_port_id, webob.exc.HTTPOk.code) self._show('subnets', meta_sub_id, webob.exc.HTTPOk.code) self._metadata_teardown() def test_metadata_dhcp_host_route(self): self._metadata_setup(config.MetadataModes.INDIRECT) subnets = self._list('subnets')['subnets'] with self.subnet() as s: with self.port(subnet=s, device_id='1234', device_owner=constants.DEVICE_OWNER_DHCP) as port: subnets = self._list('subnets')['subnets'] self.assertEqual(len(subnets), 1) subnet_ip_net = netaddr.IPNetwork(s['subnet']['cidr']) self.assertIn(netaddr.IPAddress( subnets[0]['host_routes'][0]['nexthop']), subnet_ip_net) self.assertEqual(subnets[0]['host_routes'][0]['destination'], '169.254.169.254/32') self._delete('ports', port['port']['id']) subnets = self._list('subnets')['subnets'] # Test that route is deleted after dhcp port is removed. self.assertEqual(len(subnets[0]['host_routes']), 0) self._metadata_teardown() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/extensions/test_portsecurity.py0000644000175000017500000000277700000000000030112 0ustar00coreycorey00000000000000# Copyright (c) 2014 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from neutron.tests.unit.extensions import test_portsecurity as psec from vmware_nsx.tests import unit as vmware from vmware_nsx.tests.unit.nsx_v3 import test_constants as v3_constants from vmware_nsx.tests.unit.nsx_v3 import test_plugin as test_nsxv3 from vmware_nsx.tests.unit import test_utils class PortSecurityTestCaseNSXv2(psec.PortSecurityDBTestCase, test_nsxv3.NsxV3PluginTestCaseMixin): def setUp(self): test_utils.override_nsx_ini_test() super(PortSecurityTestCaseNSXv2, self).setUp(vmware.PLUGIN_NAME) class TestPortSecurityNSXv2(PortSecurityTestCaseNSXv2, psec.TestPortSecurity): pass class TestPortSecurityNSXv3(psec.TestPortSecurity, test_nsxv3.NsxV3PluginTestCaseMixin): def setUp(self, plugin=v3_constants.PLUGIN_NAME): super(TestPortSecurityNSXv3, self).setUp(plugin=plugin) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/extensions/test_provider_security_groups.py0000644000175000017500000004760500000000000032515 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import mock import webob.exc from neutron.db import db_base_plugin_v2 from neutron.db import securitygroups_db from neutron.extensions import securitygroup as ext_sg from neutron.tests.unit.db import test_db_base_plugin_v2 from neutron.tests.unit.extensions import test_securitygroup from neutron_lib import context from neutron_lib.db import api as db_api from vmware_nsx.db import extended_security_group from vmware_nsx.extensions import providersecuritygroup as provider_sg from vmware_nsx.tests.unit.nsx_p import test_plugin as test_nsxp_plugin from vmware_nsx.tests.unit.nsx_v import test_plugin as test_nsxv_plugin from vmware_nsx.tests.unit.nsx_v3 import test_plugin as test_nsxv3_plugin PLUGIN_NAME = ('vmware_nsx.tests.unit.extensions.' 'test_provider_security_groups.ProviderSecurityGroupTestPlugin') # FIXME(arosen): make common mixin for extended_security_group_properties and # security_group_db_minxin. class ProviderSecurityGroupTestPlugin( db_base_plugin_v2.NeutronDbPluginV2, extended_security_group.ExtendedSecurityGroupPropertiesMixin, securitygroups_db.SecurityGroupDbMixin): supported_extension_aliases = ["security-group", provider_sg.ALIAS] def create_security_group(self, context, security_group, default_sg=False): secgroup = security_group['security_group'] with db_api.CONTEXT_WRITER.using(context): # NOTE(arosen): a neutron security group by default adds rules # that allow egress traffic. 
We do not want this behavior for # provider security_groups if secgroup.get(provider_sg.PROVIDER) is True: secgroup_db = self.create_provider_security_group( context, security_group) else: secgroup_db = ( super(ProviderSecurityGroupTestPlugin, self ).create_security_group(context, security_group, default_sg)) self._process_security_group_properties_create(context, secgroup_db, secgroup, default_sg) return secgroup_db def create_port(self, context, port, l2gw_port_check=False): port_data = port['port'] with db_api.CONTEXT_WRITER.using(context): self._ensure_default_security_group_on_port(context, port) (sgids, provider_groups) = self._get_port_security_groups_lists( context, port) port_db = super(ProviderSecurityGroupTestPlugin, self).create_port( context, port) port_data.update(port_db) # handle adding security groups to port self._process_port_create_security_group( context, port_db, sgids) # handling adding provider security group to port if there are any self._process_port_create_provider_security_group( context, port_data, provider_groups) return port_data def update_port(self, context, id, port): with db_api.CONTEXT_WRITER.using(context): original_port = super(ProviderSecurityGroupTestPlugin, self).get_port(context, id) updated_port = super(ProviderSecurityGroupTestPlugin, self).update_port(context, id, port) self.update_security_group_on_port(context, id, port, original_port, updated_port) self._process_port_update_provider_security_group( context, port, original_port, updated_port) return self.get_port(context, id) def _make_port_dict(self, port, fields=None, process_extensions=True): port_data = super( ProviderSecurityGroupTestPlugin, self)._make_port_dict( port, fields=fields, process_extensions=process_extensions) self._remove_provider_security_groups_from_list(port_data) return port_data def delete_security_group(self, context, id): self._prevent_non_admin_edit_provider_sg(context, id) super(ProviderSecurityGroupTestPlugin, 
self).delete_security_group(context, id) def delete_security_group_rule(self, context, id): rule_db = self._get_security_group_rule(context, id) sg_id = rule_db['security_group_id'] self._prevent_non_admin_edit_provider_sg(context, sg_id) return super(ProviderSecurityGroupTestPlugin, self).delete_security_group_rule(context, id) def create_security_group_rule(self, context, security_group_rule): id = security_group_rule['security_group_rule']['security_group_id'] self._prevent_non_admin_edit_provider_sg(context, id) return super(ProviderSecurityGroupTestPlugin, self).create_security_group_rule(context, security_group_rule) class ProviderSecurityGroupExtTestCase( test_securitygroup.SecurityGroupDBTestCase): def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None): super(ProviderSecurityGroupExtTestCase, self).setUp( plugin=plugin, ext_mgr=ext_mgr) self._tenant_id = test_db_base_plugin_v2.TEST_TENANT_ID # add provider group attributes ext_sg.Securitygroup().update_attributes_map( provider_sg.EXTENDED_ATTRIBUTES_2_0) def _create_provider_security_group(self): body = {'security_group': {'name': 'provider-deny', 'tenant_id': self._tenant_id, 'description': 'foobarzzkk', 'provider': True}} security_group_req = self.new_create_request('security-groups', body) return self.deserialize(self.fmt, security_group_req.get_response(self.ext_api)) def test_create_provider_security_group(self): # confirm this attribute is true provider_secgroup = self._create_provider_security_group() self.assertTrue(provider_secgroup['security_group']['provider']) # provider security groups have no rules by default which is different # from normal neutron security groups which by default include a rule # to allow egress traffic. We confirm this here. 
self.assertEqual( provider_secgroup['security_group']['security_group_rules'], []) def test_create_provider_security_groups_same_tenant(self): provider_secgroup = self._create_provider_security_group() self.assertTrue(provider_secgroup['security_group']['provider']) # Verify that another one can also be created for the same tenant provider_secgroup2 = self._create_provider_security_group() self.assertTrue(provider_secgroup2['security_group']['provider']) def test_create_port_gets_provider_sg(self): # need to create provider security group first. provider_secgroup = self._create_provider_security_group() with self.port(tenant_id=self._tenant_id) as p: # check that the provider security group is on port resource. self.assertEqual(1, len(p['port']['provider_security_groups'])) self.assertEqual(provider_secgroup['security_group']['id'], p['port']['provider_security_groups'][0]) # confirm there is still a default security group. self.assertEqual(len(p['port']['security_groups']), 1) def test_create_port_gets_multi_provider_sg(self): # need to create provider security groups first. provider_secgroup1 = self._create_provider_security_group() provider_secgroup2 = self._create_provider_security_group() with self.port(tenant_id=self._tenant_id) as p: # check that the provider security group is on port resource. self.assertEqual(2, len(p['port']['provider_security_groups'])) self.assertIn(provider_secgroup1['security_group']['id'], p['port']['provider_security_groups']) self.assertIn(provider_secgroup2['security_group']['id'], p['port']['provider_security_groups']) # confirm there is still a default security group. 
self.assertEqual(len(p['port']['security_groups']), 1) def test_create_port_with_no_provider_sg(self): self._create_provider_security_group() with self.port(tenant_id=self._tenant_id, arg_list=('provider_security_groups', ), provider_security_groups=[]) as p1: self.assertEqual([], p1['port']['provider_security_groups']) with self.port(tenant_id=self._tenant_id, arg_list=('provider_security_groups', ), provider_security_groups=None) as p1: self.assertEqual([], p1['port']['provider_security_groups']) def test_update_port_remove_provider_sg_with_empty_list(self): # need to create provider security group first. self._create_provider_security_group() with self.port(tenant_id=self._tenant_id) as p: body = {'port': {'provider_security_groups': []}} req = self.new_update_request('ports', body, p['port']['id']) port = self.deserialize(self.fmt, req.get_response(self.api)) # confirm that the group has been removed. self.assertEqual([], port['port']['provider_security_groups']) def test_update_port_remove_provider_sg_with_none(self): # need to create provider security group first. self._create_provider_security_group() with self.port(tenant_id=self._tenant_id) as p: body = {'port': {'provider_security_groups': None}} req = self.new_update_request('ports', body, p['port']['id']) port = self.deserialize(self.fmt, req.get_response(self.api)) # confirm that the group has been removed. 
self.assertEqual([], port['port']['provider_security_groups']) def test_cannot_update_port_with_provider_group_as_sec_group(self): with self.port(tenant_id=self._tenant_id) as p: provider_secgroup = self._create_provider_security_group() sg_id = provider_secgroup['security_group']['id'] body = {'port': {'security_groups': [sg_id]}} req = self.new_update_request('ports', body, p['port']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_cannot_update_port_with_sec_group_as_provider(self): with self.security_group() as sg1: with self.port(tenant_id=self._tenant_id) as p: sg_id = sg1['security_group']['id'] body = {'port': {'provider_security_groups': [sg_id]}} req = self.new_update_request('ports', body, p['port']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_cannot_update_port_with_different_tenant_provider_secgroup(self): with self.port(tenant_id=self._tenant_id) as p: tmp_tenant_id = self._tenant_id self._tenant_id += "-alt" pvd_sg = self._create_provider_security_group() self._tenant_id = tmp_tenant_id body = {'port': {'provider_security_groups': [ pvd_sg['security_group']['id']]}} ctx = context.Context('', self._tenant_id) req = self.new_update_request('ports', body, p['port']['id'], context=ctx) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int) def test_update_port_security_groups_only(self): # We want to make sure that modifying security-groups on the port # doesn't impact the provider security-group on this port. 
provider_secgroup = self._create_provider_security_group() with self.security_group() as sg1: with self.port(tenant_id=self._tenant_id) as p: sg_id = sg1['security_group']['id'] body = {'port': {'security_groups': [sg_id]}} req = self.new_update_request('ports', body, p['port']['id']) port = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual( [provider_secgroup['security_group']['id']], port['port']['provider_security_groups']) def test_update_port_security_groups(self): with self.security_group() as sg1: with self.port(tenant_id=self._tenant_id) as p: # Port created before provider secgroup is created, so the port # would not be associated with the pvd secgroup at this point. provider_secgroup = self._create_provider_security_group() pvd_sg_id = provider_secgroup['security_group']['id'] sg_id = sg1['security_group']['id'] body = {'port': { 'security_groups': [sg_id], 'provider_security_groups': [pvd_sg_id]} } req = self.new_update_request('ports', body, p['port']['id']) port = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual([pvd_sg_id], port['port']['provider_security_groups']) self.assertEqual([sg_id], port['port']['security_groups']) def test_non_admin_cannot_delete_provider_sg_and_admin_can(self): provider_secgroup = self._create_provider_security_group() pvd_sg_id = provider_secgroup['security_group']['id'] # Try deleting the request as the normal tenant returns forbidden # as a tenant is not allowed to delete this. 
ctx = context.Context('', self._tenant_id) self._delete('security-groups', pvd_sg_id, expected_code=webob.exc.HTTPForbidden.code, neutron_context=ctx) # can be deleted though as admin self._delete('security-groups', pvd_sg_id, expected_code=webob.exc.HTTPNoContent.code) def test_non_admin_cannot_delete_provider_sg_rule(self): provider_secgroup = self._create_provider_security_group() pvd_sg_id = provider_secgroup['security_group']['id'] data = {'security_group_rule': {'security_group_id': pvd_sg_id, 'direction': 'ingress', 'protocol': 'tcp', 'ethertype': 'IPv4', 'tenant_id': self._tenant_id}} req = self.new_create_request('security-group-rules', data) res = self.deserialize(self.fmt, req.get_response(self.ext_api)) sg_rule_id = res['security_group_rule']['id'] # Try deleting the request as the normal tenant returns forbidden # as a tenant is not allowed to delete this. ctx = context.Context('', self._tenant_id) self._delete('security-group-rules', sg_rule_id, expected_code=webob.exc.HTTPForbidden.code, neutron_context=ctx) # can be deleted though as admin self._delete('security-group-rules', sg_rule_id, expected_code=webob.exc.HTTPNoContent.code) def test_non_admin_cannot_add_provider_sg_rule(self): provider_secgroup = self._create_provider_security_group() pvd_sg_id = provider_secgroup['security_group']['id'] data = {'security_group_rule': {'security_group_id': pvd_sg_id, 'direction': 'ingress', 'protocol': 'tcp', 'ethertype': 'IPv4', 'tenant_id': self._tenant_id}} req = self.new_create_request( 'security-group-rules', data) req.environ['neutron.context'] = context.Context('', self._tenant_id) res = req.get_response(self.ext_api) self.assertEqual(webob.exc.HTTPForbidden.code, res.status_int) class TestNSXv3ProviderSecurityGrp(test_nsxv3_plugin.NsxV3PluginTestCaseMixin, ProviderSecurityGroupExtTestCase): def test_update_port_remove_provider_sg(self): # need to create provider security group first. 
self._create_provider_security_group() with self.port(tenant_id=self._tenant_id) as p: body = {'port': {'provider_security_groups': []}} req = self.new_update_request('ports', body, p['port']['id']) port = self.deserialize(self.fmt, req.get_response(self.api)) # confirm that the group has been removed. self.assertEqual([], port['port']['provider_security_groups']) # make sure that the security groups did not contain the provider # security group self.assertEqual(p['port']['security_groups'], port['port']['security_groups']) class TestNSXvProviderSecurityGroup(test_nsxv_plugin.NsxVPluginV2TestCase, ProviderSecurityGroupExtTestCase): def test_create_provider_security_group(self): _create_section_tmp = self.fc2.create_section def _create_section(*args, **kwargs): return _create_section_tmp(*args, **kwargs) with mock.patch.object(self.fc2, 'create_section', side_effect=_create_section) as create_sec_mock: super(TestNSXvProviderSecurityGroup, self).test_create_provider_security_group() create_sec_mock.assert_called_with('ip', mock.ANY, insert_top=True, insert_before=mock.ANY) def test_create_provider_security_group_rule(self): provider_secgroup = self._create_provider_security_group() sg_id = provider_secgroup['security_group']['id'] _create_nsx_rule_tmp = self.plugin._create_nsx_rule def m_create_nsx_rule(*args, **kwargs): return _create_nsx_rule_tmp(*args, **kwargs) with mock.patch.object(self.plugin, '_create_nsx_rule', side_effect=m_create_nsx_rule) as create_rule_m: with self.security_group_rule(security_group_id=sg_id): create_rule_m.assert_called_with(mock.ANY, mock.ANY, logged=mock.ANY, action='deny') class TestNSXpProviderSecurityGrp(test_nsxp_plugin.NsxPPluginTestCaseMixin, ProviderSecurityGroupExtTestCase): def test_create_provider_security_group_rule(self): provider_secgroup = self._create_provider_security_group() sg_id = provider_secgroup['security_group']['id'] with mock.patch("vmware_nsxlib.v3.policy.core_resources." 
"NsxPolicyCommunicationMapApi.update_with_entries" ) as entry_create,\ self.security_group_rule(security_group_id=sg_id): entry_create.assert_called_once() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/extensions/test_providernet.py0000644000175000017500000002226400000000000027670 0ustar00coreycorey00000000000000# Copyright (c) 2014 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg import webob.exc from neutron_lib.api.definitions import multiprovidernet as mpnet_apidef from neutron_lib.api.definitions import provider_net as pnet from vmware_nsx.tests import unit as vmware from vmware_nsx.tests.unit.nsx_v import test_plugin as test_nsxv class TestProvidernet(test_nsxv.NsxVPluginV2TestCase): def test_create_delete_provider_network_default_physical_net(self): '''Leaves physical_net unspecified''' data = {'network': {'name': 'net1', 'admin_state_up': True, 'tenant_id': 'admin', pnet.NETWORK_TYPE: 'vlan', pnet.SEGMENTATION_ID: 411}} network_req = self.new_create_request('networks', data, self.fmt) net = self.deserialize(self.fmt, network_req.get_response(self.api)) self.assertEqual(net['network'][pnet.NETWORK_TYPE], 'vlan') self.assertEqual(net['network'][pnet.SEGMENTATION_ID], 411) req = self.new_delete_request('networks', net['network']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code) def test_create_delete_provider_network_default_physical_net_2(self): '''Uses the 'default' keyword as physical_net''' data = {'network': {'name': 'net1', 'admin_state_up': True, 'tenant_id': 'admin', pnet.NETWORK_TYPE: 'vlan', pnet.SEGMENTATION_ID: 411, pnet.PHYSICAL_NETWORK: 'default'}} network_req = self.new_create_request('networks', data, self.fmt) net = self.deserialize(self.fmt, network_req.get_response(self.api)) self.assertEqual(net['network'][pnet.NETWORK_TYPE], 'vlan') self.assertEqual(net['network'][pnet.SEGMENTATION_ID], 411) req = self.new_delete_request('networks', net['network']['id']) res = req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code) def test_create_provider_network(self): data = {'network': {'name': 'net1', 'admin_state_up': True, 'tenant_id': 'admin', pnet.NETWORK_TYPE: 'vlan', pnet.SEGMENTATION_ID: 411, pnet.PHYSICAL_NETWORK: 'physnet1'}} network_req = self.new_create_request('networks', data, self.fmt) net = 
self.deserialize(self.fmt, network_req.get_response(self.api)) self.assertEqual(net['network'][pnet.NETWORK_TYPE], 'vlan') self.assertEqual(net['network'][pnet.SEGMENTATION_ID], 411) self.assertEqual(net['network'][pnet.PHYSICAL_NETWORK], 'physnet1') # Test that we can create another provider network using the same # vlan_id on another physical network. data['network'][pnet.PHYSICAL_NETWORK] = 'physnet2' network_req = self.new_create_request('networks', data, self.fmt) net = self.deserialize(self.fmt, network_req.get_response(self.api)) self.assertEqual(net['network'][pnet.NETWORK_TYPE], 'vlan') self.assertEqual(net['network'][pnet.SEGMENTATION_ID], 411) self.assertEqual(net['network'][pnet.PHYSICAL_NETWORK], 'physnet2') class TestMultiProviderNetworks(test_nsxv.NsxVPluginV2TestCase): def setUp(self, plugin=None): cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH) super(TestMultiProviderNetworks, self).setUp() def test_create_network_provider(self): data = {'network': {'name': 'net1', pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1, 'tenant_id': 'tenant_one'}} network_req = self.new_create_request('networks', data) network = self.deserialize(self.fmt, network_req.get_response(self.api)) self.assertEqual(network['network'][pnet.NETWORK_TYPE], 'vlan') self.assertEqual(network['network'][pnet.PHYSICAL_NETWORK], 'physnet1') self.assertEqual(network['network'][pnet.SEGMENTATION_ID], 1) self.assertNotIn(mpnet_apidef.SEGMENTS, network['network']) def test_create_network_provider_flat(self): data = {'network': {'name': 'net1', pnet.NETWORK_TYPE: 'flat', pnet.PHYSICAL_NETWORK: 'physnet1', 'tenant_id': 'tenant_one'}} network_req = self.new_create_request('networks', data) network = self.deserialize(self.fmt, network_req.get_response(self.api)) self.assertEqual('flat', network['network'][pnet.NETWORK_TYPE]) self.assertEqual('physnet1', network['network'][pnet.PHYSICAL_NETWORK]) self.assertEqual(0, 
network['network'][pnet.SEGMENTATION_ID]) self.assertNotIn(mpnet_apidef.SEGMENTS, network['network']) def test_create_network_single_multiple_provider(self): data = {'network': {'name': 'net1', mpnet_apidef.SEGMENTS: [{pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1}], 'tenant_id': 'tenant_one'}} net_req = self.new_create_request('networks', data) network = self.deserialize(self.fmt, net_req.get_response(self.api)) for provider_field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, pnet.SEGMENTATION_ID]: self.assertNotIn(provider_field, network['network']) tz = network['network'][mpnet_apidef.SEGMENTS][0] self.assertEqual(tz[pnet.NETWORK_TYPE], 'vlan') self.assertEqual(tz[pnet.PHYSICAL_NETWORK], 'physnet1') self.assertEqual(tz[pnet.SEGMENTATION_ID], 1) # Tests get_network() net_req = self.new_show_request('networks', network['network']['id']) network = self.deserialize(self.fmt, net_req.get_response(self.api)) tz = network['network'][mpnet_apidef.SEGMENTS][0] self.assertEqual(tz[pnet.NETWORK_TYPE], 'vlan') self.assertEqual(tz[pnet.PHYSICAL_NETWORK], 'physnet1') self.assertEqual(tz[pnet.SEGMENTATION_ID], 1) def test_create_network_multprovider(self): data = {'network': {'name': 'net1', mpnet_apidef.SEGMENTS: [{pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1}, {pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet2', pnet.SEGMENTATION_ID: 2}], 'tenant_id': 'tenant_one'}} network_req = self.new_create_request('networks', data) network = self.deserialize(self.fmt, network_req.get_response(self.api)) tz = network['network'][mpnet_apidef.SEGMENTS] for tz in data['network'][mpnet_apidef.SEGMENTS]: for field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, pnet.SEGMENTATION_ID]: self.assertEqual(tz.get(field), tz.get(field)) # Tests get_network() net_req = self.new_show_request('networks', network['network']['id']) network = self.deserialize(self.fmt, net_req.get_response(self.api)) tz = 
network['network'][mpnet_apidef.SEGMENTS] for tz in data['network'][mpnet_apidef.SEGMENTS]: for field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, pnet.SEGMENTATION_ID]: self.assertEqual(tz.get(field), tz.get(field)) def test_create_network_with_provider_and_multiprovider_fail(self): data = {'network': {'name': 'net1', mpnet_apidef.SEGMENTS: [{pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1}], pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'physnet1', pnet.SEGMENTATION_ID: 1, 'tenant_id': 'tenant_one'}} network_req = self.new_create_request('networks', data) res = network_req.get_response(self.api) self.assertEqual(res.status_int, 400) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/extensions/test_secgroup_rule_local_ip_prefix.py0000644000175000017500000002014600000000000033421 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock import webob.exc from oslo_utils import uuidutils from neutron.db import db_base_plugin_v2 from neutron.db import securitygroups_db from neutron.extensions import securitygroup as ext_sg from neutron.tests.unit.extensions import test_securitygroup from neutron_lib import constants as const from neutron_lib.db import api as db_api from neutron_lib.plugins import directory from vmware_nsx.db import extended_security_group_rule as ext_rule_db from vmware_nsx.extensions import secgroup_rule_local_ip_prefix as ext_loip from vmware_nsx.plugins.nsx_v.vshield import securitygroup_utils from vmware_nsx.tests.unit.nsx_p import test_plugin as test_nsxp_plugin from vmware_nsx.tests.unit.nsx_v import test_plugin as test_nsxv_plugin from vmware_nsx.tests.unit.nsx_v3 import test_plugin as test_nsxv3_plugin PLUGIN_NAME = ('vmware_nsx.tests.unit.extensions.' 'test_secgroup_rule_local_ip_prefix.ExtendedRuleTestPlugin') _uuid = uuidutils.generate_uuid class ExtendedRuleTestPlugin(db_base_plugin_v2.NeutronDbPluginV2, ext_rule_db.ExtendedSecurityGroupRuleMixin, securitygroups_db.SecurityGroupDbMixin): supported_extension_aliases = ["security-group", ext_loip.ALIAS] def create_security_group_rule(self, context, security_group_rule): rule = security_group_rule['security_group_rule'] self._check_local_ip_prefix(context, rule) with db_api.CONTEXT_WRITER.using(context): res = super(ExtendedRuleTestPlugin, self).create_security_group_rule( context, security_group_rule) self._process_security_group_rule_properties(context, res, rule) return res class LocalIPPrefixExtTestCase(test_securitygroup.SecurityGroupDBTestCase): def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None): super(LocalIPPrefixExtTestCase, self).setUp( plugin=plugin, ext_mgr=ext_mgr) ext_sg.Securitygroup().update_attributes_map( ext_loip.RESOURCE_ATTRIBUTE_MAP) def _build_ingress_rule_with_local_ip_prefix(self, security_group_id, local_ip_prefix, remote_ip_prefix, direction='ingress'): rule = 
self._build_security_group_rule( security_group_id, remote_ip_prefix=remote_ip_prefix, direction=direction, proto=const.PROTO_NAME_UDP) rule['security_group_rule']['local_ip_prefix'] = local_ip_prefix return rule def test_raise_rule_not_ingress_when_local_ip_specified(self): local_ip_prefix = '239.255.0.0/16' remote_ip_prefix = '10.0.0.0/24' with self.security_group() as sg: rule = self._build_ingress_rule_with_local_ip_prefix( sg['security_group']['id'], local_ip_prefix, remote_ip_prefix, direction='egress') res = self._create_security_group_rule(self.fmt, rule) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_create_rule_with_local_ip_prefix(self): local_ip_prefix = '239.255.0.0/16' remote_ip_prefix = '10.0.0.0/24' with self.security_group() as sg: rule = self._build_ingress_rule_with_local_ip_prefix( sg['security_group']['id'], local_ip_prefix, remote_ip_prefix) res = self._make_security_group_rule(self.fmt, rule) self.assertEqual(local_ip_prefix, res['security_group_rule']['local_ip_prefix']) class TestNsxVExtendedSGRule(test_nsxv_plugin.NsxVSecurityGroupsTestCase, LocalIPPrefixExtTestCase): def test_create_rule_with_local_ip_prefix(self): sg_utils = securitygroup_utils.NsxSecurityGroupUtils(None) local_ip_prefix = '239.255.0.0/16' plugin = directory.get_plugin() dest = {'type': 'Ipv4Address', 'value': local_ip_prefix} plugin.nsx_sg_utils.get_rule_config = mock.Mock( side_effect=sg_utils.get_rule_config) super(TestNsxVExtendedSGRule, self).test_create_rule_with_local_ip_prefix() plugin.nsx_sg_utils.get_rule_config.assert_called_with( source=mock.ANY, destination=dest, services=mock.ANY, name=mock.ANY, applied_to_ids=mock.ANY, flags=mock.ANY, logged=mock.ANY, action=mock.ANY, tag=mock.ANY, notes=mock.ANY) class TestNSXv3ExtendedSGRule(test_nsxv3_plugin.NsxV3PluginTestCaseMixin, LocalIPPrefixExtTestCase): def test_create_rule_with_local_ip_prefix(self): sg_rules = [ {'tenant_id': mock.ANY, 'project_id': mock.ANY, 'id': mock.ANY, 
'port_range_min': None, 'local_ip_prefix': '239.255.0.0/16', 'ethertype': 'IPv4', 'protocol': u'udp', 'remote_ip_prefix': '10.0.0.0/24', 'port_range_max': None, 'security_group_id': mock.ANY, 'remote_group_id': None, 'direction': u'ingress', 'description': ''}] with mock.patch( "vmware_nsxlib.v3.security.NsxLibFirewallSection." "create_section_rules", side_effect=test_nsxv3_plugin._mock_create_firewall_rules, ) as mock_rule: super(TestNSXv3ExtendedSGRule, self).test_create_rule_with_local_ip_prefix() mock_rule.assert_called_with( mock.ANY, # firewall_section_id mock.ANY, # ns_group_id False, # logging 'ALLOW', # action sg_rules, # sg_rules mock.ANY) # ruleid_2_remote_nsgroup_map def test_create_rule_with_remote_ip_prefix(self): remote_ip_prefix = '0.0.0.0/0' with self.security_group() as sg: rule = self._build_security_group_rule( sg['security_group']['id'], remote_ip_prefix=remote_ip_prefix, direction='ingress', proto=const.PROTO_NAME_UDP) res = self._make_security_group_rule(self.fmt, rule) self.assertEqual(remote_ip_prefix, res['security_group_rule']['remote_ip_prefix']) def test_create_nsx_rule_with_remote_ip_prefix_zeros(self): sg_rules = [ {'tenant_id': mock.ANY, 'project_id': mock.ANY, 'id': mock.ANY, 'port_range_min': None, 'local_ip_prefix': None, 'ethertype': 'IPv4', 'protocol': u'udp', 'remote_ip_prefix': None, 'port_range_max': None, 'security_group_id': mock.ANY, 'remote_group_id': None, 'direction': u'ingress', 'description': ''}] with mock.patch( "vmware_nsxlib.v3.security.NsxLibFirewallSection." 
"create_section_rules", side_effect=test_nsxv3_plugin._mock_create_firewall_rules, ) as mock_rule: self.test_create_rule_with_remote_ip_prefix() mock_rule.assert_called_with( mock.ANY, # firewall_section_id mock.ANY, # ns_group_id False, # logging 'ALLOW', # action sg_rules, # sg_rules mock.ANY) # ruleid_2_remote_nsgroup_map class TestNSXpExtendedSGRule(test_nsxp_plugin.NsxPPluginTestCaseMixin, LocalIPPrefixExtTestCase): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/extensions/test_security_group_policy.py0000644000175000017500000002527000000000000031771 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from oslo_config import cfg import webob.exc from neutron.extensions import securitygroup as ext_sg from neutron.tests.unit.api import test_extensions from neutron.tests.unit.db import test_db_base_plugin_v2 from neutron.tests.unit.extensions import test_securitygroup from neutron_lib import constants from neutron_lib import context from neutron_lib import exceptions as n_exc from vmware_nsx.extensions import nsxpolicy from vmware_nsx.extensions import securitygrouplogging as ext_logging from vmware_nsx.extensions import securitygrouppolicy as ext_policy from vmware_nsx.tests.unit.nsx_v import test_plugin from vmware_nsx.tests.unit.nsx_v.vshield import fake_vcns PLUGIN_NAME = 'vmware_nsx.plugin.NsxVPlugin' class SecGroupPolicyExtensionTestCase( test_plugin.NsxVPluginV2TestCase, test_securitygroup.SecurityGroupDBTestCase): def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None): cfg.CONF.set_override('use_nsx_policies', True, group='nsxv') cfg.CONF.set_override('default_policy_id', 'policy-1', group='nsxv') # This feature is enabled only since 6.2 with mock.patch.object(fake_vcns.FakeVcns, 'get_version', return_value="6.2.3"): super(SecGroupPolicyExtensionTestCase, self).setUp( plugin=plugin, ext_mgr=ext_mgr) self._tenant_id = test_db_base_plugin_v2.TEST_TENANT_ID # add policy & logging security group attribute ext_sg.Securitygroup().update_attributes_map( ext_policy.RESOURCE_ATTRIBUTE_MAP) ext_sg.Securitygroup().update_attributes_map( ext_logging.RESOURCE_ATTRIBUTE_MAP) def _create_secgroup_with_policy(self, policy_id, description=None, logging=False): body = {'security_group': {'name': 'sg-policy', 'tenant_id': self._tenant_id, 'policy': policy_id, 'description': description if description else '', 'logging': logging}} return self._create_security_group_response(self.fmt, body) def _get_secgroup_with_policy(self): policy_id = 'policy-5' res = self._create_secgroup_with_policy(policy_id) return self.deserialize(self.fmt, res) def 
test_secgroup_create_with_policy(self): policy_id = 'policy-5' res = self._create_secgroup_with_policy(policy_id) sg = self.deserialize(self.fmt, res) self.assertEqual(policy_id, sg['security_group']['policy']) self.assertEqual('dummy', sg['security_group']['description']) def test_secgroup_create_with_policyand_desc(self): policy_id = 'policy-5' desc = 'test' res = self._create_secgroup_with_policy(policy_id, description=desc) sg = self.deserialize(self.fmt, res) self.assertEqual(policy_id, sg['security_group']['policy']) self.assertEqual(desc, sg['security_group']['description']) def test_secgroup_create_without_policy(self): res = self._create_secgroup_with_policy(None) self.assertEqual(400, res.status_int) def test_secgroup_create_with_illegal_policy(self): policy_id = 'bad-policy' with mock.patch(PLUGIN_NAME + '.get_nsx_policy', side_effect=n_exc.ObjectNotFound(id=policy_id)): res = self._create_secgroup_with_policy(policy_id) self.assertEqual(400, res.status_int) def test_secgroup_create_with_policy_and_logging(self): # We do not support policy & logging together policy_id = 'policy-5' res = self._create_secgroup_with_policy(policy_id, logging=True) self.assertEqual(400, res.status_int) def test_secgroup_update_with_policy(self): # Test that updating the policy is allowed old_policy = 'policy-5' new_policy = 'policy-6' res = self._create_secgroup_with_policy(old_policy) sg = self.deserialize(self.fmt, res) data = {'security_group': {'policy': new_policy}} req = self.new_update_request('security-groups', data, sg['security_group']['id']) updated_sg = self.deserialize(self.fmt, req.get_response(self.ext_api)) self.assertEqual(new_policy, updated_sg['security_group']['policy']) # Verify the same result in 'get' req = self.new_show_request('security-groups', sg['security_group']['id']) shown_sg = self.deserialize(self.fmt, req.get_response(self.ext_api)) self.assertEqual(new_policy, shown_sg['security_group']['policy']) def 
test_secgroup_update_no_policy_change(self): # Test updating without changing the policy old_policy = 'policy-5' desc = 'abc' res = self._create_secgroup_with_policy(old_policy) sg = self.deserialize(self.fmt, res) data = {'security_group': {'description': desc}} req = self.new_update_request('security-groups', data, sg['security_group']['id']) updated_sg = self.deserialize(self.fmt, req.get_response(self.ext_api)) self.assertEqual(old_policy, updated_sg['security_group']['policy']) self.assertEqual(desc, updated_sg['security_group']['description']) def test_secgroup_update_remove_policy(self): # removing the policy is not allowed sg = self._get_secgroup_with_policy() data = {'security_group': {'policy': None}} req = self.new_update_request('security-groups', data, sg['security_group']['id']) res = req.get_response(self.ext_api) self.assertEqual(400, res.status_int) def test_secgroup_update_add_logging(self): # We do not support policy & logging together sg = self._get_secgroup_with_policy() data = {'security_group': {'logging': True}} req = self.new_update_request('security-groups', data, sg['security_group']['id']) res = req.get_response(self.ext_api) self.assertEqual(400, res.status_int) def test_non_admin_cannot_delete_policy_sg_and_admin_can(self): sg = self._get_secgroup_with_policy() sg_id = sg['security_group']['id'] # Try deleting the request as a normal user returns forbidden # as a tenant is not allowed to delete this. 
ctx = context.Context('', self._tenant_id) self._delete('security-groups', sg_id, expected_code=webob.exc.HTTPForbidden.code, neutron_context=ctx) # can be deleted though as admin self._delete('security-groups', sg_id, expected_code=webob.exc.HTTPNoContent.code) def test_create_rule(self): sg = self._get_secgroup_with_policy() rule = self._build_security_group_rule( sg['security_group']['id'], 'ingress', constants.PROTO_NAME_TCP, '22', '22') res = self._create_security_group_rule(self.fmt, rule) self.deserialize(self.fmt, res) self.assertEqual(400, res.status_int) class SecGroupPolicyExtensionTestCaseWithRules( SecGroupPolicyExtensionTestCase): def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None): cfg.CONF.set_override('allow_tenant_rules_with_policy', True, group='nsxv') super(SecGroupPolicyExtensionTestCaseWithRules, self).setUp( plugin=plugin, ext_mgr=ext_mgr) def test_secgroup_create_without_policy(self): # in case allow_tenant_rules_with_policy is True, it is allowed to # create a regular sg desc = 'test' res = self._create_secgroup_with_policy(None, description=desc) sg = self.deserialize(self.fmt, res) self.assertIsNone(sg['security_group']['policy']) self.assertEqual(desc, sg['security_group']['description']) def test_secgroup_create_without_policy_update_policy(self): # Create a regular security group. 
adding the policy later should fail res = self._create_secgroup_with_policy(None) sg = self.deserialize(self.fmt, res) data = {'security_group': {'policy': 'policy-1'}} req = self.new_update_request('security-groups', data, sg['security_group']['id']) res = req.get_response(self.ext_api) self.assertEqual(400, res.status_int) def test_secgroup_create_without_policy_and_rule(self): # Test that regular security groups can have rules res = self._create_secgroup_with_policy(None) sg = self.deserialize(self.fmt, res) self.assertIsNone(sg['security_group']['policy']) rule = self._build_security_group_rule( sg['security_group']['id'], 'ingress', constants.PROTO_NAME_TCP, '22', '22') res = self._create_security_group_rule(self.fmt, rule) rule_data = self.deserialize(self.fmt, res) self.assertEqual( sg['security_group']['id'], rule_data['security_group_rule']['security_group_id']) class NsxPolExtensionManager(object): def get_resources(self): return nsxpolicy.Nsxpolicy.get_resources() def get_actions(self): return [] def get_request_extensions(self): return [] class TestNsxPolicies(test_plugin.NsxVPluginV2TestCase): def setUp(self, plugin=None): super(TestNsxPolicies, self).setUp() ext_mgr = NsxPolExtensionManager() self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr) def test_get_policy(self): id = 'policy-1' req = self.new_show_request('nsx-policies', id) res = self.deserialize( self.fmt, req.get_response(self.ext_api) ) policy = res['nsx_policy'] self.assertEqual(id, policy['id']) def test_list_policies(self): req = self.new_list_request('nsx-policies') res = self.deserialize( self.fmt, req.get_response(self.ext_api) ) self.assertIn('nsx_policies', res) # the fake_vcns api returns 3 policies self.assertEqual(3, len(res['nsx_policies'])) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 
vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/extensions/test_securitygroup.py0000644000175000017500000001017200000000000030246 0ustar00coreycorey00000000000000# Copyright (c) 2015 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.tests.unit.extensions import test_securitygroup as test_ext_sg from vmware_nsx.tests.unit.nsx_v3 import test_plugin as test_nsxv3 from vmware_nsxlib.v3 import exceptions as nsxlib_exc from webob import exc # Pool of fake ns-groups uuids NSG_IDS = ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222', '33333333-3333-3333-3333-333333333333', '44444444-4444-4444-4444-444444444444', '55555555-5555-5555-5555-555555555555'] def _mock_create_and_list_nsgroups(test_method): nsgroups = [] def _create_nsgroup_mock(name, desc, tags, membership_criteria=None): nsgroup = {'id': NSG_IDS[len(nsgroups)], 'display_name': name, 'description': desc, 'tags': tags} nsgroups.append(nsgroup) return nsgroup def wrap(*args, **kwargs): with mock.patch( 'vmware_nsxlib.v3.security.NsxLibNsGroup.create' ) as create_nsgroup_mock: create_nsgroup_mock.side_effect = _create_nsgroup_mock with mock.patch( "vmware_nsxlib.v3.security.NsxLibNsGroup.list" ) as list_nsgroups_mock: list_nsgroups_mock.side_effect = lambda: nsgroups test_method(*args, **kwargs) return wrap class TestSecurityGroups(test_nsxv3.NsxV3PluginTestCaseMixin, test_ext_sg.TestSecurityGroups): def 
test_create_security_group_rule_icmp_with_type_and_code(self): name = 'webservers' description = 'my webservers' with self.security_group(name, description) as sg: security_group_id = sg['security_group']['id'] direction = "ingress" remote_ip_prefix = "10.0.0.0/24" protocol = "icmp" # port_range_min (ICMP type) is greater than port_range_max # (ICMP code) in order to confirm min <= max port check is # not called for ICMP. port_range_min = 14 port_range_max = None keys = [('remote_ip_prefix', remote_ip_prefix), ('security_group_id', security_group_id), ('direction', direction), ('protocol', protocol), ('port_range_min', port_range_min), ('port_range_max', port_range_max)] with self.security_group_rule(security_group_id, direction, protocol, port_range_min, port_range_max, remote_ip_prefix) as rule: for k, v, in keys: self.assertEqual(rule['security_group_rule'][k], v) def test_create_security_group_with_manager_error(self): '''Reboot in multi-cluster environment may cause temporary 404 in firewall section APIs. We should return 503 and not 404 to the user ''' name = 'webservers' description = 'my webservers' fail = False with mock.patch("vmware_nsxlib.v3.security.NsxLibFirewallSection." "create_section_rules", side_effect=nsxlib_exc.ResourceNotFound): try: with self.security_group(name, description): # This should not succeed # (assertRaises would not work with generators) self.assertTrue(fail) except exc.HTTPClientError: pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/extensions/test_vnic_index.py0000644000175000017500000001361600000000000027456 0ustar00coreycorey00000000000000# Copyright 2014 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_db import exception as d_exc from oslo_utils import uuidutils from neutron.db import db_base_plugin_v2 from neutron.tests.unit.db import test_db_base_plugin_v2 as test_db_plugin from neutron_lib.api import validators from neutron_lib import context as neutron_context from neutron_lib.db import api as db_api from neutron_lib.plugins import directory from vmware_nsx.db import vnic_index_db from vmware_nsx.extensions import vnicindex as vnicidx from vmware_nsx.tests import unit as vmware DB_PLUGIN_KLASS = ('vmware_nsx.tests.unit.extensions.' 'test_vnic_index.VnicIndexTestPlugin') _uuid = uuidutils.generate_uuid class VnicIndexTestPlugin(db_base_plugin_v2.NeutronDbPluginV2, vnic_index_db.VnicIndexDbMixin): supported_extension_aliases = [vnicidx.ALIAS] def update_port(self, context, id, port): p = port['port'] current_port = super(VnicIndexTestPlugin, self).get_port(context, id) vnic_idx = p.get(vnicidx.VNIC_INDEX) device_id = current_port['device_id'] if validators.is_attr_set(vnic_idx) and device_id != '': self._set_port_vnic_index_mapping( context, id, device_id, vnic_idx) with db_api.CONTEXT_WRITER.using(context): p = port['port'] ret_port = super(VnicIndexTestPlugin, self).update_port( context, id, port) vnic_idx = current_port.get(vnicidx.VNIC_INDEX) if (validators.is_attr_set(vnic_idx) and device_id != ret_port['device_id']): self._delete_port_vnic_index_mapping( context, id) return ret_port def delete_port(self, context, id): port_db = self.get_port(context, id) vnic_idx = port_db.get(vnicidx.VNIC_INDEX) if 
validators.is_attr_set(vnic_idx): self._delete_port_vnic_index_mapping(context, id) with db_api.CONTEXT_WRITER.using(context): super(VnicIndexTestPlugin, self).delete_port(context, id) class VnicIndexDbTestCase(test_db_plugin.NeutronDbPluginV2TestCase): def setUp(self, plugin=None, ext_mgr=None): plugin = plugin or DB_PLUGIN_KLASS cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH) super(VnicIndexDbTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr) def _port_index_update(self, port_id, index): data = {'port': {'vnic_index': index}} req = self.new_update_request('ports', data, port_id) res = self.deserialize('json', req.get_response(self.api)) return res def test_vnic_index_db(self): plugin = directory.get_plugin() vnic_index = 2 device_id = _uuid() context = neutron_context.get_admin_context() with self.port(device_id=device_id, device_owner='compute:None') as port: port_id = port['port']['id'] res = self._port_index_update(port_id, vnic_index) self.assertEqual(res['port'][vnicidx.VNIC_INDEX], vnic_index) # Port should be associated with at most one vnic index self.assertRaises(d_exc.DBDuplicateEntry, plugin._set_port_vnic_index_mapping, context, port_id, device_id, 1) # Check that the call for _delete_port_vnic_index_mapping remove # the row from the table plugin._delete_port_vnic_index_mapping(context, port_id) self.assertIsNone(plugin._get_port_vnic_index(context, port_id)) def test_vnic_index_db_duplicate(self): plugin = directory.get_plugin() vnic_index = 2 device_id = _uuid() context = neutron_context.get_admin_context() with self.port(device_id=device_id, device_owner='compute:None') as port: port_id = port['port']['id'] res = self._port_index_update(port_id, vnic_index) self.assertEqual(res['port'][vnicidx.VNIC_INDEX], vnic_index) plugin._set_port_vnic_index_mapping(context, port_id, device_id, vnic_index) def test_vnic_index_db_duplicate_new_port(self): plugin = directory.get_plugin() vnic_index = 2 device_id = _uuid() context = 
neutron_context.get_admin_context() with self.port(device_id=device_id, device_owner='compute:None') as port: with self.port(device_id=device_id, device_owner='compute:None') as port1: port_id = port['port']['id'] res = self._port_index_update(port_id, vnic_index) self.assertEqual(res['port'][vnicidx.VNIC_INDEX], vnic_index) port_id1 = port1['port']['id'] plugin._set_port_vnic_index_mapping(context, port_id1, device_id, 2) self.assertIsNone(plugin._get_port_vnic_index(context, port_id)) self.assertEqual(vnic_index, plugin._get_port_vnic_index(context, port_id1)) class TestVnicIndex(VnicIndexDbTestCase): pass ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2382548 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_p/0000755000175000017500000000000000000000000022640 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_p/__init__.py0000644000175000017500000000000000000000000024737 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_p/test_api_replay.py0000644000175000017500000001003200000000000026372 0ustar00coreycorey00000000000000# Copyright (c) 2019 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from vmware_nsx.extensions import api_replay from vmware_nsx.tests.unit.nsx_p import test_plugin from neutron_lib.api import attributes from neutron_lib.plugins import directory from oslo_config import cfg class TestApiReplay(test_plugin.NsxPTestL3NatTest): def setUp(self, plugin=None, ext_mgr=None, service_plugins=None): # enables api_replay_mode for these tests cfg.CONF.set_override('api_replay_mode', True) super(TestApiReplay, self).setUp() def tearDown(self): # disables api_replay_mode for these tests cfg.CONF.set_override('api_replay_mode', False) # remove the extension from the plugin directory.get_plugin().supported_extension_aliases.remove( api_replay.ALIAS) # Revert the attributes map back to normal for attr_name in ('ports', 'networks', 'security_groups', 'security_group_rules', 'routers', 'policies'): attr_info = attributes.RESOURCES[attr_name] attr_info['id']['allow_post'] = False super(TestApiReplay, self).tearDown() def test_create_port_specify_id(self): specified_network_id = '555e762b-d7a1-4b44-b09b-2a34ada56c9f' specified_port_id = 'e55e762b-d7a1-4b44-b09b-2a34ada56c9f' network_res = self._create_network(self.fmt, 'test-network', True, arg_list=('id',), id=specified_network_id) network = self.deserialize(self.fmt, network_res) self.assertEqual(specified_network_id, network['network']['id']) port_res = self._create_port(self.fmt, network['network']['id'], arg_list=('id',), id=specified_port_id) port = self.deserialize(self.fmt, port_res) self.assertEqual(specified_port_id, port['port']['id']) def _create_router(self, fmt, tenant_id, name=None, admin_state_up=None, arg_list=None, **kwargs): data = {'router': {'tenant_id': tenant_id}} if name: data['router']['name'] = name if admin_state_up: data['router']['admin_state_up'] = admin_state_up for arg in (('admin_state_up', 'tenant_id') + (arg_list or ())): # Arg must be present and not empty if kwargs.get(arg): data['router'][arg] = kwargs[arg] router_req = self.new_create_request('routers', data, fmt) 
return router_req.get_response(self.ext_api) def test_create_update_router(self): specified_router_id = '555e762b-d7a1-4b44-b09b-2a34ada56c9f' router_res = self._create_router(self.fmt, 'test-tenant', 'test-rtr', arg_list=('id',), id=specified_router_id) router = self.deserialize(self.fmt, router_res) self.assertEqual(specified_router_id, router['router']['id']) # This part tests _fixup_res_dict as well body = self._update('routers', specified_router_id, {'router': {'name': 'new_name'}}) body = self._show('routers', specified_router_id) self.assertEqual(body['router']['name'], 'new_name') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_p/test_availability_zones.py0000644000175000017500000001277300000000000030153 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from oslo_utils import uuidutils from neutron.tests import base from vmware_nsx.common import config from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.plugins.nsx_p import availability_zones as nsx_az class NsxPAvailabilityZonesTestCase(base.BaseTestCase): def setUp(self): super(NsxPAvailabilityZonesTestCase, self).setUp() self.az_name = "zone1" self.group_name = "az:%s" % self.az_name config.register_nsxp_azs(cfg.CONF, [self.az_name]) self.global_md_proxy = uuidutils.generate_uuid() cfg.CONF.set_override( "metadata_proxy", self.global_md_proxy, group="nsx_p") self.global_dhcp_profile = uuidutils.generate_uuid() cfg.CONF.set_override( "dhcp_profile", self.global_dhcp_profile, group="nsx_p") cfg.CONF.set_override( "native_metadata_route", "1.1.1.1", group="nsx_p") cfg.CONF.set_override("dns_domain", "xxx.com", group="nsx_p") cfg.CONF.set_override("nameservers", ["10.1.1.1"], group="nsx_p") cfg.CONF.set_override( "default_tier0_router", "uuidrtr1", group="nsx_p") cfg.CONF.set_override("edge_cluster", "ec1", group="nsx_p") def _config_az(self, metadata_proxy="metadata_proxy1", dhcp_profile="dhcp_profile1", native_metadata_route="2.2.2.2", dns_domain="aaa.com", nameservers=["20.1.1.1"], default_overlay_tz='otz', default_vlan_tz='vtz', default_tier0_router="uuidrtr2", edge_cluster="ec2"): if metadata_proxy is not None: cfg.CONF.set_override("metadata_proxy", metadata_proxy, group=self.group_name) if dhcp_profile is not None: cfg.CONF.set_override("dhcp_profile", dhcp_profile, group=self.group_name) if native_metadata_route is not None: cfg.CONF.set_override("native_metadata_route", native_metadata_route, group=self.group_name) if dns_domain is not None: cfg.CONF.set_override("dns_domain", dns_domain, group=self.group_name) if nameservers is not None: cfg.CONF.set_override("nameservers", nameservers, group=self.group_name) if default_overlay_tz is not None: cfg.CONF.set_override("default_overlay_tz", default_overlay_tz, 
group=self.group_name) if default_vlan_tz is not None: cfg.CONF.set_override("default_vlan_tz", default_vlan_tz, group=self.group_name) if default_tier0_router is not None: cfg.CONF.set_override("default_tier0_router", default_tier0_router, group=self.group_name) if edge_cluster is not None: cfg.CONF.set_override("edge_cluster", edge_cluster, group=self.group_name) def test_simple_availability_zone(self): self._config_az() az = nsx_az.NsxPAvailabilityZone(self.az_name) self.assertEqual(self.az_name, az.name) self.assertEqual("metadata_proxy1", az.metadata_proxy) self.assertEqual("dhcp_profile1", az.dhcp_profile) self.assertEqual("2.2.2.2", az.native_metadata_route) self.assertEqual("aaa.com", az.dns_domain) self.assertEqual(["20.1.1.1"], az.nameservers) self.assertEqual("otz", az.default_overlay_tz) self.assertEqual("vtz", az.default_vlan_tz) self.assertEqual("uuidrtr2", az.default_tier0_router) self.assertEqual("ec2", az.edge_cluster) def test_missing_group_section(self): self.assertRaises( nsx_exc.NsxInvalidConfiguration, nsx_az.NsxPAvailabilityZone, "doesnt_exist") def test_availability_zone_missing_metadata_proxy(self): # Mandatory parameter self._config_az(metadata_proxy=None) self.assertRaises( nsx_exc.NsxInvalidConfiguration, nsx_az.NsxPAvailabilityZone, self.az_name) def test_availability_zone_missing_md_route(self): self._config_az(native_metadata_route=None) az = nsx_az.NsxPAvailabilityZone(self.az_name) self.assertEqual("1.1.1.1", az.native_metadata_route) def test_availability_zone_missing_dns_domain(self): self._config_az(dns_domain=None) az = nsx_az.NsxPAvailabilityZone(self.az_name) self.assertEqual("xxx.com", az.dns_domain) def test_availability_zone_missing_nameservers(self): self._config_az(nameservers=None) az = nsx_az.NsxPAvailabilityZone(self.az_name) self.assertEqual(["10.1.1.1"], az.nameservers) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 
vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_p/test_dhcp_metadata.py0000644000175000017500000015226000000000000027035 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock import netaddr from oslo_config import cfg from oslo_utils import uuidutils from neutron.extensions import securitygroup as secgrp from neutron_lib import constants from neutron_lib import context from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from vmware_nsx.common import config from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import utils from vmware_nsx.db import db as nsx_db from vmware_nsx.extensions import advancedserviceproviders as as_providers from vmware_nsx.plugins.nsx_p import availability_zones as nsx_az from vmware_nsx.tests.unit.nsx_p import test_plugin from vmware_nsxlib.v3 import core_resources from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3 import nsx_constants from vmware_nsxlib.v3.policy import core_resources as policy_resources from vmware_nsxlib.v3 import resources as nsx_resources def set_az_in_config(name, metadata_proxy="metadata_proxy1", dhcp_profile="dhcp_profile1", native_metadata_route="2.2.2.2", dns_domain='aaaa', nameservers=['bbbb']): group_name = 'az:%s' % name cfg.CONF.set_override('availability_zones', [name], group="nsx_p") config.register_nsxp_azs(cfg.CONF, [name]) 
cfg.CONF.set_override("metadata_proxy", metadata_proxy, group=group_name) cfg.CONF.set_override("dhcp_profile", dhcp_profile, group=group_name) cfg.CONF.set_override("native_metadata_route", native_metadata_route, group=group_name) cfg.CONF.set_override("dns_domain", dns_domain, group=group_name) cfg.CONF.set_override("nameservers", nameservers, group=group_name) def mock_nsxlib_backend_calls(): """Mock nsxlib calls used as passthrough for MP metadata & mdproxy""" mock.patch( "vmware_nsxlib.v3.core_resources.NsxLibDhcpProfile." "get_id_by_name_or_id", return_value=test_plugin.NSX_DHCP_PROFILE_ID).start() mock.patch( "vmware_nsxlib.v3.resources.LogicalDhcpServer." "get_id_by_name_or_id", return_value=test_plugin._return_same).start() mock.patch( "vmware_nsxlib.v3.core_resources.NsxLibMetadataProxy." "get_id_by_name_or_id", side_effect=test_plugin._return_same).start() mock.patch( "vmware_nsxlib.v3.resources.LogicalPort.create", side_effect=test_plugin._return_id_key).start() mock.patch( "vmware_nsxlib.v3.resources.LogicalDhcpServer.create", side_effect=test_plugin._return_id_key).start() mock.patch( "vmware_nsxlib.v3.resources.LogicalDhcpServer.update", side_effect=test_plugin._return_id_key).start() mock.patch( "vmware_nsxlib.v3.resources.LogicalDhcpServer.create_binding", side_effect=test_plugin._return_id_key).start() mock.patch("vmware_nsxlib.v3.resources.LogicalDhcpServer." 
"update_binding").start() class NsxNativeDhcpTestCase(test_plugin.NsxPPluginTestCaseMixin): """Test native dhcp config when using MP DHCP""" def setUp(self): self._orig_dhcp_agent_notification = cfg.CONF.dhcp_agent_notification cfg.CONF.set_override('dhcp_agent_notification', False) mock_nsxlib_backend_calls() super(NsxNativeDhcpTestCase, self).setUp() self._az_name = 'zone1' self.az_metadata_route = '3.3.3.3' set_az_in_config(self._az_name, native_metadata_route=self.az_metadata_route) self._patcher1 = mock.patch.object(core_resources.NsxLibDhcpProfile, 'get') self._patcher1.start() self._patcher2 = mock.patch.object( policy_resources.NsxDhcpServerConfigApi, 'get', side_effect=nsxlib_exc.ResourceNotFound) self._patcher2.start() self._initialize_azs() self.plugin._init_dhcp_metadata() self.plugin.use_policy_dhcp = False def tearDown(self): self._patcher1.stop() self._patcher2.stop() cfg.CONF.set_override('dhcp_agent_notification', self._orig_dhcp_agent_notification) super(NsxNativeDhcpTestCase, self).tearDown() def _make_subnet_data(self, name=None, network_id=None, cidr=None, gateway_ip=None, tenant_id=None, allocation_pools=None, enable_dhcp=True, dns_nameservers=None, ip_version=4, host_routes=None, shared=False): return {'subnet': { 'name': name, 'network_id': network_id, 'cidr': cidr, 'gateway_ip': gateway_ip, 'tenant_id': tenant_id, 'allocation_pools': allocation_pools, 'ip_version': ip_version, 'enable_dhcp': enable_dhcp, 'dns_nameservers': dns_nameservers, 'host_routes': host_routes, 'shared': shared}} def _verify_dhcp_service(self, network_id, tenant_id, enabled): # Verify if DHCP service is enabled on a network. port_res = self._list_ports('json', 200, network_id, tenant_id=tenant_id, device_owner=constants.DEVICE_OWNER_DHCP) port_list = self.deserialize('json', port_res) self.assertEqual(len(port_list['ports']) == 1, enabled) def _verify_dhcp_binding(self, subnet, port_data, update_data, assert_data): # Verify if DHCP binding is updated. 
with mock.patch( 'vmware_nsxlib.v3.resources.LogicalDhcpServer.update_binding' ) as update_dhcp_binding: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' device_id = uuidutils.generate_uuid() with self.port(subnet=subnet, device_owner=device_owner, device_id=device_id, **port_data) as port: # Retrieve the DHCP binding info created in the DB for the # new port. dhcp_binding = nsx_db.get_nsx_dhcp_bindings( context.get_admin_context().session, port['port']['id'])[0] # Update the port with provided data. self.plugin.update_port( context.get_admin_context(), port['port']['id'], update_data) binding_data = {'mac_address': port['port']['mac_address'], 'ip_address': port['port']['fixed_ips'][0][ 'ip_address']} # Extend basic binding data with to-be-asserted data. binding_data.update(assert_data) # Verify the update call. update_dhcp_binding.assert_called_once_with( dhcp_binding['nsx_service_id'], dhcp_binding['nsx_binding_id'], **binding_data) def test_dhcp_profile_configuration(self): # Test if dhcp_agent_notification and dhcp_profile are # configured correctly. orig_dhcp_agent_notification = cfg.CONF.dhcp_agent_notification cfg.CONF.set_override('dhcp_agent_notification', True) self.assertRaises(nsx_exc.NsxPluginException, self.plugin._init_dhcp_metadata) cfg.CONF.set_override('dhcp_agent_notification', orig_dhcp_agent_notification) orig_dhcp_profile_uuid = cfg.CONF.nsx_p.dhcp_profile cfg.CONF.set_override('dhcp_profile', '', 'nsx_p') self.assertRaises(cfg.RequiredOptError, self.plugin._init_default_config) cfg.CONF.set_override('dhcp_profile', orig_dhcp_profile_uuid, 'nsx_p') def test_dhcp_service_with_create_network(self): # Test if DHCP service is disabled on a network when it is created. 
with self.network() as network: self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], False) def test_dhcp_service_with_delete_dhcp_network(self): # Test if DHCP service is disabled when directly deleting a network # with a DHCP-enabled subnet. with self.network() as network: with self.subnet(network=network, enable_dhcp=True): self.plugin.delete_network(context.get_admin_context(), network['network']['id']) self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], False) def test_dhcp_service_with_create_non_dhcp_subnet(self): # Test if DHCP service is disabled on a network when a DHCP-disabled # subnet is created. with self.network() as network: with self.subnet(network=network, enable_dhcp=False): self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], False) def test_dhcp_service_with_create_multiple_non_dhcp_subnets(self): # Test if DHCP service is disabled on a network when multiple # DHCP-disabled subnets are created. with self.network() as network: with self.subnet(network=network, cidr='10.0.0.0/24', enable_dhcp=False): with self.subnet(network=network, cidr='20.0.0.0/24', enable_dhcp=False): self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], False) def test_dhcp_service_with_create_dhcp_subnet(self): # Test if DHCP service is enabled on a network when a DHCP-enabled # subnet is created. with self.network() as network: with self.subnet(network=network, enable_dhcp=True): self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], True) def test_dhcp_service_with_create_dhcp_subnet_bulk(self): # Test if DHCP service is enabled on all networks after a # create_subnet_bulk operation. 
with self.network() as network1, self.network() as network2: subnet1 = self._make_subnet_data( network_id=network1['network']['id'], cidr='10.0.0.0/24', tenant_id=network1['network']['tenant_id']) subnet2 = self._make_subnet_data( network_id=network2['network']['id'], cidr='20.0.0.0/24', tenant_id=network2['network']['tenant_id']) subnets = {'subnets': [subnet1, subnet2]} with mock.patch.object(self.plugin, '_post_create_subnet' ) as post_create_subnet: self.plugin.create_subnet_bulk( context.get_admin_context(), subnets) # Check if post_create function has been called for # both subnets. self.assertEqual(len(subnets['subnets']), post_create_subnet.call_count) # Check if the bindings to backend DHCP entries are created. dhcp_service = nsx_db.get_nsx_service_binding( context.get_admin_context().session, network1['network']['id'], nsx_constants.SERVICE_DHCP) self.assertTrue(dhcp_service) dhcp_service = nsx_db.get_nsx_service_binding( context.get_admin_context().session, network2['network']['id'], nsx_constants.SERVICE_DHCP) self.assertTrue(dhcp_service) def test_dhcp_service_with_create_dhcp_subnet_bulk_failure(self): # Test if user-provided rollback function is invoked when # exception occurred during a create_subnet_bulk operation. with self.network() as network1, self.network() as network2: subnet1 = self._make_subnet_data( network_id=network1['network']['id'], cidr='10.0.0.0/24', tenant_id=network1['network']['tenant_id']) subnet2 = self._make_subnet_data( network_id=network2['network']['id'], cidr='20.0.0.0/24', tenant_id=network2['network']['tenant_id']) subnets = {'subnets': [subnet1, subnet2]} # Inject an exception on the second create_subnet call. 
orig_create_subnet = self.plugin.create_subnet with mock.patch.object(self.plugin, 'create_subnet') as create_subnet: def side_effect(*args, **kwargs): return self._fail_second_call( create_subnet, orig_create_subnet, *args, **kwargs) create_subnet.side_effect = side_effect with mock.patch.object(self.plugin, '_rollback_subnet') as rollback_subnet: try: self.plugin.create_subnet_bulk( context.get_admin_context(), subnets) except Exception: pass # Check if rollback function has been called for # the subnet in the first network. rollback_subnet.assert_called_once_with(mock.ANY, mock.ANY) subnet_arg = rollback_subnet.call_args[0][0] self.assertEqual(network1['network']['id'], subnet_arg['network_id']) # Check if the bindings to backend DHCP entries are removed. dhcp_service = nsx_db.get_nsx_service_binding( context.get_admin_context().session, network1['network']['id'], nsx_constants.SERVICE_DHCP) self.assertFalse(dhcp_service) dhcp_service = nsx_db.get_nsx_service_binding( context.get_admin_context().session, network2['network']['id'], nsx_constants.SERVICE_DHCP) self.assertFalse(dhcp_service) def test_dhcp_service_with_create_multiple_dhcp_subnets(self): # Test if multiple DHCP-enabled subnets cannot be created in a network. with self.network() as network: with self.subnet(network=network, cidr='10.0.0.0/24', enable_dhcp=True): subnet = {'subnet': {'network_id': network['network']['id'], 'cidr': '20.0.0.0/24', 'enable_dhcp': True}} self.assertRaises( n_exc.InvalidInput, self.plugin.create_subnet, context.get_admin_context(), subnet) def test_dhcp_service_with_delete_dhcp_subnet(self): # Test if DHCP service is disabled on a network when a DHCP-disabled # subnet is deleted. 
with self.network() as network: with self.subnet(network=network, enable_dhcp=True) as subnet: self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], True) self.plugin.delete_subnet(context.get_admin_context(), subnet['subnet']['id']) self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], False) def test_dhcp_service_with_update_dhcp_subnet(self): # Test if DHCP service is enabled on a network when a DHCP-disabled # subnet is updated to DHCP-enabled. with self.network() as network: with self.subnet(network=network, enable_dhcp=False) as subnet: self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], False) data = {'subnet': {'enable_dhcp': True}} self.plugin.update_subnet(context.get_admin_context(), subnet['subnet']['id'], data) self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], True) def test_dhcp_service_with_update_multiple_dhcp_subnets(self): # Test if a DHCP-disabled subnet cannot be updated to DHCP-enabled # if a DHCP-enabled subnet already exists in the same network. with self.network() as network: with self.subnet(network=network, cidr='10.0.0.0/24', enable_dhcp=True): with self.subnet(network=network, cidr='20.0.0.0/24', enable_dhcp=False) as subnet: self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], True) data = {'subnet': {'enable_dhcp': True}} self.assertRaises( n_exc.InvalidInput, self.plugin.update_subnet, context.get_admin_context(), subnet['subnet']['id'], data) def test_dhcp_service_with_update_dhcp_port(self): # Test if DHCP server IP is updated when the corresponding DHCP port # IP is changed. 
with mock.patch.object(nsx_resources.LogicalDhcpServer, 'update') as update_logical_dhcp_server: with self.subnet(cidr='10.0.0.0/24', enable_dhcp=True) as subnet: dhcp_service = nsx_db.get_nsx_service_binding( context.get_admin_context().session, subnet['subnet']['network_id'], nsx_constants.SERVICE_DHCP) port = self.plugin.get_port(context.get_admin_context(), dhcp_service['port_id']) old_ip = port['fixed_ips'][0]['ip_address'] new_ip = str(netaddr.IPAddress(old_ip) + 1) data = {'port': {'fixed_ips': [ {'subnet_id': subnet['subnet']['id'], 'ip_address': new_ip}]}} self.plugin.update_port(context.get_admin_context(), dhcp_service['port_id'], data) update_logical_dhcp_server.assert_called_once_with( dhcp_service['nsx_service_id'], server_ip=new_ip) def test_dhcp_binding_with_create_port(self): # Test if DHCP binding is added when a compute port is created. with mock.patch.object(nsx_resources.LogicalDhcpServer, 'create_binding', return_value={"id": uuidutils.generate_uuid()} ) as create_dhcp_binding: with self.subnet(enable_dhcp=True) as subnet: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' device_id = uuidutils.generate_uuid() with self.port(subnet=subnet, device_owner=device_owner, device_id=device_id) as port: dhcp_service = nsx_db.get_nsx_service_binding( context.get_admin_context().session, subnet['subnet']['network_id'], nsx_constants.SERVICE_DHCP) ip = port['port']['fixed_ips'][0]['ip_address'] hostname = 'host-%s' % ip.replace('.', '-') options = {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': ip}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': '0.0.0.0/0', 'next_hop': subnet['subnet']['gateway_ip']}]}} create_dhcp_binding.assert_called_once_with( dhcp_service['nsx_service_id'], port['port']['mac_address'], ip, hostname, cfg.CONF.nsx_p.dhcp_lease_time, options, 
subnet['subnet']['gateway_ip']) def test_dhcp_binding_with_create_port_with_opts(self): # Test if DHCP binding is added when a compute port is created # with extra options. opt_name = 'interface-mtu' opt_code = 26 opt_val = '9000' with mock.patch.object(nsx_resources.LogicalDhcpServer, 'create_binding', return_value={"id": uuidutils.generate_uuid()} ) as create_dhcp_binding: with self.subnet(enable_dhcp=True) as subnet: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' device_id = uuidutils.generate_uuid() extra_dhcp_opts = [{'opt_name': opt_name, 'opt_value': opt_val}] with self.port(subnet=subnet, device_owner=device_owner, device_id=device_id, extra_dhcp_opts=extra_dhcp_opts, arg_list=('extra_dhcp_opts',)) as port: dhcp_service = nsx_db.get_nsx_service_binding( context.get_admin_context().session, subnet['subnet']['network_id'], nsx_constants.SERVICE_DHCP) ip = port['port']['fixed_ips'][0]['ip_address'] hostname = 'host-%s' % ip.replace('.', '-') options = {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': ip}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': '0.0.0.0/0', 'next_hop': subnet['subnet']['gateway_ip']}]}, 'others': [{'code': opt_code, 'values': [opt_val]}]} create_dhcp_binding.assert_called_once_with( dhcp_service['nsx_service_id'], port['port']['mac_address'], ip, hostname, cfg.CONF.nsx_p.dhcp_lease_time, options, subnet['subnet']['gateway_ip']) def test_dhcp_binding_with_create_port_with_opts121(self): # Test if DHCP binding is added when a compute port is created # with extra option121. 
with mock.patch.object(nsx_resources.LogicalDhcpServer, 'create_binding', return_value={"id": uuidutils.generate_uuid()} ) as create_dhcp_binding: with self.subnet(enable_dhcp=True) as subnet: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' device_id = uuidutils.generate_uuid() extra_dhcp_opts = [{'opt_name': 'classless-static-route', 'opt_value': '1.0.0.0/24,1.2.3.4'}] with self.port(subnet=subnet, device_owner=device_owner, device_id=device_id, extra_dhcp_opts=extra_dhcp_opts, arg_list=('extra_dhcp_opts',)) as port: dhcp_service = nsx_db.get_nsx_service_binding( context.get_admin_context().session, subnet['subnet']['network_id'], nsx_constants.SERVICE_DHCP) ip = port['port']['fixed_ips'][0]['ip_address'] hostname = 'host-%s' % ip.replace('.', '-') options = {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': ip}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': '0.0.0.0/0', 'next_hop': subnet['subnet']['gateway_ip']}, {'network': '1.0.0.0/24', 'next_hop': '1.2.3.4'}]}} create_dhcp_binding.assert_called_once_with( dhcp_service['nsx_service_id'], port['port']['mac_address'], ip, hostname, cfg.CONF.nsx_p.dhcp_lease_time, options, subnet['subnet']['gateway_ip']) def test_dhcp_binding_with_create_port_with_bad_opts(self): with self.subnet(enable_dhcp=True) as subnet: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' device_id = uuidutils.generate_uuid() ctx = context.get_admin_context() # Use illegal opt-name extra_dhcp_opts = [{'opt_name': 'Dummy', 'opt_value': 'Dummy'}] data = {'port': { 'name': 'dummy', 'network_id': subnet['subnet']['network_id'], 'tenant_id': subnet['subnet']['tenant_id'], 'device_owner': device_owner, 'device_id': device_id, 'extra_dhcp_opts': extra_dhcp_opts, 'admin_state_up': True, 'fixed_ips': [], 'mac_address': '00:00:00:00:00:01', }} 
self.assertRaises(n_exc.InvalidInput, self.plugin.create_port, ctx, data) # Use illegal option121 value extra_dhcp_opts = [{'opt_name': 'classless-static-route', 'opt_value': '1.0.0.0/24,5.5.5.5,cc'}] data['port']['extra_dhcp_opts'] = extra_dhcp_opts self.assertRaises(n_exc.InvalidInput, self.plugin.create_port, ctx, data) def test_dhcp_binding_with_disable_enable_dhcp(self): # Test if DHCP binding is preserved after DHCP is disabled and # re-enabled on a subnet. with self.subnet(enable_dhcp=True) as subnet: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' device_id = uuidutils.generate_uuid() with self.port(subnet=subnet, device_owner=device_owner, device_id=device_id) as port: ip = port['port']['fixed_ips'][0]['ip_address'] dhcp_bindings = nsx_db.get_nsx_dhcp_bindings( context.get_admin_context().session, port['port']['id']) dhcp_service = dhcp_bindings[0]['nsx_service_id'] self.assertEqual(1, len(dhcp_bindings)) self.assertEqual(ip, dhcp_bindings[0]['ip_address']) # Disable DHCP on subnet. data = {'subnet': {'enable_dhcp': False}} self.plugin.update_subnet(context.get_admin_context(), subnet['subnet']['id'], data) dhcp_bindings = nsx_db.get_nsx_dhcp_bindings( context.get_admin_context().session, port['port']['id']) self.assertEqual([], dhcp_bindings) # Re-enable DHCP on subnet. data = {'subnet': {'enable_dhcp': True}} self.plugin.update_subnet(context.get_admin_context(), subnet['subnet']['id'], data) dhcp_bindings = nsx_db.get_nsx_dhcp_bindings( context.get_admin_context().session, port['port']['id']) self.assertEqual(1, len(dhcp_bindings)) self.assertEqual(ip, dhcp_bindings[0]['ip_address']) # The DHCP service ID should be different because a new # logical DHCP server is created for re-enabling DHCP. self.assertNotEqual(dhcp_service, dhcp_bindings[0]['nsx_service_id']) def test_dhcp_binding_with_delete_port(self): # Test if DHCP binding is removed when the associated compute port # is deleted. 
with mock.patch.object(nsx_resources.LogicalDhcpServer, 'delete_binding') as delete_dhcp_binding: with self.subnet(enable_dhcp=True) as subnet: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' device_id = uuidutils.generate_uuid() with self.port(subnet=subnet, device_owner=device_owner, device_id=device_id) as port: dhcp_binding = nsx_db.get_nsx_dhcp_bindings( context.get_admin_context().session, port['port']['id'])[0] self.plugin.delete_port( context.get_admin_context(), port['port']['id']) delete_dhcp_binding.assert_called_once_with( dhcp_binding['nsx_service_id'], dhcp_binding['nsx_binding_id']) def test_dhcp_binding_with_update_port_delete_ip(self): # Test if DHCP binding is deleted when the IP of the associated # compute port is deleted. with mock.patch.object(nsx_resources.LogicalDhcpServer, 'delete_binding') as delete_dhcp_binding: with self.subnet(enable_dhcp=True) as subnet: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' device_id = uuidutils.generate_uuid() with self.port(subnet=subnet, device_owner=device_owner, device_id=device_id) as port: dhcp_binding = nsx_db.get_nsx_dhcp_bindings( context.get_admin_context().session, port['port']['id'])[0] data = {'port': {'fixed_ips': [], 'admin_state_up': False, secgrp.SECURITYGROUPS: []}} self.plugin.update_port( context.get_admin_context(), port['port']['id'], data) delete_dhcp_binding.assert_called_once_with( dhcp_binding['nsx_service_id'], dhcp_binding['nsx_binding_id']) def test_dhcp_binding_with_update_port_ip(self): # Test if DHCP binding is updated when the IP of the associated # compute port is changed. 
with self.subnet(cidr='10.0.0.0/24', enable_dhcp=True) as subnet: port_data = {'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.3'}]} new_ip = '10.0.0.4' update_data = {'port': {'fixed_ips': [ {'subnet_id': subnet['subnet']['id'], 'ip_address': new_ip}]}} assert_data = {'host_name': 'host-%s' % new_ip.replace('.', '-'), 'ip_address': new_ip, 'options': {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': new_ip}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': constants.IPv4_ANY, 'next_hop': subnet['subnet']['gateway_ip']}]}}} self._verify_dhcp_binding(subnet, port_data, update_data, assert_data) def test_dhcp_binding_with_update_port_mac(self): # Test if DHCP binding is updated when the Mac of the associated # compute port is changed. with self.subnet(enable_dhcp=True) as subnet: port_data = {'mac_address': '11:22:33:44:55:66'} new_mac = '22:33:44:55:66:77' update_data = {'port': {'mac_address': new_mac}} assert_data = {'mac_address': new_mac, 'options': {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': mock.ANY}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': constants.IPv4_ANY, 'next_hop': subnet['subnet']['gateway_ip']}]}}} self._verify_dhcp_binding(subnet, port_data, update_data, assert_data) def test_dhcp_binding_with_update_port_mac_ip(self): # Test if DHCP binding is updated when the IP and Mac of the associated # compute port are changed at the same time. 
with self.subnet(cidr='10.0.0.0/24', enable_dhcp=True) as subnet: port_data = {'mac_address': '11:22:33:44:55:66', 'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.3'}]} new_mac = '22:33:44:55:66:77' new_ip = '10.0.0.4' update_data = {'port': {'mac_address': new_mac, 'fixed_ips': [ {'subnet_id': subnet['subnet']['id'], 'ip_address': new_ip}]}} assert_data = {'host_name': 'host-%s' % new_ip.replace('.', '-'), 'mac_address': new_mac, 'ip_address': new_ip, 'options': {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': new_ip}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': constants.IPv4_ANY, 'next_hop': subnet['subnet']['gateway_ip']}]}}} self._verify_dhcp_binding(subnet, port_data, update_data, assert_data) def test_update_port_with_update_dhcp_opt(self): # Test updating extra-dhcp-opts via port update. with self.subnet(cidr='10.0.0.0/24', enable_dhcp=True) as subnet: mac_address = '11:22:33:44:55:66' ip_addr = '10.0.0.3' port_data = {'arg_list': ('extra_dhcp_opts',), 'mac_address': mac_address, 'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': ip_addr}], 'extra_dhcp_opts': [ {'opt_name': 'interface-mtu', 'opt_value': '9000'}]} update_data = {'port': {'extra_dhcp_opts': [ {'opt_name': 'interface-mtu', 'opt_value': '9002'}]}} assert_data = {'mac_address': mac_address, 'ip_address': ip_addr, 'options': {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': ip_addr}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': constants.IPv4_ANY, 'next_hop': subnet['subnet']['gateway_ip']}]}, 'others': [{'code': 26, 'values': ['9002']}]}} self._verify_dhcp_binding(subnet, port_data, update_data, assert_data) def 
test_update_port_with_adding_dhcp_opt(self): # Test adding extra-dhcp-opts via port update. with self.subnet(cidr='10.0.0.0/24', enable_dhcp=True) as subnet: mac_address = '11:22:33:44:55:66' ip_addr = '10.0.0.3' port_data = {'arg_list': ('extra_dhcp_opts',), 'mac_address': mac_address, 'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': ip_addr}], 'extra_dhcp_opts': [ {'opt_name': 'nis-domain', 'opt_value': 'abc'}]} update_data = {'port': {'extra_dhcp_opts': [ {'opt_name': 'interface-mtu', 'opt_value': '9002'}]}} assert_data = {'mac_address': mac_address, 'ip_address': ip_addr, 'options': {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': ip_addr}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': constants.IPv4_ANY, 'next_hop': subnet['subnet']['gateway_ip']}]}, 'others': [{'code': 26, 'values': ['9002']}, {'code': 40, 'values': ['abc']}]}} self._verify_dhcp_binding(subnet, port_data, update_data, assert_data) def test_update_port_with_deleting_dhcp_opt(self): # Test adding extra-dhcp-opts via port update. 
with self.subnet(cidr='10.0.0.0/24', enable_dhcp=True) as subnet: mac_address = '11:22:33:44:55:66' ip_addr = '10.0.0.3' port_data = {'arg_list': ('extra_dhcp_opts',), 'mac_address': mac_address, 'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': ip_addr}], 'extra_dhcp_opts': [ {'opt_name': 'nis-domain', 'opt_value': 'abc'}, {'opt_name': 'interface-mtu', 'opt_value': '9002'}]} update_data = {'port': {'extra_dhcp_opts': [ {'opt_name': 'interface-mtu', 'opt_value': None}]}} assert_data = {'mac_address': mac_address, 'ip_address': ip_addr, 'options': {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': ip_addr}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': constants.IPv4_ANY, 'next_hop': subnet['subnet']['gateway_ip']}]}, 'others': [{'code': 40, 'values': ['abc']}]}} self._verify_dhcp_binding(subnet, port_data, update_data, assert_data) def test_dhcp_binding_with_update_port_name(self): # Test if DHCP binding is not updated when the name of the associated # compute port is changed. 
with mock.patch.object(nsx_resources.LogicalDhcpServer, 'update_binding') as update_dhcp_binding: with self.subnet(cidr='10.0.0.0/24', enable_dhcp=True) as subnet: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' device_id = uuidutils.generate_uuid() with self.port(subnet=subnet, device_owner=device_owner, device_id=device_id, name='abc') as port: data = {'port': {'name': 'xyz'}} self.plugin.update_port( context.get_admin_context(), port['port']['id'], data) update_dhcp_binding.assert_not_called() def test_create_network_with_bad_az_hint(self): p = directory.get_plugin() ctx = context.get_admin_context() data = {'network': { 'name': 'test-az', 'tenant_id': self._tenant_id, 'port_security_enabled': False, 'admin_state_up': True, 'shared': False, 'availability_zone_hints': ['bad_hint'] }} self.assertRaises(n_exc.NeutronException, p.create_network, ctx, data) def test_create_network_with_az_hint(self): p = directory.get_plugin() ctx = context.get_admin_context() data = {'network': { 'name': 'test-az', 'tenant_id': self._tenant_id, 'port_security_enabled': False, 'admin_state_up': True, 'shared': False, 'availability_zone_hints': [self._az_name] }} # network creation should succeed net = p.create_network(ctx, data) self.assertEqual([self._az_name], net['availability_zone_hints']) self.assertEqual([self._az_name], net['availability_zones']) def test_create_network_with_no_az_hint(self): p = directory.get_plugin() ctx = context.get_admin_context() data = {'network': { 'name': 'test-az', 'tenant_id': self._tenant_id, 'port_security_enabled': False, 'admin_state_up': True, 'shared': False }} # network creation should succeed net = p.create_network(ctx, data) self.assertEqual([], net['availability_zone_hints']) self.assertEqual([nsx_az.DEFAULT_NAME], net['availability_zones']) def test_dhcp_service_with_create_az_network(self): # Test if DHCP service is disabled on a network when it is created. 
with self.network(availability_zone_hints=[self._az_name], arg_list=('availability_zone_hints',)) as network: self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], False) def test_dhcp_binding_with_create_az_port(self): # Test if DHCP binding is added when a compute port is created. with mock.patch.object(nsx_resources.LogicalDhcpServer, 'create_binding', return_value={"id": uuidutils.generate_uuid()} ) as create_dhcp_binding: with self.network( availability_zone_hints=[self._az_name], arg_list=('availability_zone_hints',)) as network: with self.subnet(enable_dhcp=True, network=network) as subnet: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'X' device_id = uuidutils.generate_uuid() with self.port(subnet=subnet, device_owner=device_owner, device_id=device_id) as port: dhcp_service = nsx_db.get_nsx_service_binding( context.get_admin_context().session, subnet['subnet']['network_id'], nsx_constants.SERVICE_DHCP) ip = port['port']['fixed_ips'][0]['ip_address'] hostname = 'host-%s' % ip.replace('.', '-') options = {'option121': {'static_routes': [ {'network': '%s' % self.az_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % self.az_metadata_route, 'next_hop': ip}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': '0.0.0.0/0', 'next_hop': subnet['subnet']['gateway_ip']}]}} create_dhcp_binding.assert_called_once_with( dhcp_service['nsx_service_id'], port['port']['mac_address'], ip, hostname, cfg.CONF.nsx_p.dhcp_lease_time, options, subnet['subnet']['gateway_ip']) def test_create_subnet_with_dhcp_port(self): with self.subnet(enable_dhcp=True) as subnet: # find the dhcp port and verify it has port security disabled ports = self.plugin.get_ports( context.get_admin_context()) self.assertEqual(1, len(ports)) self.assertEqual('network:dhcp', ports[0]['device_owner']) self.assertEqual(subnet['subnet']['network_id'], ports[0]['network_id']) self.assertEqual(False, ports[0]['port_security_enabled']) class 
NsxNativeMetadataTestCase(test_plugin.NsxPPluginTestCaseMixin): """Test native metadata config when using MP MDProxy""" def setUp(self): self._orig_dhcp_agent_notification = cfg.CONF.dhcp_agent_notification cfg.CONF.set_override('dhcp_agent_notification', False) mock_nsxlib_backend_calls() super(NsxNativeMetadataTestCase, self).setUp() self._az_name = 'zone1' self._az_metadata_proxy = 'dummy' set_az_in_config(self._az_name, metadata_proxy=self._az_metadata_proxy) self._patcher = mock.patch.object(core_resources.NsxLibMetadataProxy, 'get') self._patcher.start() self._initialize_azs() self.plugin._init_dhcp_metadata() def tearDown(self): self._patcher.stop() cfg.CONF.set_override('dhcp_agent_notification', self._orig_dhcp_agent_notification) super(NsxNativeMetadataTestCase, self).tearDown() def test_metadata_proxy_configuration(self): # Test if dhcp_agent_notification and metadata_proxy are # configured correctly. orig_dhcp_agent_notification = cfg.CONF.dhcp_agent_notification cfg.CONF.set_override('dhcp_agent_notification', True) self.assertRaises(nsx_exc.NsxPluginException, self.plugin._init_dhcp_metadata) cfg.CONF.set_override('dhcp_agent_notification', orig_dhcp_agent_notification) orig_metadata_proxy_uuid = cfg.CONF.nsx_p.metadata_proxy cfg.CONF.set_override('metadata_proxy', '', 'nsx_p') self.assertRaises(cfg.RequiredOptError, self.plugin._init_default_config) cfg.CONF.set_override('metadata_proxy', orig_metadata_proxy_uuid, 'nsx_p') def test_metadata_proxy_with_create_network(self): # Test if native metadata proxy is enabled on a network when it is # created (Using MP MDproxy). 
self.plugin._availability_zones_data._default_az.use_policy_md = False with mock.patch.object(nsx_resources.LogicalPort, 'create') as create_logical_port: with self.network() as network: nsx_net_id = self.plugin._get_network_nsx_id( context.get_admin_context(), network['network']['id']) tags = self.plugin.nsxlib.build_v3_tags_payload( network['network'], resource_type='os-neutron-net-id', project_name=None) name = utils.get_name_and_uuid('%s-%s' % ( 'mdproxy', network['network']['name'] or 'network'), network['network']['id']) create_logical_port.assert_called_once_with( nsx_net_id, cfg.CONF.nsx_p.metadata_proxy, tags=tags, name=name, attachment_type=nsx_constants.ATTACHMENT_MDPROXY) def test_metadata_proxy_with_create_az_network(self): # Test if native metadata proxy is enabled on a network when it is # created (Using MP MDproxy). azs = self.plugin._availability_zones_data.availability_zones azs[self._az_name].use_policy_md = False with mock.patch.object(nsx_resources.LogicalPort, 'create') as create_logical_port: with self.network( availability_zone_hints=[self._az_name], arg_list=('availability_zone_hints',)) as network: nsx_net_id = self.plugin._get_network_nsx_id( context.get_admin_context(), network['network']['id']) tags = self.plugin.nsxlib.build_v3_tags_payload( network['network'], resource_type='os-neutron-net-id', project_name=None) name = utils.get_name_and_uuid('%s-%s' % ( 'mdproxy', network['network']['name'] or 'network'), network['network']['id']) create_logical_port.assert_called_once_with( nsx_net_id, self._az_metadata_proxy, tags=tags, name=name, attachment_type=nsx_constants.ATTACHMENT_MDPROXY) def test_metadata_proxy_with_get_subnets(self): # Test if get_subnets() handles advanced-service-provider extension, # which is used when processing metadata requests. with self.network() as n1, self.network() as n2: with self.subnet(network=n1) as s1, self.subnet(network=n2) as s2: # Get all the subnets. 
subnets = self._list('subnets')['subnets'] self.assertEqual(len(subnets), 2) self.assertEqual(set([s['id'] for s in subnets]), set([s1['subnet']['id'], s2['subnet']['id']])) lswitch_id = 'dummy' neutron_id = n1['network']['id'] segment_path = '/infra/segments/%s' % neutron_id # Get only the subnets associated with a particular advanced # service provider (i.e. logical switch). with mock.patch('vmware_nsxlib.v3.policy.NsxPolicyLib.' 'search_resource_by_realized_id', return_value=[segment_path]): subnets = self._list('subnets', query_params='%s=%s' % (as_providers.ADV_SERVICE_PROVIDERS, lswitch_id))['subnets'] self.assertEqual(len(subnets), 1) self.assertEqual(subnets[0]['id'], s1['subnet']['id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_p/test_fwaas_v2_driver.py0000644000175000017500000004045400000000000027343 0ustar00coreycorey00000000000000# Copyright 2019 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import mock from neutron_lib.api.definitions import constants as fwaas_consts from neutron_lib.plugins import directory from oslo_utils import uuidutils from vmware_nsx.services.fwaas.nsx_p import edge_fwaas_driver_v2 from vmware_nsx.services.fwaas.nsx_p import fwaas_callbacks_v2 from vmware_nsx.tests.unit.nsx_p import test_plugin as test_p_plugin from vmware_nsxlib.v3 import nsx_constants as consts from vmware_nsxlib.v3.policy import constants as policy_constants FAKE_FW_ID = 'fake_fw_uuid' FAKE_ROUTER_ID = 'fake_rtr_uuid' FAKE_PORT_ID = 'fake_port_uuid' FAKE_NET_ID = 'fake_net_uuid' GW_POLICY_PATH = ("vmware_nsxlib.v3.policy.core_resources." "NsxPolicyGatewayPolicyApi") class NsxpFwaasTestCase(test_p_plugin.NsxPPluginTestCaseMixin): def setUp(self): super(NsxpFwaasTestCase, self).setUp() self.firewall = edge_fwaas_driver_v2.EdgeFwaasPDriverV2() self.project_id = uuidutils.generate_uuid() self.plugin = directory.get_plugin() self.plugin.fwaas_callbacks = fwaas_callbacks_v2.NsxpFwaasCallbacksV2( False) self.plugin.fwaas_callbacks.fwaas_enabled = True self.plugin.fwaas_callbacks.fwaas_driver = self.firewall self.plugin.fwaas_callbacks.internal_driver = self.firewall self.plugin.init_is_complete = True def mock_get_random_rule_id(rid): return rid mock.patch.object(self.plugin.fwaas_callbacks, '_get_random_rule_id', side_effect=mock_get_random_rule_id).start() mock.patch.object(self.plugin.nsxpolicy, 'search_by_tags', return_value={'results': []}).start() def _default_rule(self, seq_num): return self.plugin.nsxpolicy.gateway_policy.build_entry( fwaas_callbacks_v2.DEFAULT_RULE_NAME, policy_constants.DEFAULT_DOMAIN, FAKE_ROUTER_ID, fwaas_callbacks_v2.DEFAULT_RULE_ID, description=fwaas_callbacks_v2.DEFAULT_RULE_NAME, action=consts.FW_ACTION_ALLOW, scope=[self.plugin.nsxpolicy.tier1.get_path(FAKE_ROUTER_ID)], sequence_number=seq_num, direction=consts.IN_OUT).get_obj_dict() def _block_interface_rules(self, seq_num): net_group_id = '%s-%s' % (FAKE_ROUTER_ID, 
FAKE_NET_ID) ingress_rule = self.plugin.nsxpolicy.gateway_policy.build_entry( "Block port ingress", policy_constants.DEFAULT_DOMAIN, FAKE_ROUTER_ID, fwaas_callbacks_v2.DEFAULT_RULE_ID + FAKE_NET_ID + 'ingress', action=consts.FW_ACTION_DROP, dest_groups=[net_group_id], scope=[self.plugin.nsxpolicy.tier1.get_path(FAKE_ROUTER_ID)], sequence_number=seq_num, direction=consts.IN) egress_rule = self.plugin.nsxpolicy.gateway_policy.build_entry( "Block port egress", policy_constants.DEFAULT_DOMAIN, FAKE_ROUTER_ID, fwaas_callbacks_v2.DEFAULT_RULE_ID + FAKE_NET_ID + 'egress', action=consts.FW_ACTION_DROP, source_groups=[net_group_id], scope=[self.plugin.nsxpolicy.tier1.get_path(FAKE_ROUTER_ID)], sequence_number=seq_num + 1, direction=consts.OUT) return [ingress_rule.get_obj_dict(), egress_rule.get_obj_dict()] def _fake_rules_v4(self, is_ingress=True, cidr='10.24.4.0/24', is_conflict=False): rule1 = {'enabled': True, 'action': 'allow', 'ip_version': 4, 'protocol': 'tcp', 'destination_port': '80', 'id': 'fake-fw-rule1', 'description': 'first rule'} rule2 = {'name': 'rule 2', 'enabled': True, 'action': 'reject', 'ip_version': 4, 'protocol': 'tcp', 'destination_port': '22:24', 'source_port': '1:65535', 'id': 'fake-fw-rule2'} rule3 = {'enabled': True, 'action': 'deny', 'ip_version': 4, 'protocol': 'icmp', 'id': 'fake-fw-rule3'} rule4 = {'enabled': True, 'action': 'deny', 'ip_version': 4, 'id': 'fake-fw-rule4'} if is_ingress: if not is_conflict: rule1['source_ip_address'] = cidr else: rule1['destination_ip_address'] = cidr else: if not is_conflict: rule1['destination_ip_address'] = cidr else: rule1['source_ip_address'] = cidr return [rule1, rule2, rule3, rule4] def _translated_cidr(self, cidr): if cidr is None: return [] else: return [{'target_id': cidr, 'target_type': 'IPv4Address'}] def _validate_rules_translation(self, actual_rules, rule_list, is_ingress): for index in range(len(rule_list)): self._validate_rule_translation( actual_rules[index].get_obj_dict(), rule_list[index], 
is_ingress) def _validate_rule_translation(self, nsx_rule, fw_rule, is_ingress): self.assertEqual(fw_rule['id'], nsx_rule['id']) self.assertEqual(fwaas_callbacks_v2.RULE_NAME_PREFIX + (fw_rule.get('name') or fw_rule['id']), nsx_rule['display_name']) self.assertEqual(fw_rule.get('description'), nsx_rule['description']) self.assertEqual(consts.IN if is_ingress else consts.OUT, nsx_rule['direction']) self.assertEqual(self.plugin.nsxpolicy.tier1.get_path(FAKE_ROUTER_ID), nsx_rule['scope'][0]) # Action if (fw_rule['action'] == fwaas_consts.FWAAS_REJECT or fw_rule['action'] == fwaas_consts.FWAAS_DENY): self.assertEqual(consts.FW_ACTION_DROP, nsx_rule['action']) else: self.assertEqual(consts.FW_ACTION_ALLOW, nsx_rule['action']) # Service if fw_rule.get('protocol') in ['tcp', 'udp', 'icmp']: self.assertEqual(1, len(nsx_rule['services'])) self.assertTrue( nsx_rule['services'][0].startswith('/infra/services/')) # Source & destination if (fw_rule.get('source_ip_address') and not fw_rule['source_ip_address'].startswith('0.0.0.0')): self.assertEqual(['/infra/domains/%s/groups/source-%s' % ( policy_constants.DEFAULT_DOMAIN, fw_rule['id'])], nsx_rule['source_groups']) elif not is_ingress: self.assertEqual(['/infra/domains/%s/groups/%s-%s' % ( policy_constants.DEFAULT_DOMAIN, FAKE_ROUTER_ID, FAKE_NET_ID)], nsx_rule['source_groups']) if (fw_rule.get('destination_ip_address') and not fw_rule['destination_ip_address'].startswith('0.0.0.0')): self.assertEqual(['/infra/domains/%s/groups/destination-%s' % ( policy_constants.DEFAULT_DOMAIN, fw_rule['id'])], nsx_rule['destination_groups']) elif is_ingress: self.assertEqual(['/infra/domains/%s/groups/%s-%s' % ( policy_constants.DEFAULT_DOMAIN, FAKE_ROUTER_ID, FAKE_NET_ID)], nsx_rule['destination_groups']) def _fake_empty_firewall_group(self): fw_inst = {'id': FAKE_FW_ID, 'admin_state_up': True, 'tenant_id': self.project_id, 'ingress_rule_list': [], 'egress_rule_list': []} return fw_inst def _fake_firewall_group(self, rule_list, 
is_ingress=True, admin_state_up=True): _rule_list = copy.deepcopy(rule_list) for rule in _rule_list: rule['position'] = str(_rule_list.index(rule)) fw_inst = {'id': FAKE_FW_ID, 'admin_state_up': admin_state_up, 'tenant_id': self.project_id, 'ingress_rule_list': [], 'egress_rule_list': []} if is_ingress: fw_inst['ingress_rule_list'] = _rule_list else: fw_inst['egress_rule_list'] = _rule_list return fw_inst def _fake_firewall_group_with_admin_down(self, rule_list, is_ingress=True): return self._fake_firewall_group( rule_list, is_ingress=is_ingress, admin_state_up=False) def _fake_apply_list(self): router_inst = {'id': FAKE_ROUTER_ID, 'external_gateway_info': 'dummy'} router_info_inst = mock.Mock() router_info_inst.router = router_inst router_info_inst.router_id = FAKE_ROUTER_ID apply_list = [(router_info_inst, FAKE_PORT_ID)] return apply_list def test_create_firewall_no_rules(self): apply_list = self._fake_apply_list() firewall = self._fake_empty_firewall_group() port = {'id': FAKE_PORT_ID, 'network_id': FAKE_NET_ID} with mock.patch.object(self.plugin, '_get_router_interfaces', return_value=[port]),\ mock.patch.object(self.plugin.fwaas_callbacks, 'get_port_fwg', return_value=firewall),\ mock.patch.object(self.plugin, '_get_router', return_value={'project_id': self.project_id}),\ mock.patch.object(self.plugin, 'service_router_has_services', return_value=True),\ mock.patch(GW_POLICY_PATH + ".update_entries") as update_fw: self.firewall.create_firewall_group('nsx', apply_list, firewall) # expecting 2 block rules for the logical switch (egress & ingress) # and last default allow all rule expected_rules = (self._block_interface_rules(0) + [self._default_rule(2)]) update_fw.assert_called_once_with( policy_constants.DEFAULT_DOMAIN, FAKE_ROUTER_ID, mock.ANY, category=policy_constants.CATEGORY_LOCAL_GW) # compare rules one by one actual_rules = update_fw.call_args[0][2] self.assertEqual(len(expected_rules), len(actual_rules)) for index in range(len(actual_rules)): 
self.assertEqual(expected_rules[index], actual_rules[index].get_obj_dict()) def _setup_firewall_with_rules(self, func, is_ingress=True, is_conflict=False, cidr='10.24.4.0/24'): apply_list = self._fake_apply_list() rule_list = self._fake_rules_v4(is_ingress=is_ingress, is_conflict=is_conflict, cidr=cidr) firewall = self._fake_firewall_group(rule_list, is_ingress=is_ingress) port = {'id': FAKE_PORT_ID, 'network_id': FAKE_NET_ID} with mock.patch.object(self.plugin, '_get_router_interfaces', return_value=[port]),\ mock.patch.object(self.plugin.fwaas_callbacks, 'get_port_fwg', return_value=firewall), \ mock.patch.object(self.plugin, '_get_router', return_value={'project_id': self.project_id}),\ mock.patch.object(self.plugin, 'service_router_has_services', return_value=True), \ mock.patch(GW_POLICY_PATH + ".update_entries") as update_fw: func('nsx', apply_list, firewall) expected_default_rules = self._block_interface_rules( len(rule_list)) + [self._default_rule(len(rule_list) + 2)] update_fw.assert_called_once_with( policy_constants.DEFAULT_DOMAIN, FAKE_ROUTER_ID, mock.ANY, category=policy_constants.CATEGORY_LOCAL_GW) # compare rules one by one actual_rules = update_fw.call_args[0][2] self.assertEqual(len(rule_list) + 3, len(actual_rules)) self._validate_rules_translation( actual_rules, rule_list, is_ingress) # compare the last 3 rules (default interface rules + # default allow rule) self.assertEqual(actual_rules[-3].get_obj_dict(), expected_default_rules[0]) self.assertEqual(actual_rules[-2].get_obj_dict(), expected_default_rules[1]) self.assertEqual(actual_rules[-1].get_obj_dict(), expected_default_rules[2]) def test_create_firewall_with_ingress_rules(self): self._setup_firewall_with_rules(self.firewall.create_firewall_group) def test_update_firewall_with_ingress_rules(self): self._setup_firewall_with_rules(self.firewall.update_firewall_group) def test_create_firewall_with_egress_rules(self): self._setup_firewall_with_rules(self.firewall.create_firewall_group, 
is_ingress=False) def test_update_firewall_with_egress_rules(self): self._setup_firewall_with_rules(self.firewall.update_firewall_group, is_ingress=False) def test_create_firewall_with_egress_conflicting_rules(self): self._setup_firewall_with_rules(self.firewall.update_firewall_group, is_ingress=False, is_conflict=True) def test_create_firewall_with_ingress_conflicting_rules(self): self._setup_firewall_with_rules(self.firewall.update_firewall_group, is_ingress=True, is_conflict=True) def test_create_firewall_with_illegal_cidr(self): self._setup_firewall_with_rules(self.firewall.create_firewall_group, cidr='0.0.0.0/24') def test_delete_firewall(self): apply_list = self._fake_apply_list() firewall = self._fake_empty_firewall_group() port = {'id': FAKE_PORT_ID} with mock.patch.object(self.plugin, '_get_router_interfaces', return_value=[port]),\ mock.patch.object(self.plugin.fwaas_callbacks, 'get_port_fwg', return_value=None), \ mock.patch.object(self.plugin, '_get_router', return_value={'project_id': self.project_id}),\ mock.patch.object(self.plugin, 'service_router_has_services', return_value=True), \ mock.patch(GW_POLICY_PATH + ".delete") as delete_fw: self.firewall.delete_firewall_group('nsx', apply_list, firewall) delete_fw.assert_called_once_with( policy_constants.DEFAULT_DOMAIN, map_id=FAKE_ROUTER_ID) def test_create_firewall_with_admin_down(self): apply_list = self._fake_apply_list() rule_list = self._fake_rules_v4() firewall = self._fake_firewall_group_with_admin_down(rule_list) with mock.patch.object(self.plugin, 'service_router_has_services', return_value=True), \ mock.patch.object(self.plugin, '_get_router', return_value={'project_id': self.project_id}),\ mock.patch(GW_POLICY_PATH + ".create_with_entries") as update_fw: self.firewall.create_firewall_group('nsx', apply_list, firewall) update_fw.assert_not_called() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 
vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_p/test_plugin.py0000644000175000017500000037067000000000000025564 0ustar00coreycorey00000000000000# Copyright (c) 2018 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from oslo_config import cfg from oslo_utils import uuidutils from webob import exc from neutron.extensions import address_scope from neutron.extensions import l3 from neutron.extensions import securitygroup as secgrp from neutron.tests.unit.db import test_db_base_plugin_v2 from neutron.tests.unit.extensions import test_address_scope from neutron.tests.unit.extensions import test_extraroute as test_ext_route from neutron.tests.unit.extensions import test_l3 as test_l3_plugin from neutron.tests.unit.extensions import test_securitygroup from neutron_lib.api.definitions import external_net as extnet_apidef from neutron_lib.api.definitions import extra_dhcp_opt as edo_ext from neutron_lib.api.definitions import extraroute as xroute_apidef from neutron_lib.api.definitions import l3_ext_gw_mode as l3_egm_apidef from neutron_lib.api.definitions import port_security as psec from neutron_lib.api.definitions import portbindings from neutron_lib.api.definitions import provider_net as pnet from neutron_lib.api.definitions import vlantransparent as vlan_apidef from neutron_lib.callbacks import events from neutron_lib.callbacks import exceptions as nc_exc from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from 
neutron_lib import constants from neutron_lib import context from neutron_lib import exceptions as n_exc from neutron_lib.objects import registry as obj_reg from neutron_lib.plugins import directory from vmware_nsx.common import utils from vmware_nsx.extensions import providersecuritygroup as provider_sg from vmware_nsx.plugins.common import plugin as com_plugin from vmware_nsx.plugins.nsx_p import plugin as nsx_plugin from vmware_nsx.services.lbaas.nsx_p.implementation import loadbalancer_mgr from vmware_nsx.services.lbaas.octavia import octavia_listener from vmware_nsx.tests import unit as vmware from vmware_nsx.tests.unit.common_plugin import common_v3 from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3 import nsx_constants from vmware_nsxlib.v3.policy import constants as pol_const from vmware_nsxlib.v3 import utils as nsxlib_utils PLUGIN_NAME = 'vmware_nsx.plugin.NsxPolicyPlugin' NSX_OVERLAY_TZ_NAME = 'OVERLAY_TZ' NSX_VLAN_TZ_NAME = 'VLAN_TZ' DEFAULT_TIER0_ROUTER_UUID = "efad0078-9204-4b46-a2d8-d4dd31ed448f" NSX_DHCP_PROFILE_ID = 'DHCP_PROFILE' NSX_MD_PROXY_ID = 'MD_PROXY' LOGICAL_SWITCH_ID = '00000000-1111-2222-3333-444444444444' def _return_id_key(*args, **kwargs): return {'id': uuidutils.generate_uuid()} def _return_id_key_list(*args, **kwargs): return [{'id': uuidutils.generate_uuid()}] def _return_same(key, *args, **kwargs): return key class NsxPPluginTestCaseMixin( test_db_base_plugin_v2.NeutronDbPluginV2TestCase): def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None, service_plugins=None, **kwargs): self._mock_nsx_policy_backend_calls() self.setup_conf_overrides() super(NsxPPluginTestCaseMixin, self).setUp(plugin=plugin, ext_mgr=ext_mgr) self.ctx = context.get_admin_context() def _mock_nsx_policy_backend_calls(self): resource_list_result = {'results': [{'id': 'test', 'display_name': 'test'}]} mock.patch( "vmware_nsxlib.v3.policy.NsxPolicyLib.get_version", return_value=nsx_constants.NSX_VERSION_3_0_0).start() mock.patch( 
"vmware_nsxlib.v3.client.RESTClient.get").start() mock.patch( "vmware_nsxlib.v3.client.RESTClient.list", return_value=resource_list_result).start() mock.patch( "vmware_nsxlib.v3.client.RESTClient.patch").start() mock.patch( "vmware_nsxlib.v3.client.RESTClient.update").start() mock.patch( "vmware_nsxlib.v3.client.RESTClient.delete").start() mock.patch("vmware_nsxlib.v3.policy.core_resources." "NsxPolicyCommunicationMapApi._get_last_seq_num", return_value=-1).start() mock.patch("vmware_nsxlib.v3.policy.core_resources." "NsxPolicyResourceBase._wait_until_realized", return_value={'state': pol_const.STATE_REALIZED} ).start() mock.patch("vmware_nsxlib.v3.policy.core_resources." "NsxPolicyTier1Api.update_transport_zone").start() mock.patch("vmware_nsxlib.v3.policy.core_resources." "NsxPolicySegmentApi.get_realized_logical_switch_id", return_value=LOGICAL_SWITCH_ID ).start() mock.patch("vmware_nsxlib.v3.policy.core_resources." "NsxPolicySegmentApi.get_realized_id", return_value=LOGICAL_SWITCH_ID ).start() mock.patch("vmware_nsxlib.v3.policy.core_resources." "NsxPolicySegmentApi.set_admin_state").start() mock.patch("vmware_nsxlib.v3.policy.core_resources." "NsxPolicySegmentPortApi.set_admin_state").start() mock.patch("vmware_nsxlib.v3.policy.core_resources.NsxPolicyTier0Api." "get_edge_cluster_path", return_value="x/1").start() mock.patch("vmware_nsxlib.v3.policy.core_resources." "NsxPolicyEdgeClusterApi.get_edge_node_ids", return_value=["node1"]).start() mock.patch("vmware_nsxlib.v3.NsxLib.get_tag_limits", return_value=nsxlib_utils.TagLimits(20, 40, 15)).start() # Add some nsxlib mocks for the passthrough apis mock.patch("vmware_nsxlib.v3.NsxLib.get_version", return_value=nsx_constants.NSX_VERSION_3_0_0).start() mock.patch("vmware_nsxlib.v3.core_resources.NsxLibLogicalRouter." "update").start() mock.patch("vmware_nsxlib.v3.core_resources.NsxLibTransportNode." 
"get_transport_zones", return_value=[NSX_OVERLAY_TZ_NAME, NSX_VLAN_TZ_NAME, mock.ANY]).start() mock.patch("vmware_nsxlib.v3.core_resources.NsxLibEdgeCluster." "get_transport_nodes", return_value=["dummy"]).start() mock.patch("vmware_nsxlib.v3.NsxLib." "get_id_by_resource_and_tag").start() def setup_conf_overrides(self): cfg.CONF.set_override('default_overlay_tz', NSX_OVERLAY_TZ_NAME, 'nsx_p') cfg.CONF.set_override('default_vlan_tz', NSX_VLAN_TZ_NAME, 'nsx_p') cfg.CONF.set_override('dhcp_profile', NSX_DHCP_PROFILE_ID, 'nsx_p') cfg.CONF.set_override('metadata_proxy', NSX_MD_PROXY_ID, 'nsx_p') cfg.CONF.set_override('dhcp_agent_notification', False) def _create_network(self, fmt, name, admin_state_up, arg_list=None, providernet_args=None, set_context=False, tenant_id=None, **kwargs): tenant_id = tenant_id or self._tenant_id data = {'network': {'name': name, 'admin_state_up': admin_state_up, 'tenant_id': tenant_id}} # Fix to allow the router:external attribute and any other # attributes containing a colon to be passed with # a double underscore instead kwargs = dict((k.replace('__', ':'), v) for k, v in kwargs.items()) if extnet_apidef.EXTERNAL in kwargs: arg_list = (extnet_apidef.EXTERNAL, ) + (arg_list or ()) if providernet_args: kwargs.update(providernet_args) for arg in (('admin_state_up', 'tenant_id', 'shared', 'availability_zone_hints') + (arg_list or ())): # Arg must be present if arg in kwargs: data['network'][arg] = kwargs[arg] network_req = self.new_create_request('networks', data, fmt) if set_context and tenant_id: # create a specific auth context for this request network_req.environ['neutron.context'] = context.Context( '', tenant_id) return network_req.get_response(self.api) def _create_l3_ext_network(self, physical_network='abc'): name = 'l3_ext_net' net_type = utils.NetworkTypes.L3_EXT providernet_args = {pnet.NETWORK_TYPE: net_type, pnet.PHYSICAL_NETWORK: physical_network} return self.network(name=name, router__external=True, 
providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK)) def _initialize_azs(self): self.plugin.init_availability_zones() self.plugin._init_default_config() class NsxPTestNetworks(test_db_base_plugin_v2.TestNetworksV2, NsxPPluginTestCaseMixin): def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None, service_plugins=None): # add vlan transparent to the configuration cfg.CONF.set_override('vlan_transparent', True) super(NsxPTestNetworks, self).setUp(plugin=plugin, ext_mgr=ext_mgr) def tearDown(self): super(NsxPTestNetworks, self).tearDown() def test_create_provider_flat_network(self): providernet_args = {pnet.NETWORK_TYPE: 'flat'} with mock.patch('vmware_nsxlib.v3.policy.core_resources.' 'NsxPolicySegmentApi.create_or_overwrite', side_effect=_return_id_key) as nsx_create, \ mock.patch('vmware_nsxlib.v3.policy.core_resources.' 'NsxPolicySegmentApi.delete') as nsx_delete, \ mock.patch('vmware_nsxlib.v3.policy.core_resources.' 'NsxPolicyTransportZoneApi.get_transport_type', return_value=nsx_constants.TRANSPORT_TYPE_VLAN), \ self.network(name='flat_net', providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, )) as net: self.assertEqual('flat', net['network'].get(pnet.NETWORK_TYPE)) # make sure the network is created at the backend nsx_create.assert_called_once() # Delete the network and make sure it is deleted from the backend req = self.new_delete_request('networks', net['network']['id']) res = req.get_response(self.api) self.assertEqual(exc.HTTPNoContent.code, res.status_int) nsx_delete.assert_called_once() def test_create_provider_flat_network_with_physical_net(self): physical_network = DEFAULT_TIER0_ROUTER_UUID providernet_args = {pnet.NETWORK_TYPE: 'flat', pnet.PHYSICAL_NETWORK: physical_network} with mock.patch( 'vmware_nsxlib.v3.policy.core_resources.NsxPolicyTransportZoneApi.' 
'get_transport_type', return_value=nsx_constants.TRANSPORT_TYPE_VLAN), \ self.network(name='flat_net', providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK)) as net: self.assertEqual('flat', net['network'].get(pnet.NETWORK_TYPE)) def test_create_provider_flat_network_with_vlan(self): providernet_args = {pnet.NETWORK_TYPE: 'flat', pnet.SEGMENTATION_ID: 11} with mock.patch( 'vmware_nsxlib.v3.policy.core_resources.NsxPolicyTransportZoneApi.' 'get_transport_type', return_value=nsx_constants.TRANSPORT_TYPE_VLAN): result = self._create_network(fmt='json', name='bad_flat_net', admin_state_up=True, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID)) data = self.deserialize('json', result) # should fail self.assertEqual('InvalidInput', data['NeutronError']['type']) def test_create_provider_geneve_network(self): providernet_args = {pnet.NETWORK_TYPE: 'geneve'} with mock.patch('vmware_nsxlib.v3.policy.core_resources.' 'NsxPolicySegmentApi.create_or_overwrite', side_effect=_return_id_key) as nsx_create, \ mock.patch('vmware_nsxlib.v3.policy.core_resources.' 'NsxPolicySegmentApi.delete') as nsx_delete, \ self.network(name='geneve_net', providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, )) as net: self.assertEqual('geneve', net['network'].get(pnet.NETWORK_TYPE)) # make sure the network is created at the backend nsx_create.assert_called_once() # Delete the network and make sure it is deleted from the backend req = self.new_delete_request('networks', net['network']['id']) res = req.get_response(self.api) self.assertEqual(exc.HTTPNoContent.code, res.status_int) nsx_delete.assert_called_once() def test_create_provider_geneve_network_with_physical_net(self): physical_network = DEFAULT_TIER0_ROUTER_UUID providernet_args = {pnet.NETWORK_TYPE: 'geneve', pnet.PHYSICAL_NETWORK: physical_network} with mock.patch( 'vmware_nsxlib.v3.policy.core_resources.NsxPolicyTransportZoneApi.' 
'get_transport_type', return_value=nsx_constants.TRANSPORT_TYPE_OVERLAY),\ self.network(name='geneve_net', providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, )) as net: self.assertEqual('geneve', net['network'].get(pnet.NETWORK_TYPE)) def test_create_provider_geneve_network_with_vlan(self): providernet_args = {pnet.NETWORK_TYPE: 'geneve', pnet.SEGMENTATION_ID: 11} with mock.patch( 'vmware_nsxlib.v3.policy.core_resources.NsxPolicyTransportZoneApi.' 'get_transport_type', return_value=nsx_constants.TRANSPORT_TYPE_OVERLAY): result = self._create_network(fmt='json', name='bad_geneve_net', admin_state_up=True, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID)) data = self.deserialize('json', result) # should fail self.assertEqual('InvalidInput', data['NeutronError']['type']) def test_create_provider_vlan_network(self): providernet_args = {pnet.NETWORK_TYPE: 'vlan', pnet.SEGMENTATION_ID: 11} with mock.patch('vmware_nsxlib.v3.policy.core_resources.' 'NsxPolicySegmentApi.create_or_overwrite', side_effect=_return_id_key) as nsx_create, \ mock.patch('vmware_nsxlib.v3.policy.core_resources.' 'NsxPolicySegmentApi.delete') as nsx_delete, \ mock.patch('vmware_nsxlib.v3.policy.core_resources.' 
'NsxPolicyTransportZoneApi.get_transport_type', return_value=nsx_constants.TRANSPORT_TYPE_VLAN), \ self.network(name='vlan_net', providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID)) as net: self.assertEqual('vlan', net['network'].get(pnet.NETWORK_TYPE)) # make sure the network is created at the backend nsx_create.assert_called_once() # Delete the network and make sure it is deleted from the backend req = self.new_delete_request('networks', net['network']['id']) res = req.get_response(self.api) self.assertEqual(exc.HTTPNoContent.code, res.status_int) nsx_delete.assert_called_once() def test_create_provider_nsx_network(self): physical_network = 'Fake logical switch' providernet_args = {pnet.NETWORK_TYPE: 'nsx-net', pnet.PHYSICAL_NETWORK: physical_network} with mock.patch( 'vmware_nsxlib.v3.policy.core_resources.NsxPolicySegmentApi.' 'create_or_overwrite', side_effect=nsxlib_exc.ResourceNotFound) as nsx_create, \ mock.patch('vmware_nsxlib.v3.policy.core_resources.' 'NsxPolicySegmentApi.delete') as nsx_delete, \ self.network(name='nsx_net', providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK)) as net: self.assertEqual('nsx-net', net['network'].get(pnet.NETWORK_TYPE)) self.assertEqual(physical_network, net['network'].get(pnet.PHYSICAL_NETWORK)) # make sure the network is NOT created at the backend nsx_create.assert_not_called() # Delete the network. 
It should NOT deleted from the backend req = self.new_delete_request('networks', net['network']['id']) res = req.get_response(self.api) self.assertEqual(exc.HTTPNoContent.code, res.status_int) nsx_delete.assert_not_called() def test_create_provider_bad_nsx_network(self): physical_network = 'Bad logical switch' providernet_args = {pnet.NETWORK_TYPE: 'nsx-net', pnet.PHYSICAL_NETWORK: physical_network} with mock.patch( "vmware_nsxlib.v3.policy.core_resources.NsxPolicySegmentApi.get", side_effect=nsxlib_exc.ResourceNotFound): result = self._create_network(fmt='json', name='bad_nsx_net', admin_state_up=True, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK)) data = self.deserialize('json', result) # should fail self.assertEqual('InvalidInput', data['NeutronError']['type']) def _test_transparent_vlan_net(self, net_type, tz_type, should_succeed): providernet_args = {pnet.NETWORK_TYPE: net_type, vlan_apidef.VLANTRANSPARENT: True} with mock.patch('vmware_nsxlib.v3.policy.core_resources.' 
                        'NsxPolicyTransportZoneApi.get_transport_type',
                        return_value=tz_type):
            # Create the network through the API and inspect the result:
            # vlan-transparent is only accepted for some net_type/TZ combos.
            result = self._create_network(fmt='json', name='vlan_net',
                                          admin_state_up=True,
                                          providernet_args=providernet_args,
                                          arg_list=(
                                              pnet.NETWORK_TYPE,
                                              vlan_apidef.VLANTRANSPARENT))
            data = self.deserialize('json', result)
            if should_succeed:
                # Network type and the transparent flag must be echoed back
                self.assertEqual(net_type,
                                 data['network'].get(pnet.NETWORK_TYPE))
                self.assertTrue(
                    data['network'].get(vlan_apidef.VLANTRANSPARENT))
            else:
                # The plugin is expected to reject the combination
                self.assertEqual('InvalidInput',
                                 data['NeutronError']['type'])

    def test_create_non_provider_network_with_transparent(self):
        # No provider network type: transparent VLAN must be rejected
        self._test_transparent_vlan_net(
            net_type="",
            tz_type=nsx_constants.TRANSPORT_TYPE_OVERLAY,
            should_succeed=False)

    def test_create_provider_overlay_network_with_transparent(self):
        # Geneve on an overlay TZ supports transparent VLAN
        self._test_transparent_vlan_net(
            net_type=utils.NsxV3NetworkTypes.GENEVE,
            tz_type=nsx_constants.TRANSPORT_TYPE_OVERLAY,
            should_succeed=True)

    def test_create_provider_flat_network_with_transparent(self):
        # Flat on a VLAN TZ supports transparent VLAN
        self._test_transparent_vlan_net(
            net_type=utils.NsxV3NetworkTypes.FLAT,
            tz_type=nsx_constants.TRANSPORT_TYPE_VLAN,
            should_succeed=True)

    def test_create_provider_vlan_network_with_transparent(self):
        # VLAN on a VLAN TZ supports transparent VLAN
        self._test_transparent_vlan_net(
            net_type=utils.NsxV3NetworkTypes.VLAN,
            tz_type=nsx_constants.TRANSPORT_TYPE_VLAN,
            should_succeed=True)

    def test_network_update_external_failure(self):
        # Create an external network, then verify that flipping
        # router:external back to False is rejected by the plugin.
        data = {'network': {'name': 'net1',
                            'router:external': 'True',
                            'tenant_id': 'tenant_one',
                            'provider:physical_network': 'stam'}}
        network_req = self.new_create_request('networks', data)
        network = self.deserialize(self.fmt,
                                   network_req.get_response(self.api))
        ext_net_id = network['network']['id']
        # should fail to update the network to non-external
        args = {'network': {'router:external': 'False'}}
        req = self.new_update_request('networks', args,
                                      ext_net_id, fmt='json')
        res = self.deserialize('json', req.get_response(self.api))
        self.assertEqual('InvalidInput', res['NeutronError']['type'])

    @mock.patch.object(nsx_plugin.NsxPolicyPlugin,
                       'validate_availability_zones')
    def test_create_network_with_availability_zone(self, mock_validate_az):
        # A network created with availability_zone_hints should echo the
        # hints back in the API response (AZ validation itself is mocked).
        name = 'net-with-zone'
        zone = ['zone1']
        mock_validate_az.return_value = None
        with self.network(name=name, availability_zone_hints=zone) as net:
            az_hints = net['network']['availability_zone_hints']
            self.assertListEqual(az_hints, zone)

    def test_create_net_with_qos(self):
        # create_network with a qos_policy_id: the id must be stored and
        # returned both from create_network and a subsequent get_network.
        policy_id = uuidutils.generate_uuid()
        data = {'network': {
            'tenant_id': self._tenant_id,
            'qos_policy_id': policy_id,
            'name': 'qos_net',
            'admin_state_up': True,
            'shared': False}
        }
        # Stand-in QoS policy object returned by the mocked registry lookup
        dummy = mock.Mock()
        dummy.id = policy_id
        with mock.patch.object(self.plugin, '_validate_qos_policy_id'),\
            mock.patch.object(obj_reg.load_class('QosPolicy'),
                              'get_network_policy',
                              return_value=dummy):
            net = self.plugin.create_network(self.ctx, data)
            self.assertEqual(policy_id, net['qos_policy_id'])
            net = self.plugin.get_network(self.ctx, net['id'])
            self.assertEqual(policy_id, net['qos_policy_id'])

    def test_update_net_with_qos(self):
        # Create a network without QoS, then attach a qos_policy_id via
        # update_network and verify it is returned by update and get.
        data = {'network': {
            'tenant_id': self._tenant_id,
            'name': 'qos_net',
            'admin_state_up': True,
            'shared': False}
        }
        net = self.plugin.create_network(self.ctx, data)
        policy_id = uuidutils.generate_uuid()
        data['network']['qos_policy_id'] = policy_id
        dummy = mock.Mock()
        dummy.id = policy_id
        with mock.patch.object(self.plugin, '_validate_qos_policy_id'),\
            mock.patch.object(obj_reg.load_class('QosPolicy'),
                              'get_network_policy',
                              return_value=dummy):
            res = self.plugin.update_network(self.ctx, net['id'], data)
            self.assertEqual(policy_id, res['qos_policy_id'])
            res = self.plugin.get_network(self.ctx, net['id'])
            self.assertEqual(policy_id, res['qos_policy_id'])

    def test_create_ens_network_with_qos(self):
        # ENS host-switch-mode network creation with a QoS policy:
        # backend transport-zone lookups are mocked out below.
        cfg.CONF.set_override('ens_support', True, 'nsx_v3')
        mock_ens = mock.patch('vmware_nsxlib.v3.policy'
                              '.core_resources.NsxPolicyTransportZoneApi'
                              '.get_host_switch_mode', return_value='ENS')
        mock_tz = mock.patch('vmware_nsxlib.v3'
                             '.core_resources.NsxLibLogicalSwitch.get',
                             return_value={'transport_zone_id': 'xxx'})
        mock_tt = 
mock.patch('vmware_nsxlib.v3.policy' '.core_resources.NsxPolicyTransportZoneApi' '.get_transport_type', return_value='VLAN') policy_id = uuidutils.generate_uuid() data = {'network': { 'name': 'qos_net', 'tenant_id': 'some_tenant', 'provider:network_type': 'flat', 'provider:physical_network': 'xxx', 'admin_state_up': True, 'shared': False, 'qos_policy_id': policy_id, 'port_security_enabled': False}} with mock_ens, mock_tz, mock_tt, mock.patch.object( self.plugin, '_validate_qos_policy_id'): res = self.plugin.create_network(context.get_admin_context(), data) self.assertEqual(policy_id, res['qos_policy_id']) def test_update_ens_network_with_qos(self): cfg.CONF.set_override('ens_support', True, 'nsx_v3') mock_ens = mock.patch('vmware_nsxlib.v3.policy' '.core_resources.NsxPolicyTransportZoneApi' '.get_host_switch_mode', return_value='ENS') mock_tz = mock.patch('vmware_nsxlib.v3' '.core_resources.NsxLibLogicalSwitch.get', return_value={'transport_zone_id': 'xxx'}) mock_tt = mock.patch('vmware_nsxlib.v3.policy' '.core_resources.NsxPolicyTransportZoneApi' '.get_transport_type', return_value='VLAN') data = {'network': { 'name': 'qos_net', 'tenant_id': 'some_tenant', 'provider:network_type': 'flat', 'provider:physical_network': 'xxx', 'admin_state_up': True, 'shared': False, 'port_security_enabled': False}} with mock_ens, mock_tz, mock_tt,\ mock.patch.object(self.plugin, '_validate_qos_policy_id'): network = self.plugin.create_network(context.get_admin_context(), data) policy_id = uuidutils.generate_uuid() data = {'network': { 'id': network['id'], 'admin_state_up': True, 'shared': False, 'port_security_enabled': False, 'tenant_id': 'some_tenant', 'qos_policy_id': policy_id}} res = self.plugin.update_network( context.get_admin_context(), network['id'], data) self.assertEqual(policy_id, res['qos_policy_id']) class NsxPTestPorts(common_v3.NsxV3TestPorts, common_v3.NsxV3SubnetMixin, NsxPPluginTestCaseMixin): def setUp(self, **kwargs): super(NsxPTestPorts, self).setUp(**kwargs) 
    # The following overrides re-run the inherited port tests with the
    # with_disable_dhcp decorator applied, since this plugin's DHCP
    # handling differs from the base class assumptions.
    @common_v3.with_disable_dhcp
    def test_requested_ips_only(self):
        return super(NsxPTestPorts, self).test_requested_ips_only()

    @common_v3.with_disable_dhcp
    def test_list_ports_with_sort_emulated(self):
        return super(NsxPTestPorts,
                     self).test_list_ports_with_sort_emulated()

    @common_v3.with_disable_dhcp
    def test_list_ports_with_pagination_native(self):
        return super(NsxPTestPorts,
                     self).test_list_ports_with_pagination_native()

    @common_v3.with_disable_dhcp
    def test_list_ports_for_network_owner(self):
        return super(NsxPTestPorts,
                     self).test_list_ports_for_network_owner()

    @common_v3.with_disable_dhcp
    def test_list_ports_public_network(self):
        return super(NsxPTestPorts, self).test_list_ports_public_network()

    @common_v3.with_disable_dhcp
    def test_list_ports(self):
        return super(NsxPTestPorts, self).test_list_ports()

    @common_v3.with_disable_dhcp
    def test_get_ports_count(self):
        return super(NsxPTestPorts, self).test_get_ports_count()

    @common_v3.with_disable_dhcp
    def test_list_ports_with_sort_native(self):
        return super(NsxPTestPorts,
                     self).test_list_ports_with_sort_native()

    @common_v3.with_disable_dhcp
    def test_list_ports_with_pagination_emulated(self):
        return super(NsxPTestPorts,
                     self).test_list_ports_with_pagination_emulated()

    def test_update_port_delete_ip(self):
        # This test case overrides the default because the nsx plugin
        # implements port_security/security groups and it is not allowed
        # to remove an ip address from a port unless the security group
        # is first removed.
        with self.subnet() as subnet:
            with self.port(subnet=subnet) as port:
                # Clearing fixed_ips is allowed here because the security
                # groups are removed in the same update request.
                data = {'port': {'admin_state_up': False,
                                 'fixed_ips': [],
                                 secgrp.SECURITYGROUPS: []}}
                req = self.new_update_request('ports',
                                              data, port['port']['id'])
                res = self.deserialize('json', req.get_response(self.api))
                self.assertEqual(res['port']['admin_state_up'],
                                 data['port']['admin_state_up'])
                self.assertEqual(res['port']['fixed_ips'],
                                 data['port']['fixed_ips'])

    def test_create_port_with_qos(self):
        # create_port with a qos_policy_id: the id must be stored and
        # returned both from create_port and a subsequent get_port.
        with self.network() as network:
            policy_id = uuidutils.generate_uuid()
            data = {'port': {
                'network_id': network['network']['id'],
                'tenant_id': self._tenant_id,
                'qos_policy_id': policy_id,
                'name': 'qos_port',
                'admin_state_up': True,
                'device_id': 'fake_device',
                'device_owner': 'fake_owner',
                'fixed_ips': [],
                'mac_address': '00:00:00:00:00:01'}
            }
            with mock.patch.object(self.plugin, '_validate_qos_policy_id'):
                port = self.plugin.create_port(self.ctx, data)
                self.assertEqual(policy_id, port['qos_policy_id'])
            # Get port should also return the qos policy id
            with mock.patch('vmware_nsx.services.qos.common.utils.'
                            'get_port_policy_id',
                            return_value=policy_id):
                port = self.plugin.get_port(self.ctx, port['id'])
                self.assertEqual(policy_id, port['qos_policy_id'])

    def test_update_port_with_qos(self):
        # Create a port without QoS, then attach a qos_policy_id via
        # update_port and verify it is returned by update and get.
        with self.network() as network:
            data = {'port': {
                'network_id': network['network']['id'],
                'tenant_id': self._tenant_id,
                'name': 'qos_port',
                'admin_state_up': True,
                'device_id': 'fake_device',
                'device_owner': 'fake_owner',
                'fixed_ips': [],
                'mac_address': '00:00:00:00:00:01'}
            }
            port = self.plugin.create_port(self.ctx, data)
            policy_id = uuidutils.generate_uuid()
            data['port']['qos_policy_id'] = policy_id
            with mock.patch.object(self.plugin, '_validate_qos_policy_id'):
                res = self.plugin.update_port(self.ctx, port['id'], data)
                self.assertEqual(policy_id, res['qos_policy_id'])
            # Get port should also return the qos policy id
            with mock.patch('vmware_nsx.services.qos.common.utils.'
'get_port_policy_id', return_value=policy_id): res = self.plugin.get_port(self.ctx, port['id']) self.assertEqual(policy_id, res['qos_policy_id']) # now remove the qos from the port data['port']['qos_policy_id'] = None res = self.plugin.update_port(self.ctx, port['id'], data) self.assertIsNone(res['qos_policy_id']) def test_create_ext_port_with_qos_fail(self): with self._create_l3_ext_network() as network: with self.subnet(network=network, cidr='10.0.0.0/24', enable_dhcp=False),\ mock.patch.object(self.plugin, '_validate_qos_policy_id'): policy_id = uuidutils.generate_uuid() data = {'port': {'network_id': network['network']['id'], 'tenant_id': self._tenant_id, 'qos_policy_id': policy_id}} # Cannot add qos policy to a router port self.assertRaises(n_exc.InvalidInput, self.plugin.create_port, self.ctx, data) def _test_create_illegal_port_with_qos_fail(self, device_owner): with self.network() as network: with self.subnet(network=network, cidr='10.0.0.0/24'),\ mock.patch.object(self.plugin, '_validate_qos_policy_id'): policy_id = uuidutils.generate_uuid() data = {'port': {'network_id': network['network']['id'], 'tenant_id': self._tenant_id, 'device_owner': device_owner, 'qos_policy_id': policy_id}} # Cannot add qos policy to this type of port self.assertRaises(n_exc.InvalidInput, self.plugin.create_port, self.ctx, data) def test_create_port_ens_with_qos_fail(self): with self.network() as network: with self.subnet(network=network, cidr='10.0.0.0/24'): policy_id = uuidutils.generate_uuid() mock_ens = mock.patch( 'vmware_nsxlib.v3.policy.core_resources.' 'NsxPolicyTransportZoneApi.get_host_switch_mode', return_value='ENS') mock_tz = mock.patch( 'vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.get', return_value={'transport_zone_id': 'xxx'}) mock_tt = mock.patch( 'vmware_nsxlib.v3.policy.core_resources.' 
'NsxPolicyTransportZoneApi.get_transport_type', return_value='VLAN') data = {'port': { 'network_id': network['network']['id'], 'tenant_id': self._tenant_id, 'name': 'qos_port', 'admin_state_up': True, 'device_id': 'fake_device', 'device_owner': 'fake_owner', 'fixed_ips': [], 'port_security_enabled': False, 'mac_address': '00:00:00:00:00:01', 'qos_policy_id': policy_id} } # Cannot add qos policy to this type of port with mock_ens, mock_tz, mock_tt, \ mock.patch.object(self.plugin, '_validate_qos_policy_id'): res = self.plugin.create_port(self.ctx, data) self.assertEqual(policy_id, res['qos_policy_id']) def test_create_port_with_mac_learning_true(self): plugin = directory.get_plugin() ctx = context.get_admin_context() with self.network() as network: data = {'port': { 'network_id': network['network']['id'], 'tenant_id': self._tenant_id, 'name': 'port', 'admin_state_up': True, 'device_id': 'fake_device', 'device_owner': 'fake_owner', 'fixed_ips': [], 'port_security_enabled': False, 'mac_address': '00:00:00:00:00:01', 'mac_learning_enabled': True} } port = plugin.create_port(ctx, data) self.assertTrue(port['mac_learning_enabled']) def test_create_port_with_mac_learning_false(self): plugin = directory.get_plugin() ctx = context.get_admin_context() with self.network() as network: data = {'port': { 'network_id': network['network']['id'], 'tenant_id': self._tenant_id, 'name': 'port', 'admin_state_up': True, 'device_id': 'fake_device', 'device_owner': 'fake_owner', 'fixed_ips': [], 'port_security_enabled': False, 'mac_address': '00:00:00:00:00:01', 'mac_learning_enabled': False} } port = plugin.create_port(ctx, data) self.assertFalse(port['mac_learning_enabled']) def test_update_port_with_mac_learning_true(self): plugin = directory.get_plugin() ctx = context.get_admin_context() with self.network() as network: data = {'port': { 'network_id': network['network']['id'], 'tenant_id': self._tenant_id, 'name': 'port', 'admin_state_up': True, 'device_id': 'fake_device', 
'device_owner': 'fake_owner', 'fixed_ips': [], 'port_security_enabled': False, 'mac_address': '00:00:00:00:00:01'} } port = plugin.create_port(ctx, data) data['port']['mac_learning_enabled'] = True update_res = plugin.update_port(ctx, port['id'], data) self.assertTrue(update_res['mac_learning_enabled']) def test_update_port_with_mac_learning_false(self): plugin = directory.get_plugin() ctx = context.get_admin_context() with self.network() as network: data = {'port': { 'network_id': network['network']['id'], 'tenant_id': self._tenant_id, 'name': 'port', 'admin_state_up': True, 'device_id': 'fake_device', 'device_owner': 'fake_owner', 'fixed_ips': [], 'port_security_enabled': False, 'mac_address': '00:00:00:00:00:01'} } port = plugin.create_port(ctx, data) data['port']['mac_learning_enabled'] = False update_res = plugin.update_port(ctx, port['id'], data) self.assertFalse(update_res['mac_learning_enabled']) def test_update_port_with_mac_learning_failes(self): plugin = directory.get_plugin() ctx = context.get_admin_context() with self.network() as network: data = {'port': { 'network_id': network['network']['id'], 'tenant_id': self._tenant_id, 'name': 'port', 'admin_state_up': True, 'device_id': 'fake_device', 'device_owner': constants.DEVICE_OWNER_FLOATINGIP, 'fixed_ips': [], 'port_security_enabled': False, 'mac_address': '00:00:00:00:00:01'} } port = plugin.create_port(ctx, data) data['port']['mac_learning_enabled'] = True self.assertRaises( n_exc.InvalidInput, plugin.update_port, ctx, port['id'], data) def _create_l3_ext_network( self, physical_network=DEFAULT_TIER0_ROUTER_UUID): name = 'l3_ext_net' net_type = utils.NetworkTypes.L3_EXT providernet_args = {pnet.NETWORK_TYPE: net_type, pnet.PHYSICAL_NETWORK: physical_network} return self.network(name=name, router__external=True, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK)) def test_fail_create_port_with_ext_net(self): expected_error = 'InvalidInput' with 
self._create_l3_ext_network() as network: with self.subnet(network=network, cidr='10.0.0.0/24', enable_dhcp=False): device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'X' res = self._create_port(self.fmt, network['network']['id'], exc.HTTPBadRequest.code, device_owner=device_owner) data = self.deserialize(self.fmt, res) self.assertEqual(expected_error, data['NeutronError']['type']) def test_fail_update_port_with_ext_net(self): with self._create_l3_ext_network() as network: with self.subnet(network=network, cidr='10.0.0.0/24', enable_dhcp=False) as subnet: with self.port(subnet=subnet) as port: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'X' data = {'port': {'device_owner': device_owner}} req = self.new_update_request('ports', data, port['port']['id']) res = req.get_response(self.api) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def _test_create_direct_network(self, vlan_id=0): net_type = vlan_id and 'vlan' or 'flat' name = 'direct_net' providernet_args = {pnet.NETWORK_TYPE: net_type, pnet.PHYSICAL_NETWORK: 'tzuuid'} if vlan_id: providernet_args[pnet.SEGMENTATION_ID] = vlan_id mock_tt = mock.patch('vmware_nsxlib.v3.policy' '.core_resources.NsxPolicyTransportZoneApi' '.get_transport_type', return_value=nsx_constants.TRANSPORT_TYPE_VLAN) mock_tt.start() return self.network(name=name, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, pnet.SEGMENTATION_ID)) def _test_create_port_vnic_direct(self, vlan_id): with mock.patch('vmware_nsxlib.v3.policy.core_resources.' 
'NsxPolicyTransportZoneApi.get_transport_type', return_value=nsx_constants.TRANSPORT_TYPE_VLAN),\ self._test_create_direct_network(vlan_id=vlan_id) as network: # Check that port security conflicts kwargs = {portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT, psec.PORTSECURITY: True} net_id = network['network']['id'] res = self._create_port(self.fmt, net_id=net_id, arg_list=(portbindings.VNIC_TYPE, psec.PORTSECURITY), **kwargs) self.assertEqual(res.status_int, exc.HTTPBadRequest.code) # Check that security group conflicts kwargs = {portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT, 'security_groups': [ '4cd70774-cc67-4a87-9b39-7d1db38eb087'], psec.PORTSECURITY: False} net_id = network['network']['id'] res = self._create_port(self.fmt, net_id=net_id, arg_list=(portbindings.VNIC_TYPE, psec.PORTSECURITY), **kwargs) self.assertEqual(res.status_int, exc.HTTPBadRequest.code) # All is kosher so we can create the port kwargs = {portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT} net_id = network['network']['id'] res = self._create_port(self.fmt, net_id=net_id, arg_list=(portbindings.VNIC_TYPE,), **kwargs) port = self.deserialize('json', res) self.assertEqual("direct", port['port'][portbindings.VNIC_TYPE]) self.assertEqual("dvs", port['port'][portbindings.VIF_TYPE]) self.assertEqual( vlan_id, port['port'][portbindings.VIF_DETAILS]['segmentation-id']) # try to get the same port req = self.new_show_request('ports', port['port']['id'], self.fmt) sport = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual("dvs", sport['port'][portbindings.VIF_TYPE]) self.assertEqual("direct", sport['port'][portbindings.VNIC_TYPE]) self.assertEqual( vlan_id, sport['port'][portbindings.VIF_DETAILS]['segmentation-id']) def test_create_port_vnic_direct_flat(self): self._test_create_port_vnic_direct(0) def test_create_port_vnic_direct_vlan(self): self._test_create_port_vnic_direct(10) def test_create_port_vnic_direct_invalid_network(self): with self.network(name='not vlan/flat') as 
net: kwargs = {portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT, psec.PORTSECURITY: False} net_id = net['network']['id'] res = self._create_port(self.fmt, net_id=net_id, arg_list=(portbindings.VNIC_TYPE, psec.PORTSECURITY), **kwargs) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_update_vnic_direct(self): with self._test_create_direct_network(vlan_id=7) as network: with self.subnet(network=network) as subnet: with self.port(subnet=subnet) as port: # need to do two updates as the update for port security # disabled requires that it can only change 2 items data = {'port': {psec.PORTSECURITY: False, 'security_groups': []}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize('json', req.get_response(self.api)) self.assertEqual(portbindings.VNIC_NORMAL, res['port'][portbindings.VNIC_TYPE]) data = {'port': {portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize('json', req.get_response(self.api)) self.assertEqual(portbindings.VNIC_DIRECT, res['port'][portbindings.VNIC_TYPE]) def test_port_invalid_vnic_type(self): with self._test_create_direct_network(vlan_id=7) as network: kwargs = {portbindings.VNIC_TYPE: 'invalid', psec.PORTSECURITY: False} net_id = network['network']['id'] res = self._create_port(self.fmt, net_id=net_id, arg_list=(portbindings.VNIC_TYPE, psec.PORTSECURITY), **kwargs) self.assertEqual(res.status_int, exc.HTTPBadRequest.code) def test_create_ipv6_port(self): with self.network(name='net') as network: self._make_v6_subnet(network, constants.DHCPV6_STATEFUL) res = self._create_port(self.fmt, net_id=network['network']['id']) port = self.deserialize(self.fmt, res) self.assertIn('id', port['port']) def test_create_ipv6_port_with_extra_dhcp(self): with self.network(name='net') as network: self._make_v6_subnet(network, constants.DHCPV6_STATEFUL) opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, 
{'opt_name': 'tftp-server-address', 'opt_value': '123.123.123.123'}] params = {edo_ext.EXTRADHCPOPTS: opt_list, 'arg_list': (edo_ext.EXTRADHCPOPTS,)} self._create_port(self.fmt, network['network']['id'], exc.HTTPBadRequest.code, **params) def test_update_ipv6_port_with_extra_dhcp(self): with self.network(name='net') as network: self._make_v6_subnet(network, constants.DHCPV6_STATEFUL) res = self._create_port(self.fmt, net_id=network['network']['id']) port = self.deserialize(self.fmt, res) self.assertIn('id', port['port']) opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, {'opt_name': 'tftp-server-address', 'opt_value': '123.123.123.123'}] data = {'port': {edo_ext.EXTRADHCPOPTS: opt_list}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertIn('NeutronError', res) class NsxPTestSubnets(common_v3.NsxV3TestSubnets, NsxPPluginTestCaseMixin): def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None): super(NsxPTestSubnets, self).setUp(plugin=plugin, ext_mgr=ext_mgr) def _create_subnet_bulk(self, fmt, number, net_id, name, ip_version=4, **kwargs): base_data = {'subnet': {'network_id': net_id, 'ip_version': ip_version, 'enable_dhcp': False, 'tenant_id': self._tenant_id}} if 'ipv6_mode' in kwargs: base_data['subnet']['ipv6_ra_mode'] = kwargs['ipv6_mode'] base_data['subnet']['ipv6_address_mode'] = kwargs['ipv6_mode'] # auto-generate cidrs as they should not overlap base_cidr = "10.0.%s.0/24" if ip_version == constants.IP_VERSION_6: base_cidr = "fd%s::/64" # auto-generate cidrs as they should not overlap overrides = dict((k, v) for (k, v) in zip(range(number), [{'cidr': base_cidr % num} for num in range(number)])) kwargs.update({'override': overrides}) return self._create_bulk(fmt, number, 'subnet', base_data, **kwargs) def test_create_external_subnet_with_conflicting_t0_address(self): with self._create_l3_ext_network() as network: data = {'subnet': {'network_id': 
network['network']['id'], 'cidr': '172.20.1.0/24', 'name': 'sub1', 'enable_dhcp': False, 'dns_nameservers': None, 'allocation_pools': None, 'tenant_id': 'tenant_one', 'host_routes': None, 'ip_version': 4}} with mock.patch.object(self.plugin.nsxpolicy.tier0, 'get_uplink_cidrs', return_value=['172.20.1.60/24']): self.assertRaises(n_exc.InvalidInput, self.plugin.create_subnet, context.get_admin_context(), data) def test_create_external_subnet_with_non_conflicting_t0_address(self): with self._create_l3_ext_network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '172.20.1.0/24', 'name': 'sub1', 'enable_dhcp': False, 'dns_nameservers': None, 'allocation_pools': None, 'tenant_id': 'tenant_one', 'host_routes': None, 'ip_version': 4}} with mock.patch.object(self.plugin.nsxpolicy.tier0, 'get_uplink_ips', return_value=['172.20.2.60']): self.plugin.create_subnet( context.get_admin_context(), data) @common_v3.with_disable_dhcp_once def test_create_subnet_ipv6_slaac_with_port_on_network(self): super(NsxPTestSubnets, self).test_create_subnet_ipv6_slaac_with_port_on_network() def test_create_subnet_ipv6_gw_values(self): self.skipTest("IPv6 gateway IP is assigned by the plugin") def test_create_ipv6_subnet_with_host_routes(self): # IPv6 host routes are not allowed with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '100::/64', 'ip_version': 6, 'tenant_id': network['network']['tenant_id'], 'host_routes': [{'destination': '200::/64', 'nexthop': '100::16'}]}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(exc.HTTPClientError.code, res.status_int) def test_update_ipv6_subnet_with_host_routes(self): # IPv6 host routes are not allowed with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '100::/64', 'ip_version': 6, 'tenant_id': network['network']['tenant_id']}} subnet_req = 
self.new_create_request('subnets', data) subnet = self.deserialize(self.fmt, subnet_req.get_response(self.api)) sub_id = subnet['subnet']['id'] # update host routes should fail data = {'subnet': {'host_routes': [{'destination': '200::/64', 'nexthop': '100::16'}]}} update_req = self.new_update_request('subnets', data, sub_id) res = update_req.get_response(self.api) self.assertEqual(exc.HTTPClientError.code, res.status_int) def _verify_dhcp_service(self, network_id, tenant_id, enabled): # Verify if DHCP service is enabled on a network. port_res = self._list_ports('json', 200, network_id, tenant_id=tenant_id, device_owner=constants.DEVICE_OWNER_DHCP) port_list = self.deserialize('json', port_res) self.assertEqual(len(port_list['ports']) == 1, enabled) def test_create_dhcpv6_subnet(self): with mock.patch("vmware_nsxlib.v3.policy.core_resources." "NsxPolicySegmentApi.update") as seg_update,\ self.subnet(ip_version=constants.IP_VERSION_6, cidr='fe80::/64', enable_dhcp=True) as subnet: self.assertEqual(True, subnet['subnet']['enable_dhcp']) # verify that the dhcp port was created self._verify_dhcp_service(subnet['subnet']['network_id'], subnet['subnet']['tenant_id'], True) # verify backend calls seg_update.assert_called_once_with( dhcp_server_config_id=NSX_DHCP_PROFILE_ID, segment_id=subnet['subnet']['network_id'], subnets=[mock.ANY]) def test_subnet_enable_dhcpv6(self): with self.subnet(ip_version=constants.IP_VERSION_6, cidr='fe80::/64', enable_dhcp=False) as subnet: data = {'subnet': {'enable_dhcp': True}} with mock.patch("vmware_nsxlib.v3.policy.core_resources." 
"NsxPolicySegmentApi.update") as seg_update: req = self.new_update_request('subnets', data, subnet['subnet']['id']) res = self.deserialize('json', req.get_response(self.api)) self.assertEqual(True, res['subnet']['enable_dhcp']) # verify that the dhcp port was created self._verify_dhcp_service(subnet['subnet']['network_id'], subnet['subnet']['tenant_id'], True) # verify backend calls seg_update.assert_called_once_with( dhcp_server_config_id=NSX_DHCP_PROFILE_ID, segment_id=subnet['subnet']['network_id'], subnets=[mock.ANY]) def test_subnet_disable_dhcpv6(self): with self.subnet(ip_version=constants.IP_VERSION_6, cidr='fe80::/64', enable_dhcp=True) as subnet: data = {'subnet': {'enable_dhcp': False}} with mock.patch("vmware_nsxlib.v3.policy.core_resources." "NsxPolicySegmentApi.update") as seg_update: req = self.new_update_request('subnets', data, subnet['subnet']['id']) res = self.deserialize('json', req.get_response(self.api)) self.assertEqual(False, res['subnet']['enable_dhcp']) # verify that the dhcp port was deleted self._verify_dhcp_service(subnet['subnet']['network_id'], subnet['subnet']['tenant_id'], False) # verify backend calls seg_update.assert_called_once_with( dhcp_server_config_id=None, segment_id=subnet['subnet']['network_id'], subnets=[]) def test_delete_ipv6_dhcp_subnet(self): with self.subnet(ip_version=constants.IP_VERSION_6, cidr='fe80::/64', enable_dhcp=True) as subnet: with mock.patch("vmware_nsxlib.v3.policy.core_resources." 
"NsxPolicySegmentApi.update") as seg_update: req = self.new_delete_request( 'subnets', subnet['subnet']['id']) res = req.get_response(self.api) self.assertEqual(exc.HTTPNoContent.code, res.status_int) # verify that the dhcp port was deleted self._verify_dhcp_service(subnet['subnet']['network_id'], subnet['subnet']['tenant_id'], False) # verify backend calls seg_update.assert_called_once_with( dhcp_server_config_id=None, segment_id=subnet['subnet']['network_id'], subnets=[]) class NsxPTestSecurityGroup(common_v3.FixExternalNetBaseTest, NsxPPluginTestCaseMixin, test_securitygroup.TestSecurityGroups, test_securitygroup.SecurityGroupDBTestCase): def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None): super(NsxPTestSecurityGroup, self).setUp(plugin=plugin, ext_mgr=ext_mgr) self.project_id = test_db_base_plugin_v2.TEST_TENANT_ID # add provider group attributes secgrp.Securitygroup().update_attributes_map( provider_sg.EXTENDED_ATTRIBUTES_2_0) def test_create_security_group_rule_icmp_with_type_and_code(self): """No non-zero icmp codes are currently supported by the NSX""" self.skipTest('not supported') def test_create_security_group_rule_icmp_with_type(self): name = 'webservers' description = 'my webservers' with self.security_group(name, description) as sg: security_group_id = sg['security_group']['id'] direction = "ingress" remote_ip_prefix = "10.0.0.0/24" protocol = "icmp" # port_range_min (ICMP type) is greater than port_range_max # (ICMP code) in order to confirm min <= max port check is # not called for ICMP. 
port_range_min = 14 port_range_max = None keys = [('remote_ip_prefix', remote_ip_prefix), ('security_group_id', security_group_id), ('direction', direction), ('protocol', protocol), ('port_range_min', port_range_min), ('port_range_max', port_range_max)] with self.security_group_rule(security_group_id, direction, protocol, port_range_min, port_range_max, remote_ip_prefix) as rule: for k, v, in keys: self.assertEqual(rule['security_group_rule'][k], v) @common_v3.with_no_dhcp_subnet def test_list_ports_security_group(self): return super(NsxPTestSecurityGroup, self).test_list_ports_security_group() @mock.patch.object(nsx_plugin.NsxPolicyPlugin, 'get_security_group') def test_create_security_group_rule_with_invalid_tcp_or_udp_protocol( self, get_mock): super(NsxPTestSecurityGroup, self).\ test_create_security_group_rule_with_invalid_tcp_or_udp_protocol() @mock.patch.object(nsx_plugin.NsxPolicyPlugin, 'get_security_group') def test_create_security_group_source_group_ip_and_ip_prefix( self, get_mock): super(NsxPTestSecurityGroup, self).\ test_create_security_group_source_group_ip_and_ip_prefix() def _create_default_sg(self): self.plugin._ensure_default_security_group( context.get_admin_context(), self.project_id) def test_sg_create_on_nsx(self): """Verify that a group and comm-map are created for a new SG""" # Make sure the default SG is created before testing self._create_default_sg() name = description = 'sg1' with mock.patch("vmware_nsxlib.v3.policy.core_resources." "NsxPolicyGroupApi.create_or_overwrite_with_conditions" ) as group_create,\ mock.patch("vmware_nsxlib.v3.policy.core_resources." "NsxPolicyCommunicationMapApi." 
"create_or_overwrite_map_only") as comm_map_create,\ self.security_group(name, description) as sg: sg_id = sg['security_group']['id'] nsx_name = utils.get_name_and_uuid(name, sg_id) group_create.assert_called_once_with( nsx_name, pol_const.DEFAULT_DOMAIN, group_id=sg_id, description=description, conditions=[mock.ANY], tags=mock.ANY) comm_map_create.assert_called_once_with( nsx_name, pol_const.DEFAULT_DOMAIN, map_id=sg_id, description=description, tags=mock.ANY, category=pol_const.CATEGORY_ENVIRONMENT) def _create_provider_security_group(self): body = {'security_group': {'name': 'provider-deny', 'tenant_id': self._tenant_id, 'description': 'provider sg', 'provider': True}} security_group_req = self.new_create_request('security-groups', body) return self.deserialize(self.fmt, security_group_req.get_response(self.ext_api)) def test_provider_sg_on_port(self): psg = self._create_provider_security_group() with mock.patch("vmware_nsxlib.v3.policy.core_resources." "NsxPolicySegmentPortApi.create_or_overwrite" ) as port_create: with self.port(tenant_id=self._tenant_id) as port: # make sure the port has the provider sg port_data = port['port'] self.assertEqual(1, len(port_data['provider_security_groups'])) self.assertEqual(psg['security_group']['id'], port_data['provider_security_groups'][0]) # Make sure the correct security groups tags were set port_create.assert_called_once() actual_tags = port_create.call_args[1]['tags'] sg_tags = 0 psg_tag_found = False for tag in actual_tags: if tag['scope'] == 'os-security-group': sg_tags += 1 if tag['tag'] == psg['security_group']['id']: psg_tag_found = True self.assertEqual(2, sg_tags) self.assertTrue(psg_tag_found) def test_remove_provider_sg_from_port(self): psg = self._create_provider_security_group() with self.port(tenant_id=self._tenant_id) as port: with mock.patch("vmware_nsxlib.v3.policy.core_resources." 
"NsxPolicySegmentPortApi.create_or_overwrite" ) as port_update: # specifically remove the provider sg from the port data = {'port': {'provider_security_groups': []}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize('json', req.get_response(self.api)) self.assertEqual(0, len(res['port']['provider_security_groups'])) # Make sure the correct security groups tags were set port_update.assert_called_once() actual_tags = port_update.call_args[1]['tags'] sg_tags = 0 psg_tag_found = False for tag in actual_tags: if tag['scope'] == 'os-security-group': sg_tags += 1 if tag['tag'] == psg['security_group']['id']: psg_tag_found = True self.assertEqual(1, sg_tags) self.assertFalse(psg_tag_found) def test_sg_rule_create_on_nsx(self): """Verify that a comm-map entry is created for a new SG rule """ name = description = 'sg1' direction = "ingress" remote_ip_prefix = "10.0.0.0/24" protocol = "tcp" port_range_min = 80 port_range_max = 80 with self.security_group(name, description) as sg: sg_id = sg['security_group']['id'] with mock.patch("vmware_nsxlib.v3.policy.core_resources." 
"NsxPolicyCommunicationMapApi.update_with_entries" ) as update_policy,\ self.security_group_rule(sg_id, direction, protocol, port_range_min, port_range_max, remote_ip_prefix): update_policy.assert_called_once() def test_create_security_group_rule_with_remote_group(self): with self.security_group() as sg1, self.security_group() as sg2: security_group_id = sg1['security_group']['id'] direction = "ingress" remote_group_id = sg2['security_group']['id'] protocol = "tcp" keys = [('remote_group_id', remote_group_id), ('security_group_id', security_group_id), ('direction', direction), ('protocol', protocol)] with self.security_group_rule( security_group_id, direction=direction, protocol=protocol, remote_group_id=remote_group_id) as rule: for k, v, in keys: self.assertEqual(rule['security_group_rule'][k], v) def test_delete_security_group_rule_with_remote_group(self): com_plugin.subscribe() with self.security_group() as sg1, self.security_group() as sg2: security_group_id = sg1['security_group']['id'] direction = "ingress" remote_group_id = sg2['security_group']['id'] protocol = "tcp" with self.security_group_rule( security_group_id, direction=direction, protocol=protocol, remote_group_id=remote_group_id) as rule,\ mock.patch.object( self.plugin, "delete_security_group_rule") as del_rule: # delete sg2 self._delete('security-groups', remote_group_id, exc.HTTPNoContent.code) # verify the rule was deleted del_rule.assert_called_once_with( mock.ANY, rule["security_group_rule"]["id"]) class NsxPTestL3ExtensionManager(object): def get_resources(self): # Simulate extension of L3 attribute map l3.L3().update_attributes_map( l3_egm_apidef.RESOURCE_ATTRIBUTE_MAP) l3.L3().update_attributes_map( xroute_apidef.RESOURCE_ATTRIBUTE_MAP) return (l3.L3.get_resources() + address_scope.Address_scope.get_resources()) def get_actions(self): return [] def get_request_extensions(self): return [] class NsxPTestL3NatTest(common_v3.FixExternalNetBaseTest, common_v3.NsxV3SubnetMixin, 
NsxPPluginTestCaseMixin, test_l3_plugin.L3BaseForIntTests, test_address_scope.AddressScopeTestCase): def setUp(self, *args, **kwargs): cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH) cfg.CONF.set_default('max_routes', 3) kwargs['ext_mgr'] = (kwargs.get('ext_mgr') or NsxPTestL3ExtensionManager()) # Make sure the LB callback is not called on router deletion self.lb_mock1 = mock.patch( "vmware_nsx.services.lbaas.octavia.octavia_listener." "NSXOctaviaListenerEndpoint._check_lb_service_on_router") self.lb_mock1.start() self.lb_mock2 = mock.patch( "vmware_nsx.services.lbaas.octavia.octavia_listener." "NSXOctaviaListenerEndpoint._check_lb_service_on_router_interface") self.lb_mock2.start() super(NsxPTestL3NatTest, self).setUp(*args, **kwargs) self.original_subnet = self.subnet self.original_network = self.network self.plugin_instance = directory.get_plugin() self._plugin_name = "%s.%s" % ( self.plugin_instance.__module__, self.plugin_instance.__class__.__name__) self._plugin_class = self.plugin_instance.__class__ def external_network(self, name='net1', admin_state_up=True, fmt=None, **kwargs): if not name: name = 'l3_ext_net' physical_network = 'abc' net_type = utils.NetworkTypes.L3_EXT providernet_args = {pnet.NETWORK_TYPE: net_type, pnet.PHYSICAL_NETWORK: physical_network} return self.original_network(name=name, admin_state_up=admin_state_up, fmt=fmt, router__external=True, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK)) def test_floatingip_create_different_fixed_ip_same_port(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_router_add_interface_multiple_ipv4_subnet_port_returns_400(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_router_add_interface_multiple_ipv6_subnet_port(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_floatingip_update_different_fixed_ip_same_port(self): self.skipTest('Multiple fixed ips on a port 
are not supported') def test_create_multiple_floatingips_same_fixed_ip_same_port(self): self.skipTest('Multiple fixed ips on a port are not supported') class NsxPTestL3NatTestCase(NsxPTestL3NatTest, test_l3_plugin.L3NatDBIntTestCase, test_ext_route.ExtraRouteDBTestCaseBase): def setUp(self, *args, **kwargs): super(NsxPTestL3NatTestCase, self).setUp(*args, **kwargs) mock.patch.object(self.plugin.nsxpolicy, 'search_by_tags', return_value={'results': []}).start() def test__notify_gateway_port_ip_changed(self): self.skipTest('not supported') def test__notify_gateway_port_ip_not_changed(self): self.skipTest('not supported') def test_floatingip_via_router_interface_returns_201(self): self.skipTest('not supported') def test_floatingip_via_router_interface_returns_404(self): self.skipTest('not supported') def test_network_update_external(self): # This plugin does not support updating the external flag of a network self.skipTest('not supported') def test_network_update_external_failure(self): # This plugin does not support updating the external flag of a network self.skipTest('not supported') def test_router_add_gateway_dup_subnet1_returns_400(self): self.skipTest('not supported') def test_router_add_interface_dup_subnet2_returns_400(self): self.skipTest('not supported') def test_router_add_interface_ipv6_port_existing_network_returns_400(self): self.skipTest('not supported') def test_routes_update_for_multiple_routers(self): self.skipTest('not supported') def test_floatingip_multi_external_one_internal(self): self.skipTest('not supported') def test_floatingip_same_external_and_internal(self): self.skipTest('not supported') def test_route_update_with_external_route(self): self.skipTest('not supported') def test_floatingip_update_subnet_gateway_disabled(self): self.skipTest('not supported') def test_floatingip_update_to_same_port_id_twice(self): self.skipTest('Plugin changes floating port status') def test_router_add_interface_by_port_other_tenant_address_out_of_pool( self): 
# multiple fixed ips per port are not supported self.skipTest('not supported') def test_router_add_interface_by_port_other_tenant_address_in_pool(self): # multiple fixed ips per port are not supported self.skipTest('not supported') def test_router_add_interface_by_port_admin_address_out_of_pool(self): # multiple fixed ips per port are not supported self.skipTest('not supported') def test_router_add_gateway_no_subnet(self): self.skipTest('No support for no subnet gateway set') def test_router_remove_ipv6_subnet_from_interface(self): self.skipTest('not supported') def test_router_add_interface_multiple_ipv6_subnets_same_net(self): self.skipTest('not supported') def test_router_add_interface_multiple_ipv4_subnets(self): self.skipTest('not supported') def test_router_update_gateway_upon_subnet_create_max_ips_ipv6(self): self.skipTest('not supported') def test_router_delete_dhcpv6_stateless_subnet_inuse_returns_409(self): self.skipTest('not supported') @common_v3.with_disable_dhcp @common_v3.with_external_network def test_router_update_gateway_upon_subnet_create_ipv6(self): super(NsxPTestL3NatTestCase, self).test_router_update_gateway_upon_subnet_create_ipv6() def test_router_delete_ipv6_slaac_subnet_inuse_returns_409(self): self.skipTest('not supported') def test_router_add_gateway_multiple_subnets_ipv6(self): self.skipTest('not supported') def test_slaac_profile_single_subnet(self): with mock.patch("vmware_nsxlib.v3.policy.core_resources." 
"NsxPolicyTier1Api.update") as t1_update: with self.router() as r, self.network() as n: with self.subnet(network=n, cidr='fd00::0/64', gateway_ip='fd00::1', ip_version=6, ipv6_address_mode='slaac', ipv6_ra_mode='slaac') as s: self._router_interface_action('add', r['router']['id'], s['subnet']['id'], None) # Validate T1 was updated with slaac profile t1_update.assert_called_with( r['router']['id'], ipv6_ndra_profile_id='neutron-slaac-profile') self._router_interface_action('remove', r['router']['id'], s['subnet']['id'], None) # Validate T1 was updated with default profile t1_update.assert_called_with( r['router']['id'], ipv6_ndra_profile_id='neutron-no-slaac-profile') def test_slaac_profile_dual_stack(self): with mock.patch("vmware_nsxlib.v3.policy.core_resources." "NsxPolicyTier1Api.update") as t1_update: with self.router() as r,\ self.network() as n: with self.subnet(network=n, cidr='2.3.3.0/24') as s1,\ self.subnet(network=n, cidr='fd10::0/64', gateway_ip='fd10::1', ip_version=6, ipv6_address_mode='slaac', ipv6_ra_mode='slaac') as s2: self._router_interface_action('add', r['router']['id'], s1['subnet']['id'], None) self._router_interface_action('add', r['router']['id'], s2['subnet']['id'], None) # Validate T1 was updated with slaac profile t1_update.assert_called_with( r['router']['id'], ipv6_ndra_profile_id='neutron-slaac-profile') # Remove non-slaac subnets first self._router_interface_action('remove', r['router']['id'], s1['subnet']['id'], None) self._router_interface_action('remove', r['router']['id'], s2['subnet']['id'], None) # Validate T1 was updated with default profile t1_update.assert_called_with( r['router']['id'], ipv6_ndra_profile_id='neutron-no-slaac-profile') self._delete('subnets', s1['subnet']['id']) self._delete('subnets', s2['subnet']['id']) def test_slaac_profile_multi_net(self): with mock.patch("vmware_nsxlib.v3.policy.core_resources." 
"NsxPolicyTier1Api.update") as t1_update: with self.router() as r,\ self.network() as n1, self.network() as n2: with self.subnet(network=n1, cidr='fd00::0/64', gateway_ip='fd00::1', ip_version=6, enable_dhcp=False) as s1,\ self.subnet(network=n2, cidr='fd10::0/64', gateway_ip='fd10::1', ip_version=6, ipv6_address_mode='slaac', ipv6_ra_mode='slaac') as s2,\ self.subnet(network=n2, cidr='2.3.3.0/24', gateway_ip='2.3.3.1') as s3: # Add three subnets to the router, with slaac-enabled one # in the middle self._router_interface_action('add', r['router']['id'], s1['subnet']['id'], None) self._router_interface_action('add', r['router']['id'], s2['subnet']['id'], None) self._router_interface_action('add', r['router']['id'], s3['subnet']['id'], None) # Validate T1 was updated with slaac profile t1_update.assert_called_with( r['router']['id'], ipv6_ndra_profile_id='neutron-slaac-profile') # Remove non-slaac subnets first self._router_interface_action('remove', r['router']['id'], s1['subnet']['id'], None) self._router_interface_action('remove', r['router']['id'], s3['subnet']['id'], None) self._router_interface_action('remove', r['router']['id'], s2['subnet']['id'], None) # Validate T1 was updated with default profile t1_update.assert_called_with( r['router']['id'], ipv6_ndra_profile_id='neutron-no-slaac-profile') def _test_router_add_dual_stack_subnets(self, s6_first=False): """Add dual stack subnets to router""" with mock.patch("vmware_nsxlib.v3.policy.core_resources." 
"NsxPolicySegmentApi.update") as seg_update: with self.router() as r, self.network() as n: with self.subnet( network=n, cidr='fd00::0/64', gateway_ip='fd00::1', ip_version=6, enable_dhcp=False) as s6, self.subnet( network=n, cidr='2.0.0.0/24', gateway_ip='2.0.0.1') as s4: subnets = [] if s6_first: self._router_interface_action('add', r['router']['id'], s6['subnet']['id'], None) subnets.append(s6['subnet']['cidr']) self._router_interface_action('add', r['router']['id'], s4['subnet']['id'], None) subnets.append(s4['subnet']['cidr']) if not s6_first: self._router_interface_action('add', r['router']['id'], s6['subnet']['id'], None) subnets.append(s6['subnet']['cidr']) # We expect two subnet objects on segment seg_update.assert_called_with( n['network']['id'], subnets=[mock.ANY, mock.ANY], tier1_id=r['router']['id']) def test_router_add_v4_v6_subnets(self): self._test_router_add_dual_stack_subnets() def test_router_add_v6_v4_subnets(self): self._test_router_add_dual_stack_subnets(s6_first=True) def test_router_remove_dual_stack_subnets(self): """Delete dual stack subnets from router interface""" with self.router() as r, self.network() as n: with self.subnet(network=n, cidr='fd00::0/64', ip_version=6, enable_dhcp=False) as s6, \ self.subnet(network=n, cidr='2.0.0.0/24') as s4: body6 = self._router_interface_action('add', r['router']['id'], s6['subnet']['id'], None) body4 = self._router_interface_action('add', r['router']['id'], s4['subnet']['id'], None) port = self._show('ports', body6['port_id']) self.assertEqual(1, len(port['port']['fixed_ips'])) port = self._show('ports', body4['port_id']) self.assertEqual(1, len(port['port']['fixed_ips'])) self._router_interface_action('remove', r['router']['id'], s6['subnet']['id'], None) self._router_interface_action('remove', r['router']['id'], s4['subnet']['id'], None) def test_router_add_interface_ipv6_single_subnet(self): with self.router() as r, self.network() as n: with self.subnet(network=n, cidr='fd00::1/64', 
gateway_ip='fd00::1', ip_version=6, enable_dhcp=False) as s: self._test_router_add_interface_subnet(r, s) @common_v3.with_disable_dhcp def test_route_clear_routes_with_None(self): super(NsxPTestL3NatTestCase, self).test_route_clear_routes_with_None() @common_v3.with_disable_dhcp def test_route_update_with_multi_routes(self): super(NsxPTestL3NatTestCase, self).test_route_update_with_multi_routes() @common_v3.with_disable_dhcp def test_route_update_with_one_route(self): super(NsxPTestL3NatTestCase, self).test_route_update_with_one_route() @common_v3.with_disable_dhcp def test_router_update_delete_routes(self): super(NsxPTestL3NatTestCase, self).test_router_update_delete_routes() @common_v3.with_disable_dhcp def test_router_interface_in_use_by_route(self): super(NsxPTestL3NatTestCase, self).test_router_interface_in_use_by_route() @common_v3.with_disable_dhcp def test_create_floatingip_with_assoc_to_ipv4_and_ipv6_port(self): super(NsxPTestL3NatTestCase, self).test_create_floatingip_with_assoc_to_ipv4_and_ipv6_port() @common_v3.with_external_subnet def test_router_update_gateway_with_external_ip_used_by_gw(self): super(NsxPTestL3NatTestCase, self).test_router_update_gateway_with_external_ip_used_by_gw() @common_v3.with_external_subnet def test_router_update_gateway_with_invalid_external_ip(self): super(NsxPTestL3NatTestCase, self).test_router_update_gateway_with_invalid_external_ip() @common_v3.with_external_subnet def test_router_update_gateway_with_invalid_external_subnet(self): super(NsxPTestL3NatTestCase, self).test_router_update_gateway_with_invalid_external_subnet() @common_v3.with_external_network def test_router_update_gateway_with_different_external_subnet(self): super(NsxPTestL3NatTestCase, self).test_router_update_gateway_with_different_external_subnet() @common_v3.with_external_subnet_once def test_router_update_gateway_with_existed_floatingip(self): with self.subnet(cidr='20.0.0.0/24') as subnet: self._set_net_external(subnet['subnet']['network_id']) with 
self.floatingip_with_assoc() as fip: self._add_external_gateway_to_router( fip['floatingip']['router_id'], subnet['subnet']['network_id'], expected_code=exc.HTTPConflict.code) @common_v3.with_external_network def test_router_update_gateway_add_multiple_prefixes_ipv6(self): super(NsxPTestL3NatTestCase, self).test_router_update_gateway_add_multiple_prefixes_ipv6() @common_v3.with_external_network def test_router_concurrent_delete_upon_subnet_create(self): super(NsxPTestL3NatTestCase, self).test_router_concurrent_delete_upon_subnet_create() @common_v3.with_external_subnet def test_router_add_gateway_dup_subnet2_returns_400(self): super(NsxPTestL3NatTestCase, self).test_router_add_gateway_dup_subnet2_returns_400() @common_v3.with_external_subnet def test_router_update_gateway(self): super(NsxPTestL3NatTestCase, self).test_router_update_gateway() @common_v3.with_external_subnet def test_router_create_with_gwinfo(self): super(NsxPTestL3NatTestCase, self).test_router_create_with_gwinfo() @common_v3.with_external_subnet def test_router_clear_gateway_callback_failure_returns_409(self): super(NsxPTestL3NatTestCase, self).test_router_clear_gateway_callback_failure_returns_409() @common_v3.with_external_subnet def test_router_create_with_gwinfo_ext_ip(self): super(NsxPTestL3NatTestCase, self).test_router_create_with_gwinfo_ext_ip() @common_v3.with_external_network def test_router_create_with_gwinfo_ext_ip_subnet(self): super(NsxPTestL3NatTestCase, self).test_router_create_with_gwinfo_ext_ip_subnet() @common_v3.with_external_subnet_second_time def test_router_delete_with_floatingip_existed_returns_409(self): super(NsxPTestL3NatTestCase, self).test_router_delete_with_floatingip_existed_returns_409() @common_v3.with_external_subnet def test_router_add_and_remove_gateway_tenant_ctx(self): super(NsxPTestL3NatTestCase, self).test_router_add_and_remove_gateway_tenant_ctx() @common_v3.with_external_subnet def test_router_add_and_remove_gateway(self): super(NsxPTestL3NatTestCase, 
self).test_router_add_and_remove_gateway() @common_v3.with_external_subnet def test_floatingip_list_with_sort(self): super(NsxPTestL3NatTestCase, self).test_floatingip_list_with_sort() @common_v3.with_external_subnet_once def test_floatingip_with_assoc_fails(self): super(NsxPTestL3NatTestCase, self).test_floatingip_with_assoc_fails() @common_v3.with_external_subnet_second_time def test_floatingip_update_same_fixed_ip_same_port(self): super(NsxPTestL3NatTestCase, self).test_floatingip_update_same_fixed_ip_same_port() @common_v3.with_external_subnet def test_floatingip_list_with_pagination_reverse(self): super(NsxPTestL3NatTestCase, self).test_floatingip_list_with_pagination_reverse() @common_v3.with_external_subnet_once def test_floatingip_association_on_unowned_router(self): super(NsxPTestL3NatTestCase, self).test_floatingip_association_on_unowned_router() @common_v3.with_external_network def test_delete_ext_net_with_disassociated_floating_ips(self): super(NsxPTestL3NatTestCase, self).test_delete_ext_net_with_disassociated_floating_ips() @common_v3.with_external_network def test_create_floatingip_with_subnet_and_invalid_fip_address(self): super( NsxPTestL3NatTestCase, self).test_create_floatingip_with_subnet_and_invalid_fip_address() @common_v3.with_external_subnet def test_create_floatingip_with_duplicated_specific_ip(self): super(NsxPTestL3NatTestCase, self).test_create_floatingip_with_duplicated_specific_ip() @common_v3.with_external_subnet def test_create_floatingip_with_subnet_id_non_admin(self): super(NsxPTestL3NatTestCase, self).test_create_floatingip_with_subnet_id_non_admin() @common_v3.with_external_subnet def test_floatingip_list_with_pagination(self): super(NsxPTestL3NatTestCase, self).test_floatingip_list_with_pagination() @common_v3.with_external_subnet def test_create_floatingips_native_quotas(self): super(NsxPTestL3NatTestCase, self).test_create_floatingips_native_quotas() @common_v3.with_external_network def 
test_create_floatingip_with_multisubnet_id(self): super(NsxPTestL3NatTestCase, self).test_create_floatingip_with_multisubnet_id() @common_v3.with_external_network def test_create_floatingip_with_subnet_id_and_fip_address(self): super(NsxPTestL3NatTestCase, self).test_create_floatingip_with_subnet_id_and_fip_address() @common_v3.with_external_subnet def test_create_floatingip_with_specific_ip(self): super(NsxPTestL3NatTestCase, self).test_create_floatingip_with_specific_ip() @common_v3.with_external_network def test_create_floatingip_ipv6_and_ipv4_network_creates_ipv4(self): super(NsxPTestL3NatTestCase, self).test_create_floatingip_ipv6_and_ipv4_network_creates_ipv4() @common_v3.with_external_subnet_once def test_create_floatingip_non_admin_context_agent_notification(self): super( NsxPTestL3NatTestCase, self).test_create_floatingip_non_admin_context_agent_notification() @common_v3.with_external_subnet def test_create_floatingip_no_ext_gateway_return_404(self): super(NsxPTestL3NatTestCase, self).test_create_floatingip_no_ext_gateway_return_404() @common_v3.with_external_subnet def test_create_floatingip_with_specific_ip_out_of_allocation(self): super(NsxPTestL3NatTestCase, self).test_create_floatingip_with_specific_ip_out_of_allocation() @common_v3.with_external_subnet_third_time def test_floatingip_update_different_router(self): super(NsxPTestL3NatTestCase, self).test_floatingip_update_different_router() def test_floatingip_update(self): super(NsxPTestL3NatTestCase, self).test_floatingip_update( expected_status=constants.FLOATINGIP_STATUS_DOWN) @common_v3.with_external_subnet_second_time def test_floatingip_with_invalid_create_port(self): self._test_floatingip_with_invalid_create_port(self._plugin_name) def test_router_add_gateway_notifications(self): with self.router() as r,\ self._create_l3_ext_network() as ext_net,\ self.subnet(network=ext_net, enable_dhcp=False): with mock.patch.object(registry, 'publish') as publish: self._add_external_gateway_to_router( 
r['router']['id'], ext_net['network']['id']) expected = [mock.call( resources.ROUTER_GATEWAY, events.AFTER_CREATE, mock.ANY, payload=mock.ANY)] publish.assert_has_calls(expected) def test_router_add_gateway_no_subnet_forbidden(self): with self.router() as r: with self._create_l3_ext_network() as n: self._add_external_gateway_to_router( r['router']['id'], n['network']['id'], expected_code=exc.HTTPBadRequest.code) def test_route_update_illegal_ip_ver(self): routes = [{'destination': '21.0.0.0/24', 'nexthop': 'fd00::d6c'}] with self.router() as r: with self.subnet(cidr='fd00::0/64', ip_version=6, enable_dhcp=False) as s: fixed_ip_data = [{'ip_address': 'fd00::2'}] with self.port(subnet=s, fixed_ips=fixed_ip_data) as p: self._router_interface_action( 'add', r['router']['id'], None, p['port']['id']) self._update('routers', r['router']['id'], {'router': {'routes': routes}}, expected_code=400) def test_router_update_on_external_port(self): with self.router() as r: with self._create_l3_ext_network() as ext_net,\ self.subnet(network=ext_net, cidr='10.0.1.0/24', enable_dhcp=False) as s: self._add_external_gateway_to_router( r['router']['id'], s['subnet']['network_id']) body = self._show('routers', r['router']['id']) net_id = body['router']['external_gateway_info']['network_id'] self.assertEqual(net_id, s['subnet']['network_id']) port_res = self._list_ports( 'json', 200, s['subnet']['network_id'], tenant_id=r['router']['tenant_id'], device_owner=constants.DEVICE_OWNER_ROUTER_GW) port_list = self.deserialize('json', port_res) self.assertEqual(len(port_list['ports']), 1) routes = [{'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}] self.assertRaises(n_exc.InvalidInput, self.plugin_instance.update_router, context.get_admin_context(), r['router']['id'], {'router': {'routes': routes}}) updates = {'admin_state_up': False} self.assertRaises(n_exc.InvalidInput, self.plugin_instance.update_router, context.get_admin_context(), r['router']['id'], {'router': updates}) 
self._remove_external_gateway_from_router( r['router']['id'], s['subnet']['network_id']) body = self._show('routers', r['router']['id']) gw_info = body['router']['external_gateway_info'] self.assertIsNone(gw_info) @mock.patch.object(nsx_plugin.NsxPolicyPlugin, 'validate_availability_zones') def test_create_router_with_availability_zone(self, mock_validate_az): name = 'rtr-with-zone' zone = ['zone1'] mock_validate_az.return_value = None with self.router(name=name, availability_zone_hints=zone) as rtr: az_hints = rtr['router']['availability_zone_hints'] self.assertListEqual(zone, az_hints) def test_update_router_distinct_edge_cluster(self): # define an edge cluster in the config edge_cluster = uuidutils.generate_uuid() cfg.CONF.set_override('edge_cluster', edge_cluster, 'nsx_p') self._initialize_azs() path_prefix = ("/infra/sites/default/enforcement-points/default/" "edge-clusters/") # create a router and external network with self.router() as r, \ self._create_l3_ext_network() as ext_net, \ self.subnet(network=ext_net, cidr='10.0.1.0/24', enable_dhcp=False) as s, \ mock.patch("vmware_nsxlib.v3.policy.core_resources." "NsxPolicyTier1Api.get_edge_cluster_path", return_value=False), \ mock.patch("vmware_nsxlib.v3.policy.core_resources." "NsxPolicyTier1Api.set_edge_cluster_path" ) as add_srv_router,\ mock.patch("vmware_nsxlib.v3.policy.core_resources." 
"NsxPolicyTier1Api.get_realized_id"): self._add_external_gateway_to_router( r['router']['id'], s['subnet']['network_id']) add_srv_router.assert_called_once_with( mock.ANY, '%s%s' % (path_prefix, edge_cluster)) def test_router_add_interface_cidr_overlapped_with_gateway(self): with self.router() as r,\ self._create_l3_ext_network() as ext_net,\ self.subnet(cidr='10.0.1.0/24') as s1,\ self.subnet(network=ext_net, cidr='10.0.0.0/16', enable_dhcp=False) as s2: self._add_external_gateway_to_router( r['router']['id'], s2['subnet']['network_id']) res = self._router_interface_action( 'add', r['router']['id'], s1['subnet']['id'], None, expected_code=exc.HTTPBadRequest.code) self.assertIn('NeutronError', res) def test_router_add_gateway_overlapped_with_interface_cidr(self): with self.router() as r,\ self._create_l3_ext_network() as ext_net,\ self.subnet(cidr='10.0.1.0/24') as s1,\ self.subnet(network=ext_net, cidr='10.0.0.0/16', enable_dhcp=False) as s2: self._router_interface_action( 'add', r['router']['id'], s1['subnet']['id'], None) res = self._add_external_gateway_to_router( r['router']['id'], s2['subnet']['network_id'], expected_code=exc.HTTPBadRequest.code) self.assertIn('NeutronError', res) def test_router_add_interface_by_port_cidr_overlapped_with_gateway(self): with self.router() as r,\ self._create_l3_ext_network() as ext_net,\ self.subnet(cidr='10.0.1.0/24') as s1,\ self.subnet(network=ext_net, cidr='10.0.0.0/16', enable_dhcp=False) as s2,\ self.port(subnet=s1) as p: self._add_external_gateway_to_router( r['router']['id'], s2['subnet']['network_id']) res = self._router_interface_action( 'add', r['router']['id'], None, p['port']['id'], expected_code=exc.HTTPBadRequest.code) self.assertIn('NeutronError', res) @common_v3.with_disable_dhcp def test_create_floatingip_with_assoc_to_ipv6_subnet(self): super(NsxPTestL3NatTestCase, self).test_create_floatingip_with_assoc_to_ipv6_subnet() @common_v3.with_disable_dhcp def 
test_router_add_interface_ipv6_subnet_without_gateway_ip(self): super(NsxPTestL3NatTestCase, self).test_router_add_interface_ipv6_subnet_without_gateway_ip() @common_v3.with_disable_dhcp def test_router_add_interface_multiple_ipv6_subnets_different_net(self): super(NsxPTestL3NatTestCase, self).\ test_router_add_interface_multiple_ipv6_subnets_different_net() @common_v3.with_disable_dhcp def test_create_floatingip_ipv6_only_network_returns_400(self): super(NsxPTestL3NatTestCase, self).test_create_floatingip_ipv6_only_network_returns_400() def test_router_add_iface_ipv6_ext_ra_subnet_returns_400(self): self.skipTest('DHCPv6 not supported') def test_create_floatingip_invalid_fixed_ipv6_address_returns_400(self): self.skipTest('Failed because of illegal port id') def test_create_floatingip_with_router_interface_device_owner_fail(self): # This tests that an error is raised when trying to assign a router # interface port with floatingip. with self.subnet(cidr='30.0.0.0/24', gateway_ip=None) as private_sub: with self.port( subnet=private_sub, device_owner=constants.DEVICE_OWNER_ROUTER_INTF) as p: port_id = p['port']['id'] with self.router() as r: self._router_interface_action('add', r['router']['id'], None, port_id) with self.external_network() as public_net, self.subnet( network=public_net, cidr='12.0.0.0/24', enable_dhcp=False) as public_sub: self._add_external_gateway_to_router( r['router']['id'], public_sub['subnet']['network_id']) self._make_floatingip( self.fmt, public_sub['subnet']['network_id'], port_id=port_id, http_status=exc.HTTPBadRequest.code) def test_assign_floatingip_to_router_interface_device_owner_fail(self): # This tests that an error is raised when trying to assign a router # interface port with floatingip. 
with self.subnet(cidr='30.0.0.0/24', gateway_ip=None) as private_sub: with self.port( subnet=private_sub, device_owner=constants.DEVICE_OWNER_ROUTER_INTF) as p: port_id = p['port']['id'] with self.router() as r: self._router_interface_action('add', r['router']['id'], None, port_id) with self.external_network() as public_net, self.subnet( network=public_net, cidr='12.0.0.0/24', enable_dhcp=False) as public_sub: self._add_external_gateway_to_router( r['router']['id'], public_sub['subnet']['network_id']) fip = self._make_floatingip(self.fmt, public_sub[ 'subnet']['network_id']) self._update('floatingips', fip['floatingip'][ 'id'], {'floatingip': {'port_id': port_id}}, expected_code=exc.HTTPBadRequest.code) def test_router_delete_with_lb_service(self): self.lb_mock1.stop() self.lb_mock2.stop() # Create the LB object - here the delete callback is registered loadbalancer = loadbalancer_mgr.EdgeLoadBalancerManagerFromDict() oct_listener = octavia_listener.NSXOctaviaListenerEndpoint( loadbalancer=loadbalancer) with self.router() as router: with mock.patch.object( self.plugin.nsxpolicy, 'search_by_tags', return_value={'results': [{'id': 'dummy'}]}): self.assertRaises(nc_exc.CallbackFailure, self.plugin_instance.delete_router, context.get_admin_context(), router['router']['id']) # Unregister callback oct_listener._unsubscribe_router_delete_callback() self.lb_mock1.start() self.lb_mock2.start() def test_router_delete_with_no_lb_service(self): self.lb_mock1.stop() self.lb_mock2.stop() # Create the LB object - here the delete callback is registered loadbalancer = loadbalancer_mgr.EdgeLoadBalancerManagerFromDict() oct_listener = octavia_listener.NSXOctaviaListenerEndpoint( loadbalancer=loadbalancer) with self.router() as router: with mock.patch.object( self.plugin.nsxpolicy, 'search_by_tags', return_value={'results': []}): self.plugin_instance.delete_router( context.get_admin_context(), router['router']['id']) # Unregister callback 
oct_listener._unsubscribe_router_delete_callback() self.lb_mock1.start() self.lb_mock2.start() def test_router_gw_info_rollback(self): """Fail the GW addition and verify rollback was performed""" with self.router() as r,\ self.external_network() as public_net,\ self.subnet(network=public_net, cidr='12.0.0.0/24', enable_dhcp=False) as s1,\ mock.patch("vmware_nsxlib.v3.policy.core_resources." "NsxPolicyTier1Api.update_route_advertisement", side_effect=nsxlib_exc.NsxLibException): # Make sure creation fails self._add_external_gateway_to_router( r['router']['id'], s1['subnet']['network_id'], expected_code=exc.HTTPInternalServerError.code) # Make sure there is no GW configured body = self._show('routers', r['router']['id']) self.assertIsNone(body['router']['external_gateway_info']) def test_router_create_with_gw_info_failed(self): """Fail the GW addition during router creation and verify rollback was performed """ with self.router() as r,\ self.external_network() as public_net,\ self.subnet(network=public_net, cidr='12.0.0.0/24', enable_dhcp=False) as s1,\ mock.patch("vmware_nsxlib.v3.policy.core_resources." 
"NsxPolicyTier1Api.update_route_advertisement", side_effect=nsxlib_exc.NsxLibException): # Make sure creation fails self._add_external_gateway_to_router( r['router']['id'], s1['subnet']['network_id'], expected_code=exc.HTTPInternalServerError.code) # Make sure there is no GW configured body = self._show('routers', r['router']['id']) self.assertIsNone(body['router']['external_gateway_info']) def test_create_router_gateway_fails(self): with self.external_network() as public_net,\ self.subnet(network=public_net, cidr='12.0.0.0/24', enable_dhcp=False),\ mock.patch.object(self.plugin.nsxpolicy.tier1, "get_edge_cluster_path", return_value=False),\ mock.patch.object(self.plugin.nsxpolicy.tier1, "set_edge_cluster_path", side_effect=nsxlib_exc.NsxLibException): data = {'router': { 'name': 'router1', 'admin_state_up': True, 'tenant_id': self._tenant_id, 'external_gateway_info': { 'network_id': public_net['network']['id']}}} self.assertRaises(nsxlib_exc.NsxLibException, self.plugin.create_router, self.ctx, data) # Verify router doesn't persist on failure routers = self.plugin.get_routers(self.ctx) self.assertEqual(0, len(routers)) def test_delete_router_gateway_fails(self): """Verify that router deletion continues even if gw update fails""" with self.router() as r,\ self.external_network() as public_net,\ self.subnet(network=public_net, cidr='12.0.0.0/24', enable_dhcp=False) as s1: self._add_external_gateway_to_router( r['router']['id'], s1['subnet']['network_id']) with mock.patch("vmware_nsxlib.v3.policy.core_resources." 
"NsxPolicyTier1Api.update_route_advertisement", side_effect=nsxlib_exc.NsxLibException): self._delete('routers', r['router']['id']) routers = self.plugin.get_routers(self.ctx) self.assertEqual(0, len(routers)) def _test_nat_rules_firewall_match(self, config_val, call_val): cfg.CONF.set_override('firewall_match_internal_addr', config_val, 'nsx_p') with self.subnet(cidr='20.0.0.0/24') as subnet: self._set_net_external(subnet['subnet']['network_id']) with mock.patch("vmware_nsxlib.v3.policy.core_resources." "NsxPolicyTier1NatRuleApi." "create_or_overwrite") as add_func,\ self.floatingip_with_assoc(): add_func.assert_called_with( mock.ANY, mock.ANY, action='DNAT', destination_network=mock.ANY, firewall_match=call_val, nat_rule_id=mock.ANY, sequence_number=mock.ANY, translated_network=mock.ANY) def test_nat_rules_firewall_match_internal(self): self._test_nat_rules_firewall_match( True, pol_const.NAT_FIREWALL_MATCH_INTERNAL) def test_nat_rules_firewall_match_external(self): self._test_nat_rules_firewall_match( False, pol_const.NAT_FIREWALL_MATCH_EXTERNAL) def test_router_interface_with_dhcp_subnet(self): # Policy DHCP does not allow 1 dhcp subnet and another router # interface subnet on the same overlay network with self.router() as r,\ self.network() as net,\ self.subnet(cidr='20.0.0.0/24', network=net),\ self.subnet(cidr='30.0.0.0/24', network=net, enable_dhcp=False) as if_subnet: self._router_interface_action( 'add', r['router']['id'], if_subnet['subnet']['id'], None, expected_code=exc.HTTPBadRequest.code) def test_router_interface_ndprofile_ipv4(self): with self.router() as r,\ self.network() as net,\ self.subnet(cidr='20.0.0.0/24', network=net) as if_subnet,\ mock.patch("vmware_nsxlib.v3.policy.core_resources." 
"NsxPolicyTier1Api.update") as t1_update: # Adding ipv4 interface self._router_interface_action( 'add', r['router']['id'], if_subnet['subnet']['id'], None) t1_update.assert_not_called() # Removing ipv4 interface self._router_interface_action( 'remove', r['router']['id'], if_subnet['subnet']['id'], None) t1_update.assert_not_called() def _test_router_interface_ndprofile(self, profile_with, enable_dhcp=True, mode='slaac'): with self.router() as r,\ self.network() as net,\ self.subnet(cidr='2001::/64', network=net, ip_version=6, enable_dhcp=enable_dhcp, ipv6_address_mode=mode, ipv6_ra_mode=mode) as if_subnet,\ mock.patch("vmware_nsxlib.v3.policy.core_resources." "NsxPolicyTier1Api.update") as t1_update: # Adding subnet interface self._router_interface_action( 'add', r['router']['id'], if_subnet['subnet']['id'], None) t1_update.assert_called_with( r['router']['id'], ipv6_ndra_profile_id=profile_with) t1_update.reset_mock() # Removing subnet interface self._router_interface_action( 'remove', r['router']['id'], if_subnet['subnet']['id'], None) t1_update.assert_called_with( r['router']['id'], ipv6_ndra_profile_id=nsx_plugin.NO_SLAAC_NDRA_PROFILE_ID) def test_router_interface_ndprofile_no_dhcp(self): self._test_router_interface_ndprofile( nsx_plugin.NO_SLAAC_NDRA_PROFILE_ID, enable_dhcp=False, mode=None) def test_router_interface_ndprofile_slaac(self): self._test_router_interface_ndprofile( nsx_plugin.SLAAC_NDRA_PROFILE_ID, enable_dhcp=True, mode=constants.IPV6_SLAAC) def test_router_interface_ndprofile_stateful(self): self._test_router_interface_ndprofile( nsx_plugin.STATEFUL_DHCP_NDRA_PROFILE_ID, enable_dhcp=True, mode=constants.DHCPV6_STATEFUL) def test_router_interface_ndprofile_stateless(self): self._test_router_interface_ndprofile( nsx_plugin.STATELESS_DHCP_NDRA_PROFILE_ID, enable_dhcp=True, mode=constants.DHCPV6_STATELESS) def _test_router_interfaces_ndprofile(self, sub1_enable_dhcp, sub1_mode, sub2_enable_dhcp, sub2_mode, sub1_profile, mixed_profile=None, 
successful=True, sub1_ipversion=6, sub2_ipversion=6): cidr1 = '2001::/64' if sub1_ipversion == 6 else '201.0.0.0/24' cidr2 = '2002::/64' if sub2_ipversion == 6 else '202.0.0.0/24' with self.router() as r,\ self.network() as net1, self.network() as net2,\ self.subnet(cidr=cidr1, network=net1, ip_version=sub1_ipversion, enable_dhcp=sub1_enable_dhcp, ipv6_address_mode=sub1_mode, ipv6_ra_mode=sub1_mode) as sub1,\ self.subnet(cidr=cidr2, network=net2, ip_version=sub2_ipversion, enable_dhcp=sub2_enable_dhcp, ipv6_address_mode=sub2_mode, ipv6_ra_mode=sub2_mode) as sub2,\ mock.patch("vmware_nsxlib.v3.policy.core_resources." "NsxPolicyTier1Api.update") as t1_update: # Adding first interface self._router_interface_action( 'add', r['router']['id'], sub1['subnet']['id'], None) if sub1_ipversion == 6: t1_update.assert_called_with( r['router']['id'], ipv6_ndra_profile_id=sub1_profile) t1_update.reset_mock() else: t1_update.assert_not_called() # Adding the 2nd interface expected_code = (exc.HTTPBadRequest.code if not successful else exc.HTTPOk.code) self._router_interface_action( 'add', r['router']['id'], sub2['subnet']['id'], None, expected_code=expected_code) if not successful: return if sub2_ipversion == 6: t1_update.assert_called_with( r['router']['id'], ipv6_ndra_profile_id=mixed_profile) t1_update.reset_mock() else: t1_update.assert_not_called() # Removing the 2nd interface self._router_interface_action( 'remove', r['router']['id'], sub2['subnet']['id'], None) if sub2_ipversion == 6: t1_update.assert_called_with( r['router']['id'], ipv6_ndra_profile_id=sub1_profile) else: t1_update.assert_not_called() def test_router_interfaces_ndprofile_slaac_slaac(self): self._test_router_interfaces_ndprofile( True, constants.IPV6_SLAAC, True, constants.IPV6_SLAAC, nsx_plugin.SLAAC_NDRA_PROFILE_ID, nsx_plugin.SLAAC_NDRA_PROFILE_ID) def test_router_interfaces_ndprofile_slaac_stateful(self): self._test_router_interfaces_ndprofile( True, constants.IPV6_SLAAC, True, constants.DHCPV6_STATEFUL, 
nsx_plugin.SLAAC_NDRA_PROFILE_ID, None, successful=False) def test_router_interfaces_ndprofile_slaac_stateless(self): self._test_router_interfaces_ndprofile( True, constants.IPV6_SLAAC, True, constants.DHCPV6_STATELESS, nsx_plugin.SLAAC_NDRA_PROFILE_ID, None, successful=False) def test_router_interfaces_ndprofile_disabled_stateful(self): self._test_router_interfaces_ndprofile( False, None, True, constants.DHCPV6_STATEFUL, nsx_plugin.NO_SLAAC_NDRA_PROFILE_ID, nsx_plugin.STATEFUL_DHCP_NDRA_PROFILE_ID) def test_router_interfaces_ndprofile_disabled_stateless(self): self._test_router_interfaces_ndprofile( False, None, True, constants.DHCPV6_STATELESS, nsx_plugin.NO_SLAAC_NDRA_PROFILE_ID, nsx_plugin.STATELESS_DHCP_NDRA_PROFILE_ID) def test_router_interfaces_ndprofile_stateful_stateless(self): self._test_router_interfaces_ndprofile( True, constants.DHCPV6_STATEFUL, True, constants.DHCPV6_STATELESS, nsx_plugin.STATEFUL_DHCP_NDRA_PROFILE_ID, None, successful=False) def test_router_interfaces_ndprofile_v4_stateless(self): self._test_router_interfaces_ndprofile( True, None, True, constants.DHCPV6_STATELESS, nsx_plugin.NO_SLAAC_NDRA_PROFILE_ID, nsx_plugin.STATELESS_DHCP_NDRA_PROFILE_ID, sub1_ipversion=4) def test_router_interfaces_ndprofile_stateless_v4(self): self._test_router_interfaces_ndprofile( True, constants.DHCPV6_STATELESS, True, None, nsx_plugin.STATELESS_DHCP_NDRA_PROFILE_ID, nsx_plugin.STATELESS_DHCP_NDRA_PROFILE_ID, sub2_ipversion=4) def _test_router_vlan_interface_ndprofile(self, profile_with, enable_dhcp=True, mode='slaac'): providernet_args = {pnet.NETWORK_TYPE: 'vlan', pnet.SEGMENTATION_ID: 11} with mock.patch('vmware_nsxlib.v3.policy.core_resources.' 
'NsxPolicyTransportZoneApi.get_transport_type', return_value=nsx_constants.TRANSPORT_TYPE_VLAN), \ self.network(name='vlan_net', providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID)) as net,\ self.router() as r,\ self.subnet(cidr='2001::/64', network=net, ip_version=6, enable_dhcp=enable_dhcp, ipv6_address_mode=mode, ipv6_ra_mode=mode) as if_subnet,\ self._create_l3_ext_network() as ext_net,\ self.subnet(network=ext_net, cidr='10.0.0.0/16', enable_dhcp=False) as ext_sub,\ mock.patch("vmware_nsxlib.v3.policy.core_resources." "NsxPolicyTier1Api.add_segment_interface") as t1_add,\ mock.patch("vmware_nsxlib.v3.policy.core_resources." "NsxPolicyTier1Api.remove_segment_interface") as t1_del: # Add router GW self._add_external_gateway_to_router( r['router']['id'], ext_sub['subnet']['network_id']) # Adding subnet interface self._router_interface_action( 'add', r['router']['id'], if_subnet['subnet']['id'], None) t1_add.assert_called_once_with( r['router']['id'], mock.ANY, mock.ANY, [mock.ANY], profile_with) # Removing subnet interface self._router_interface_action( 'remove', r['router']['id'], if_subnet['subnet']['id'], None) t1_del.assert_called_once_with(r['router']['id'], mock.ANY) def test_router_vlan_interface_ndprofile_no_dhcp(self): self._test_router_vlan_interface_ndprofile( nsx_plugin.NO_SLAAC_NDRA_PROFILE_ID, enable_dhcp=False, mode=None) def test_router_vlan_interface_ndprofile_slaac(self): self._test_router_vlan_interface_ndprofile( nsx_plugin.SLAAC_NDRA_PROFILE_ID, enable_dhcp=True, mode=constants.IPV6_SLAAC) def test_router_vlan_interface_ndprofile_stateful(self): self._test_router_vlan_interface_ndprofile( nsx_plugin.STATEFUL_DHCP_NDRA_PROFILE_ID, enable_dhcp=True, mode=constants.DHCPV6_STATEFUL) def test_router_vlan_interface_ndprofile_stateless(self): self._test_router_vlan_interface_ndprofile( nsx_plugin.STATELESS_DHCP_NDRA_PROFILE_ID, enable_dhcp=True, mode=constants.DHCPV6_STATELESS) 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_p/test_policy_dhcp_metadata.py0000644000175000017500000014456600000000000030426 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock import netaddr from oslo_config import cfg from oslo_utils import uuidutils from neutron.extensions import securitygroup as secgrp from neutron_lib import constants from neutron_lib import context from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from vmware_nsx.common import config from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.extensions import advancedserviceproviders as as_providers from vmware_nsx.plugins.nsx_p import availability_zones as nsx_az from vmware_nsx.tests.unit.nsx_p import test_plugin from vmware_nsxlib.v3 import core_resources from vmware_nsxlib.v3.policy import core_resources as nsx_resources from vmware_nsxlib.v3 import utils as nsxlib_utils def set_az_in_config(name, metadata_proxy="metadata_proxy1", dhcp_server_config="dsc1", native_metadata_route="2.2.2.2", dns_domain='aaaa', nameservers=['bbbb']): group_name = 'az:%s' % name cfg.CONF.set_override('availability_zones', [name], group="nsx_p") config.register_nsxp_azs(cfg.CONF, [name]) cfg.CONF.set_override("metadata_proxy", metadata_proxy, group=group_name) 
cfg.CONF.set_override("dhcp_profile", dhcp_server_config, group=group_name) cfg.CONF.set_override("native_metadata_route", native_metadata_route, group=group_name) cfg.CONF.set_override("dns_domain", dns_domain, group=group_name) cfg.CONF.set_override("nameservers", nameservers, group=group_name) class NsxPolicyDhcpTestCase(test_plugin.NsxPPluginTestCaseMixin): """Test native dhcp config when using MP DHCP""" #TODO(asarfaty): Add tests for DHCPv6 def setUp(self): self._orig_dhcp_agent_notification = cfg.CONF.dhcp_agent_notification cfg.CONF.set_override('dhcp_agent_notification', False) cfg.CONF.set_override('dhcp_profile', 'dsc1', 'nsx_p') super(NsxPolicyDhcpTestCase, self).setUp() self._az_name = 'zone1' self.az_metadata_route = '3.3.3.3' set_az_in_config(self._az_name, native_metadata_route=self.az_metadata_route) self._patcher = mock.patch.object(core_resources.NsxLibDhcpProfile, 'get') self._patcher.start() self._initialize_azs() self.plugin._init_dhcp_metadata() def tearDown(self): self._patcher.stop() cfg.CONF.set_override('dhcp_agent_notification', self._orig_dhcp_agent_notification) super(NsxPolicyDhcpTestCase, self).tearDown() def _make_subnet_data(self, name=None, network_id=None, cidr=None, gateway_ip=None, tenant_id=None, allocation_pools=None, enable_dhcp=True, dns_nameservers=None, ip_version=4, host_routes=None, shared=False): return {'subnet': { 'name': name, 'network_id': network_id, 'cidr': cidr, 'gateway_ip': gateway_ip, 'tenant_id': tenant_id, 'allocation_pools': allocation_pools, 'ip_version': ip_version, 'enable_dhcp': enable_dhcp, 'dns_nameservers': dns_nameservers, 'host_routes': host_routes, 'shared': shared}} def _bind_name(self, port, ip_version=4): return 'IPv%s binding for port %s' % (ip_version, port['port']['id']) def _verify_dhcp_service(self, network_id, tenant_id, enabled): # Verify if DHCP service is enabled on a network. 
port_res = self._list_ports('json', 200, network_id, tenant_id=tenant_id, device_owner=constants.DEVICE_OWNER_DHCP) port_list = self.deserialize('json', port_res) self.assertEqual(len(port_list['ports']) == 1, enabled) def _verify_dhcp_binding(self, subnet, port_data, update_data, assert_data): # Verify if DHCP binding is updated. with mock.patch('vmware_nsxlib.v3.policy.core_resources.' 'SegmentDhcpStaticBindingConfigApi.' 'create_or_overwrite_v4') as update_dhcp_binding: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' device_id = uuidutils.generate_uuid() with self.port(subnet=subnet, device_owner=device_owner, device_id=device_id, **port_data) as port: binding_name = self._bind_name(port) ip_address = port['port']['fixed_ips'][0]['ip_address'] options = {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': ip_address}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': constants.IPv4_ANY, 'next_hop': subnet['subnet']['gateway_ip']}]}} if 'extra_dhcp_opts' in port_data: other_opts = [] options['others'] = [] for opt in port_data['extra_dhcp_opts']: other_opts.append( {'code': nsxlib_utils.get_dhcp_opt_code( opt['opt_name']), 'values': [opt['opt_value']]}) options['others'] = other_opts binding_data = {'mac_address': port['port']['mac_address'], 'ip_address': ip_address, 'gateway_address': subnet['subnet']['gateway_ip'], 'host_name': 'host-%s' % ip_address.replace('.', '-'), 'lease_time': 86400, 'options': options} # Verify the initial bindings call. update_dhcp_binding.assert_called_once_with( binding_name, subnet['subnet']['network_id'], binding_id=port['port']['id'] + '-ipv4', **binding_data) update_dhcp_binding.reset_mock() # Update the port with provided data. 
self.plugin.update_port( context.get_admin_context(), port['port']['id'], update_data) # Extend basic binding data with to-be-asserted data. binding_data.update(assert_data) # Verify the update call. update_dhcp_binding.assert_called_once_with( binding_name, subnet['subnet']['network_id'], binding_id=port['port']['id'] + '-ipv4', **binding_data) def _verify_dhcp_binding_v6(self, subnet, port_data, update_data, assert_data): # Verify if DHCP-v6 binding is updated. with mock.patch('vmware_nsxlib.v3.policy.core_resources.' 'SegmentDhcpStaticBindingConfigApi.' 'create_or_overwrite_v6') as update_dhcp_binding: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' device_id = uuidutils.generate_uuid() with self.port(subnet=subnet, device_owner=device_owner, device_id=device_id, **port_data) as port: binding_name = self._bind_name(port, 6) ip_address = port['port']['fixed_ips'][0]['ip_address'] binding_data = {'mac_address': port['port']['mac_address'], 'ip_addresses': [ip_address], 'lease_time': 86400} # Verify the initial bindings call. update_dhcp_binding.assert_called_once_with( binding_name, subnet['subnet']['network_id'], binding_id=port['port']['id'] + '-ipv6', **binding_data) update_dhcp_binding.reset_mock() # Update the port with provided data. self.plugin.update_port( context.get_admin_context(), port['port']['id'], update_data) # Extend basic binding data with to-be-asserted data. binding_data.update(assert_data) # Verify the update call. update_dhcp_binding.assert_called_once_with( binding_name, subnet['subnet']['network_id'], binding_id=port['port']['id'] + '-ipv6', **binding_data) def test_dhcp_service_with_create_network(self): # Test if DHCP service is disabled on a network when it is created. 
with self.network() as network: self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], False) def test_dhcp_service_with_delete_dhcp_network(self): # Test if DHCP service is disabled when directly deleting a network # with a DHCP-enabled subnet. with self.network() as network: with self.subnet(network=network, enable_dhcp=True): self.plugin.delete_network(context.get_admin_context(), network['network']['id']) self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], False) def test_dhcp_service_with_create_non_dhcp_subnet(self): # Test if DHCP service is disabled on a network when a DHCP-disabled # subnet is created. with self.network() as network: with self.subnet(network=network, enable_dhcp=False): self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], False) def test_dhcp_service_with_create_multiple_non_dhcp_subnets(self): # Test if DHCP service is disabled on a network when multiple # DHCP-disabled subnets are created. with self.network() as network: with self.subnet(network=network, cidr='10.0.0.0/24', enable_dhcp=False): with self.subnet(network=network, cidr='20.0.0.0/24', enable_dhcp=False): self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], False) def test_dhcp_service_with_create_dhcp_subnet(self): # Test if DHCP service is enabled on a network when a DHCP-enabled # subnet is created. with self.network() as network: with self.subnet(network=network, enable_dhcp=True): self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], True) def test_dhcp_service_with_create_multiple_dhcp_subnets(self): # Test if multiple DHCP-enabled subnets cannot be created in a network. 
with self.network() as network: with self.subnet(network=network, cidr='10.0.0.0/24', enable_dhcp=True): subnet = {'subnet': {'network_id': network['network']['id'], 'cidr': '20.0.0.0/24', 'enable_dhcp': True}} self.assertRaises( n_exc.InvalidInput, self.plugin.create_subnet, context.get_admin_context(), subnet) def test_dhcp_service_with_delete_dhcp_subnet(self): # Test if DHCP service is disabled on a network when a DHCP-disabled # subnet is deleted. with self.network() as network: with self.subnet(network=network, enable_dhcp=True) as subnet: self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], True) self.plugin.delete_subnet(context.get_admin_context(), subnet['subnet']['id']) self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], False) def test_dhcp_service_with_update_dhcp_subnet(self): # Test if DHCP service is enabled on a network when a DHCP-disabled # subnet is updated to DHCP-enabled. with self.network() as network: with self.subnet(network=network, enable_dhcp=False) as subnet: self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], False) data = {'subnet': {'enable_dhcp': True}} self.plugin.update_subnet(context.get_admin_context(), subnet['subnet']['id'], data) self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], True) def test_dhcp_service_with_update_multiple_dhcp_subnets(self): # Test if a DHCP-disabled subnet cannot be updated to DHCP-enabled # if a DHCP-enabled subnet already exists in the same network. 
with self.network() as network: with self.subnet(network=network, cidr='10.0.0.0/24', enable_dhcp=True): with self.subnet(network=network, cidr='20.0.0.0/24', enable_dhcp=False) as subnet: self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], True) data = {'subnet': {'enable_dhcp': True}} self.assertRaises( n_exc.InvalidInput, self.plugin.update_subnet, context.get_admin_context(), subnet['subnet']['id'], data) def test_dhcp_service_with_update_dhcp_port(self): # Test if DHCP server IP is updated when the corresponding DHCP port # IP is changed. with mock.patch('vmware_nsxlib.v3.policy.core_resources.' 'NsxPolicySegmentApi.' 'update') as update_segment_dhcp: with self.subnet(cidr='10.0.0.0/24', enable_dhcp=True) as subnet: filters = { 'network_id': [subnet['subnet']['network_id']], 'device_owner': [constants.DEVICE_OWNER_DHCP] } dhcp_ports = self.plugin.get_ports( context.get_admin_context(), filters=filters) port = dhcp_ports[0] old_ip = port['fixed_ips'][0]['ip_address'] new_ip = str(netaddr.IPAddress(old_ip) + 1) data = {'port': {'fixed_ips': [ {'subnet_id': subnet['subnet']['id'], 'ip_address': new_ip}]}} update_segment_dhcp.reset_mock() self.plugin.update_port(context.get_admin_context(), port['id'], data) update_segment_dhcp.assert_called_once() def test_dhcp_binding_with_create_port(self): # Test if DHCP binding is added when a compute port is created. with mock.patch('vmware_nsxlib.v3.policy.core_resources.' 'SegmentDhcpStaticBindingConfigApi.' 
'create_or_overwrite_v4') as create_dhcp_binding: with self.subnet(enable_dhcp=True) as subnet: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' device_id = uuidutils.generate_uuid() with self.port(subnet=subnet, device_owner=device_owner, device_id=device_id) as port: binding_name = self._bind_name(port) ip = port['port']['fixed_ips'][0]['ip_address'] hostname = 'host-%s' % ip.replace('.', '-') options = {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': ip}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': '0.0.0.0/0', 'next_hop': subnet['subnet']['gateway_ip']}]}} create_dhcp_binding.assert_called_once_with( binding_name, subnet['subnet']['network_id'], binding_id=port['port']['id'] + '-ipv4', mac_address=port['port']['mac_address'], ip_address=ip, host_name=hostname, lease_time=cfg.CONF.nsx_p.dhcp_lease_time, options=options, gateway_address=subnet['subnet']['gateway_ip']) def test_dhcp_binding_with_create_port_with_opts(self): # Test if DHCP binding is added when a compute port is created # with extra options. opt_name = 'interface-mtu' opt_code = 26 opt_val = '9000' with mock.patch('vmware_nsxlib.v3.policy.core_resources.' 'SegmentDhcpStaticBindingConfigApi.' 
'create_or_overwrite_v4') as create_dhcp_binding: with self.subnet(enable_dhcp=True) as subnet: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' device_id = uuidutils.generate_uuid() extra_dhcp_opts = [{'opt_name': opt_name, 'opt_value': opt_val}] with self.port(subnet=subnet, device_owner=device_owner, device_id=device_id, extra_dhcp_opts=extra_dhcp_opts, arg_list=('extra_dhcp_opts',)) as port: binding_name = self._bind_name(port) ip = port['port']['fixed_ips'][0]['ip_address'] hostname = 'host-%s' % ip.replace('.', '-') options = {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': ip}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': '0.0.0.0/0', 'next_hop': subnet['subnet']['gateway_ip']}]}, 'others': [{'code': opt_code, 'values': [opt_val]}]} create_dhcp_binding.assert_called_once_with( binding_name, subnet['subnet']['network_id'], binding_id=port['port']['id'] + '-ipv4', mac_address=port['port']['mac_address'], ip_address=ip, host_name=hostname, lease_time=cfg.CONF.nsx_p.dhcp_lease_time, options=options, gateway_address=subnet['subnet']['gateway_ip']) def test_dhcp_binding_with_create_port_with_opts121(self): # Test if DHCP binding is added when a compute port is created # with extra option121. with mock.patch('vmware_nsxlib.v3.policy.core_resources.' 'SegmentDhcpStaticBindingConfigApi.' 
'create_or_overwrite_v4') as create_dhcp_binding: with self.subnet(enable_dhcp=True) as subnet: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' device_id = uuidutils.generate_uuid() extra_dhcp_opts = [{'opt_name': 'classless-static-route', 'opt_value': '1.0.0.0/24,1.2.3.4'}] with self.port(subnet=subnet, device_owner=device_owner, device_id=device_id, extra_dhcp_opts=extra_dhcp_opts, arg_list=('extra_dhcp_opts',)) as port: ip = port['port']['fixed_ips'][0]['ip_address'] binding_name = self._bind_name(port) hostname = 'host-%s' % ip.replace('.', '-') options = {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': ip}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': '0.0.0.0/0', 'next_hop': subnet['subnet']['gateway_ip']}, {'network': '1.0.0.0/24', 'next_hop': '1.2.3.4'}]}} create_dhcp_binding.assert_called_once_with( binding_name, subnet['subnet']['network_id'], binding_id=port['port']['id'] + '-ipv4', mac_address=port['port']['mac_address'], ip_address=ip, host_name=hostname, lease_time=cfg.CONF.nsx_p.dhcp_lease_time, options=options, gateway_address=subnet['subnet']['gateway_ip']) def test_dhcp_binding_with_create_port_with_bad_opts(self): with self.subnet(enable_dhcp=True) as subnet: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' device_id = uuidutils.generate_uuid() ctx = context.get_admin_context() # Use illegal opt-name extra_dhcp_opts = [{'opt_name': 'Dummy', 'opt_value': 'Dummy'}] data = {'port': { 'name': 'dummy', 'network_id': subnet['subnet']['network_id'], 'tenant_id': subnet['subnet']['tenant_id'], 'device_owner': device_owner, 'device_id': device_id, 'extra_dhcp_opts': extra_dhcp_opts, 'admin_state_up': True, 'fixed_ips': [], 'mac_address': '00:00:00:00:00:01', }} self.assertRaises(n_exc.InvalidInput, self.plugin.create_port, ctx, data) # Use illegal option121 value 
extra_dhcp_opts = [{'opt_name': 'classless-static-route', 'opt_value': '1.0.0.0/24,5.5.5.5,cc'}] data['port']['extra_dhcp_opts'] = extra_dhcp_opts self.assertRaises(n_exc.InvalidInput, self.plugin.create_port, ctx, data) def test_dhcp_binding_with_delete_port(self): # Test if DHCP binding is removed when the associated compute port # is deleted. with mock.patch('vmware_nsxlib.v3.policy.core_resources.' 'SegmentDhcpStaticBindingConfigApi.' 'delete') as delete_dhcp_binding: with self.subnet(enable_dhcp=True) as subnet: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' device_id = uuidutils.generate_uuid() with self.port(subnet=subnet, device_owner=device_owner, device_id=device_id) as port: self.plugin.delete_port( context.get_admin_context(), port['port']['id']) delete_dhcp_binding.assert_called_with( port['port']['network_id'], port['port']['id'] + '-ipv4') def test_dhcp_binding_with_update_port_delete_ip(self): # Test if DHCP binding is deleted when the IP of the associated # compute port is deleted. with mock.patch('vmware_nsxlib.v3.policy.core_resources.' 'SegmentDhcpStaticBindingConfigApi.' 'delete') as delete_dhcp_binding: with self.subnet(enable_dhcp=True) as subnet: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' device_id = uuidutils.generate_uuid() with self.port(subnet=subnet, device_owner=device_owner, device_id=device_id) as port: data = {'port': {'fixed_ips': [], 'admin_state_up': False, secgrp.SECURITYGROUPS: []}} self.plugin.update_port( context.get_admin_context(), port['port']['id'], data) delete_dhcp_binding.assert_called_with( port['port']['network_id'], port['port']['id'] + '-ipv4') def test_dhcp_binding_with_update_port_ip(self): # Test if DHCP binding is updated when the IP of the associated # compute port is changed. 
with self.subnet(cidr='10.0.0.0/24', enable_dhcp=True) as subnet: port_data = {'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.3'}]} new_ip = '10.0.0.4' update_data = {'port': {'fixed_ips': [ {'subnet_id': subnet['subnet']['id'], 'ip_address': new_ip}]}} assert_data = {'host_name': 'host-%s' % new_ip.replace('.', '-'), 'ip_address': new_ip, 'options': {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': new_ip}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': constants.IPv4_ANY, 'next_hop': subnet['subnet']['gateway_ip']}]}}} self._verify_dhcp_binding(subnet, port_data, update_data, assert_data) def test_dhcp_binding_v6_with_update_port_ip(self): # Test if DHCP binding is updated when the IP of the associated # compute port is changed. with self.subnet(ip_version=6, cidr='101::/64', enable_dhcp=True) as subnet: port_data = {'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': '101::3'}]} new_ip = '101::4' update_data = {'port': {'fixed_ips': [ {'subnet_id': subnet['subnet']['id'], 'ip_address': new_ip}]}} assert_data = {'ip_addresses': [new_ip]} self._verify_dhcp_binding_v6(subnet, port_data, update_data, assert_data) def test_dhcp_binding_with_update_port_mac(self): # Test if DHCP binding is updated when the Mac of the associated # compute port is changed. 
with self.subnet(enable_dhcp=True) as subnet: port_data = {'mac_address': '11:22:33:44:55:66'} new_mac = '22:33:44:55:66:77' update_data = {'port': {'mac_address': new_mac}} assert_data = {'mac_address': new_mac, 'options': {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': mock.ANY}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': constants.IPv4_ANY, 'next_hop': subnet['subnet']['gateway_ip']}]}}} self._verify_dhcp_binding(subnet, port_data, update_data, assert_data) def test_dhcp_binding_with_update_port_mac_ip(self): # Test if DHCP binding is updated when the IP and Mac of the associated # compute port are changed at the same time. with self.subnet(cidr='10.0.0.0/24', enable_dhcp=True) as subnet: port_data = {'mac_address': '11:22:33:44:55:66', 'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.3'}]} new_mac = '22:33:44:55:66:77' new_ip = '10.0.0.4' update_data = {'port': {'mac_address': new_mac, 'fixed_ips': [ {'subnet_id': subnet['subnet']['id'], 'ip_address': new_ip}]}} assert_data = {'host_name': 'host-%s' % new_ip.replace('.', '-'), 'mac_address': new_mac, 'ip_address': new_ip, 'options': {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': new_ip}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': constants.IPv4_ANY, 'next_hop': subnet['subnet']['gateway_ip']}]}}} self._verify_dhcp_binding(subnet, port_data, update_data, assert_data) def test_update_port_with_update_dhcp_opt(self): # Test updating extra-dhcp-opts via port update. 
with self.subnet(cidr='10.0.0.0/24', enable_dhcp=True) as subnet: mac_address = '11:22:33:44:55:66' ip_addr = '10.0.0.3' port_data = {'arg_list': ('extra_dhcp_opts',), 'mac_address': mac_address, 'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': ip_addr}], 'extra_dhcp_opts': [ {'opt_name': 'interface-mtu', 'opt_value': '9000'}]} update_data = {'port': {'extra_dhcp_opts': [ {'opt_name': 'interface-mtu', 'opt_value': '9002'}]}} assert_data = {'mac_address': mac_address, 'ip_address': ip_addr, 'options': {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': ip_addr}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': constants.IPv4_ANY, 'next_hop': subnet['subnet']['gateway_ip']}]}, 'others': [{'code': 26, 'values': ['9002']}]}} self._verify_dhcp_binding(subnet, port_data, update_data, assert_data) def test_update_port_with_adding_dhcp_opt(self): # Test adding extra-dhcp-opts via port update. 
with self.subnet(cidr='10.0.0.0/24', enable_dhcp=True) as subnet: mac_address = '11:22:33:44:55:66' ip_addr = '10.0.0.3' port_data = {'arg_list': ('extra_dhcp_opts',), 'mac_address': mac_address, 'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': ip_addr}], 'extra_dhcp_opts': [ {'opt_name': 'nis-domain', 'opt_value': 'abc'}]} update_data = {'port': {'extra_dhcp_opts': [ {'opt_name': 'interface-mtu', 'opt_value': '9002'}]}} assert_data = {'mac_address': mac_address, 'ip_address': ip_addr, 'options': {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': ip_addr}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': constants.IPv4_ANY, 'next_hop': subnet['subnet']['gateway_ip']}]}, 'others': [{'code': 26, 'values': ['9002']}, {'code': 40, 'values': ['abc']}]}} self._verify_dhcp_binding(subnet, port_data, update_data, assert_data) def test_update_port_with_deleting_dhcp_opt(self): # Test adding extra-dhcp-opts via port update. 
with self.subnet(cidr='10.0.0.0/24', enable_dhcp=True) as subnet: mac_address = '11:22:33:44:55:66' ip_addr = '10.0.0.3' port_data = {'arg_list': ('extra_dhcp_opts',), 'mac_address': mac_address, 'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': ip_addr}], 'extra_dhcp_opts': [ {'opt_name': 'interface-mtu', 'opt_value': '9002'}, {'opt_name': 'nis-domain', 'opt_value': 'abc'}]} update_data = {'port': {'extra_dhcp_opts': [ {'opt_name': 'interface-mtu', 'opt_value': None}]}} assert_data = {'mac_address': mac_address, 'ip_address': ip_addr, 'options': {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % cfg.CONF.nsx_p.native_metadata_route, 'next_hop': ip_addr}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': constants.IPv4_ANY, 'next_hop': subnet['subnet']['gateway_ip']}]}, 'others': [{'code': 40, 'values': ['abc']}]}} self._verify_dhcp_binding(subnet, port_data, update_data, assert_data) def test_dhcp_binding_with_update_port_name(self): # Test if DHCP binding is not updated when the name of the associated # compute port is changed. with mock.patch('vmware_nsxlib.v3.policy.core_resources.' 'SegmentDhcpStaticBindingConfigApi.' 
'create_or_overwrite_v4') as update_dhcp_binding: with self.subnet(cidr='10.0.0.0/24', enable_dhcp=True) as subnet: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' device_id = uuidutils.generate_uuid() with self.port(subnet=subnet, device_owner=device_owner, device_id=device_id, name='abc') as port: data = {'port': {'name': 'xyz'}} update_dhcp_binding.reset_mock() self.plugin.update_port( context.get_admin_context(), port['port']['id'], data) update_dhcp_binding.assert_not_called() def test_create_network_with_bad_az_hint(self): p = directory.get_plugin() ctx = context.get_admin_context() data = {'network': { 'name': 'test-az', 'tenant_id': self._tenant_id, 'port_security_enabled': False, 'admin_state_up': True, 'shared': False, 'availability_zone_hints': ['bad_hint'] }} self.assertRaises(n_exc.NeutronException, p.create_network, ctx, data) def test_create_network_with_az_hint(self): p = directory.get_plugin() ctx = context.get_admin_context() data = {'network': { 'name': 'test-az', 'tenant_id': self._tenant_id, 'port_security_enabled': False, 'admin_state_up': True, 'shared': False, 'availability_zone_hints': [self._az_name] }} # network creation should succeed net = p.create_network(ctx, data) self.assertEqual([self._az_name], net['availability_zone_hints']) self.assertEqual([self._az_name], net['availability_zones']) def test_create_network_with_no_az_hint(self): p = directory.get_plugin() ctx = context.get_admin_context() data = {'network': { 'name': 'test-az', 'tenant_id': self._tenant_id, 'port_security_enabled': False, 'admin_state_up': True, 'shared': False }} # network creation should succeed net = p.create_network(ctx, data) self.assertEqual([], net['availability_zone_hints']) self.assertEqual([nsx_az.DEFAULT_NAME], net['availability_zones']) def test_dhcp_service_with_create_az_network(self): # Test if DHCP service is disabled on a network when it is created. 
with self.network(availability_zone_hints=[self._az_name], arg_list=('availability_zone_hints',)) as network: self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], False) def test_dhcp_binding_with_create_az_port(self): # Test if DHCP binding is added when a compute port is created. with mock.patch('vmware_nsxlib.v3.policy.core_resources.' 'SegmentDhcpStaticBindingConfigApi.' 'create_or_overwrite_v4') as create_dhcp_binding: with self.network( availability_zone_hints=[self._az_name], arg_list=('availability_zone_hints',)) as network: with self.subnet(enable_dhcp=True, network=network) as subnet: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'X' device_id = uuidutils.generate_uuid() with self.port(subnet=subnet, device_owner=device_owner, device_id=device_id) as port: binding_name = self._bind_name(port) ip = port['port']['fixed_ips'][0]['ip_address'] hostname = 'host-%s' % ip.replace('.', '-') options = {'option121': {'static_routes': [ {'network': '%s' % self.az_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % self.az_metadata_route, 'next_hop': ip}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': '0.0.0.0/0', 'next_hop': subnet['subnet']['gateway_ip']}]}} create_dhcp_binding.assert_called_once_with( binding_name, subnet['subnet']['network_id'], binding_id=port['port']['id'] + '-ipv4', mac_address=port['port']['mac_address'], ip_address=ip, host_name=hostname, lease_time=cfg.CONF.nsx_p.dhcp_lease_time, options=options, gateway_address=subnet['subnet']['gateway_ip']) def test_create_subnet_with_dhcp_port(self): with self.subnet(enable_dhcp=True) as subnet: # find the dhcp port and verify it has port security disabled ports = self.plugin.get_ports( context.get_admin_context()) self.assertEqual(1, len(ports)) self.assertEqual('network:dhcp', ports[0]['device_owner']) self.assertEqual(subnet['subnet']['network_id'], ports[0]['network_id']) self.assertEqual(False, 
ports[0]['port_security_enabled']) def test_create_dhcp_subnet_with_rtr_if(self): # Test that cannot create a DHCP subnet if a router interface exists dummy_port = {'fixed_ips': [{'subnet_id': 'dummy'}]} with mock.patch.object(self.plugin, 'get_ports', return_value=[dummy_port]),\ self.network() as net: subnet = self._make_subnet_data( network_id=net['network']['id'], cidr='10.0.0.0/24', tenant_id=net['network']['tenant_id']) self.assertRaises( n_exc.InvalidInput, self.plugin.create_subnet, context.get_admin_context(), subnet) def test_update_dhcp_subnet_with_rtr_if(self): # Test that cannot enable a DHCP on a subnet if a router interface # exists dummy_port = {'fixed_ips': [{'subnet_id': 'dummy'}]} with mock.patch.object(self.plugin, 'get_ports', return_value=[dummy_port]),\ self.network() as net: subnet = self._make_subnet_data( network_id=net['network']['id'], cidr='10.0.0.0/24', tenant_id=net['network']['tenant_id'], enable_dhcp=False) neutron_subnet = self.plugin.create_subnet( context.get_admin_context(), subnet) self.assertRaises( n_exc.InvalidInput, self.plugin.update_subnet, context.get_admin_context(), neutron_subnet['id'], {'subnet': {'enable_dhcp': True}}) def test_create_subnet_with_dhcp_v6_port(self): with self.subnet(enable_dhcp=True, ip_version=6, cidr="2002::/64") as subnet: # find the dhcp port and verify it has port security disabled ports = self.plugin.get_ports( context.get_admin_context()) self.assertEqual(1, len(ports)) self.assertEqual('network:dhcp', ports[0]['device_owner']) self.assertEqual(subnet['subnet']['network_id'], ports[0]['network_id']) self.assertEqual(False, ports[0]['port_security_enabled']) class NsxPolicyMetadataTestCase(test_plugin.NsxPPluginTestCaseMixin): """Test native metadata config when using MP MDProxy""" def setUp(self): self._orig_dhcp_agent_notification = cfg.CONF.dhcp_agent_notification cfg.CONF.set_override('dhcp_agent_notification', False) super(NsxPolicyMetadataTestCase, self).setUp() self._az_name = 'zone1' 
self._az_metadata_proxy = 'dummy' set_az_in_config(self._az_name, metadata_proxy=self._az_metadata_proxy) self._patcher = mock.patch.object(core_resources.NsxLibMetadataProxy, 'get') self._patcher.start() self._initialize_azs() self.plugin._init_dhcp_metadata() def tearDown(self): self._patcher.stop() cfg.CONF.set_override('dhcp_agent_notification', self._orig_dhcp_agent_notification) super(NsxPolicyMetadataTestCase, self).tearDown() def test_metadata_proxy_configuration(self): # Test if dhcp_agent_notification and metadata_proxy are # configured correctly. orig_dhcp_agent_notification = cfg.CONF.dhcp_agent_notification cfg.CONF.set_override('dhcp_agent_notification', True) self.assertRaises(nsx_exc.NsxPluginException, self.plugin._init_dhcp_metadata) cfg.CONF.set_override('dhcp_agent_notification', orig_dhcp_agent_notification) orig_metadata_proxy_uuid = cfg.CONF.nsx_p.metadata_proxy cfg.CONF.set_override('metadata_proxy', '', 'nsx_p') self.assertRaises(cfg.RequiredOptError, self.plugin._init_default_config) cfg.CONF.set_override('metadata_proxy', orig_metadata_proxy_uuid, 'nsx_p') def test_metadata_proxy_with_create_network(self): # Test if native metadata proxy is enabled on a network when it is # created (Using Policy MDproxy). self.plugin._availability_zones_data._default_az.use_policy_md = True with mock.patch.object(nsx_resources.NsxPolicySegmentApi, 'create_or_overwrite') as create: with self.network() as network: create.assert_called_once_with( mock.ANY, segment_id=network['network']['id'], description=mock.ANY, vlan_ids=mock.ANY, transport_zone_id=mock.ANY, tags=mock.ANY, metadata_proxy_id=test_plugin.NSX_MD_PROXY_ID) def test_metadata_proxy_with_create_az_network(self): # Test if native metadata proxy is enabled on a network when it is # created (Using Plolicy MDproxy). 
azs = self.plugin._availability_zones_data.availability_zones azs[self._az_name].use_policy_md = True with mock.patch.object(nsx_resources.NsxPolicySegmentApi, 'create_or_overwrite') as create: with self.network( availability_zone_hints=[self._az_name], arg_list=('availability_zone_hints',)) as network: create.assert_called_once_with( mock.ANY, segment_id=network['network']['id'], description=mock.ANY, vlan_ids=mock.ANY, transport_zone_id=mock.ANY, tags=mock.ANY, metadata_proxy_id='dummy') def test_metadata_proxy_with_get_subnets(self): # Test if get_subnets() handles advanced-service-provider extension, # which is used when processing metadata requests. self.plugin._availability_zones_data._default_az.use_policy_md = True with self.network() as n1, self.network() as n2: with self.subnet(network=n1, enable_dhcp=False) as s1, \ self.subnet(network=n2, enable_dhcp=False) as s2: # Get all the subnets. subnets = self._list('subnets')['subnets'] self.assertEqual(len(subnets), 2) self.assertEqual(set([s['id'] for s in subnets]), set([s1['subnet']['id'], s2['subnet']['id']])) lswitch_id = uuidutils.generate_uuid() neutron_id = n1['network']['id'] segment_path = '/infra/segments/%s' % neutron_id # Get only the subnets associated with a particular advanced # service provider (i.e. logical switch). with mock.patch('vmware_nsxlib.v3.policy.NsxPolicyLib.' 
'search_resource_by_realized_id', return_value=[segment_path]): subnets = self._list('subnets', query_params='%s=%s' % (as_providers.ADV_SERVICE_PROVIDERS, lswitch_id))['subnets'] self.assertEqual(len(subnets), 1) self.assertEqual(subnets[0]['id'], s1['subnet']['id']) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2382548 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_tvd/0000755000175000017500000000000000000000000023176 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_tvd/__init__.py0000644000175000017500000000000000000000000025275 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_tvd/test_plugin.py0000644000175000017500000004130500000000000026110 0ustar00coreycorey00000000000000# Copyright (c) 2017 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock from oslo_config import cfg from oslo_utils import uuidutils from neutron_lib import context from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from vmware_nsx.tests.unit.dvs import test_plugin as dvs_tests from vmware_nsx.tests.unit.nsx_v import test_plugin as v_tests from vmware_nsx.tests.unit.nsx_v3 import test_plugin as t_tests PLUGIN_NAME = 'vmware_nsx.plugin.NsxTVDPlugin' _uuid = uuidutils.generate_uuid class NsxTVDPluginTestCase(v_tests.NsxVPluginV2TestCase, t_tests.NsxV3PluginTestCaseMixin, dvs_tests.NeutronSimpleDvsTestCase): def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None, service_plugins=None): # set the default plugin if self.plugin_type: cfg.CONF.set_override('default_plugin', self.plugin_type, group="nsx_tvd") # set the default availability zones cfg.CONF.set_override('nsx_v_default_availability_zones', ['default'], group="nsx_tvd") cfg.CONF.set_override('nsx_v3_default_availability_zones', ['defaultv3'], group="nsx_tvd") super(NsxTVDPluginTestCase, self).setUp( plugin=plugin, ext_mgr=ext_mgr, with_md_proxy=False) self._project_id = _uuid() self.core_plugin = directory.get_plugin() # create a context with this tenant self.context = context.get_admin_context() self.context.tenant_id = self.project_id # create a default user for this plugin self.core_plugin.create_project_plugin_map(self.context, {'project_plugin_map': {'plugin': self.plugin_type, 'project': self.project_id}}) self.sub_plugin = self.core_plugin.get_plugin_by_type(self.plugin_type) @property def project_id(self): return self._project_id @property def plugin_type(self): pass def _test_plugin_initialized(self): self.assertTrue(self.core_plugin.is_tvd_plugin()) self.assertIsNotNone(self.sub_plugin) def _test_call_create(self, obj_name, calls_count=1, project_id=None, is_bulk=False): method_name = single_name = 'create_%s' % obj_name if is_bulk: method_name = method_name + '_bulk' func_to_call = getattr(self.core_plugin, method_name) if not 
project_id: project_id = self.project_id with mock.patch.object(self.sub_plugin, method_name) as sub_func,\ mock.patch.object(self.sub_plugin, single_name) as single_func: if is_bulk: func_to_call(self.context, {obj_name + 's': [{obj_name: {'tenant_id': project_id}}]}) else: func_to_call(self.context, {obj_name: {'tenant_id': project_id}}) self.assertEqual(calls_count, sub_func.call_count or single_func.call_count) def _test_call_create_with_net_id(self, obj_name, field_name='network_id', calls_count=1, is_bulk=False): method_name = 'create_%s' % obj_name if is_bulk: method_name = method_name + '_bulk' func_to_call = getattr(self.core_plugin, method_name) net_id = _uuid() with mock.patch.object(self.sub_plugin, method_name) as sub_func,\ mock.patch.object(self.core_plugin, '_get_network', return_value={'tenant_id': self.project_id}): if is_bulk: func_to_call(self.context, {obj_name + 's': [{obj_name: {'tenant_id': self.project_id, field_name: net_id}}]}) else: func_to_call(self.context, {obj_name: {'tenant_id': self.project_id, field_name: net_id}}) self.assertEqual(calls_count, sub_func.call_count) def _test_call_delete(self, obj_name): method_name = 'delete_%s' % obj_name func_to_call = getattr(self.core_plugin, method_name) obj_id = _uuid() with mock.patch.object(self.sub_plugin, method_name) as sub_func,\ mock.patch.object(self.core_plugin, '_get_%s' % obj_name, return_value={'tenant_id': self.project_id}): func_to_call(self.context, obj_id) sub_func.assert_called_once() def _test_call_delete_with_net(self, obj_name, field_name='network_id'): method_name = 'delete_%s' % obj_name func_to_call = getattr(self.core_plugin, method_name) obj_id = _uuid() net_id = _uuid() with mock.patch.object(self.sub_plugin, method_name) as sub_func,\ mock.patch.object(self.core_plugin, '_get_%s' % obj_name, return_value={field_name: net_id}),\ mock.patch.object(self.core_plugin, '_get_network', return_value={'tenant_id': self.project_id}): func_to_call(self.context, obj_id) 
sub_func.assert_called_once() def _test_call_update(self, obj_name): method_name = 'update_%s' % obj_name func_to_call = getattr(self.core_plugin, method_name) obj_id = _uuid() with mock.patch.object(self.sub_plugin, method_name) as sub_func,\ mock.patch.object(self.core_plugin, '_get_%s' % obj_name, return_value={'tenant_id': self.project_id}): func_to_call(self.context, obj_id, {obj_name: {}}) sub_func.assert_called_once() def _test_call_update_with_net(self, obj_name, field_name='network_id'): method_name = 'update_%s' % obj_name func_to_call = getattr(self.core_plugin, method_name) obj_id = _uuid() net_id = _uuid() with mock.patch.object(self.sub_plugin, method_name) as sub_func,\ mock.patch.object(self.core_plugin, '_get_%s' % obj_name, return_value={field_name: net_id}),\ mock.patch.object(self.core_plugin, '_get_network', return_value={'tenant_id': self.project_id}): func_to_call(self.context, obj_id, {obj_name: {}}) sub_func.assert_called_once() def _test_call_get(self, obj_name): method_name = 'get_%s' % obj_name func_to_call = getattr(self.core_plugin, method_name) obj_id = _uuid() with mock.patch.object(self.sub_plugin, method_name) as sub_func,\ mock.patch.object(self.core_plugin, '_get_%s' % obj_name, return_value={'tenant_id': self.project_id}): func_to_call(self.context, obj_id) sub_func.assert_called_once() def _test_call_get_with_net(self, obj_name, field_name='network_id'): method_name = 'get_%s' % obj_name func_to_call = getattr(self.core_plugin, method_name) obj_id = _uuid() net_id = _uuid() with mock.patch.object(self.sub_plugin, method_name) as sub_func,\ mock.patch.object(self.core_plugin, '_get_%s' % obj_name, return_value={field_name: net_id}),\ mock.patch.object(self.core_plugin, '_get_network', return_value={'tenant_id': self.project_id}): func_to_call(self.context, obj_id) sub_func.assert_called_once() class TestPluginWithDefaultPlugin(NsxTVDPluginTestCase): """Test TVD plugin with the NSX-T (default) sub plugin""" @property def 
plugin_type(self): return 'nsx-t' def test_plugin_initialized(self): self._test_plugin_initialized() # no unsupported extensions for the nsx_t plugin self.assertItemsEqual( ['router_type', 'router_size'], self.core_plugin._unsupported_fields[self.plugin_type]['router']) self.assertEqual( [], self.core_plugin._unsupported_fields[self.plugin_type]['port']) def test_create_network(self): self._test_call_create('network') def test_create_subnet(self): self._test_call_create_with_net_id('subnet') def test_create_port(self): self._test_call_create_with_net_id('port') def test_create_router(self): self._test_call_create('router') def test_create_floatingip(self): self._test_call_create_with_net_id( 'floatingip', field_name='floating_network_id') def test_create_security_group(self): # plugin will be called twice because of the default sg self._test_call_create('security_group', calls_count=2) def test_create_security_group_rule(self): self._test_call_create('security_group_rule') def test_create_network_bulk(self): self._test_call_create('network', is_bulk=True) def test_create_subnet_bulk(self): self._test_call_create_with_net_id('subnet', is_bulk=True) def test_create_security_group_rule_bulk(self): self._test_call_create('security_group_rule', is_bulk=True) def test_delete_network(self): self._test_call_delete('network') def test_delete_subnet(self): self._test_call_delete_with_net('subnet') def test_delete_port(self): self._test_call_delete_with_net('port') def test_delete_router(self): self._test_call_delete('router') def test_delete_floatingip(self): self._test_call_delete_with_net( 'floatingip', field_name='floating_network_id') def test_delete_security_group(self): self._test_call_delete('security_group') def test_update_network(self): self._test_call_update('network') def test_update_subnet(self): self._test_call_update_with_net('subnet') def test_update_port(self): self._test_call_update_with_net('port') def test_update_router(self): 
self._test_call_update('router') def test_update_floatingip(self): self._test_call_update_with_net( 'floatingip', field_name='floating_network_id') def test_update_security_group(self): self._test_call_update('security_group') def test_unsupported_extensions(self): self.assertRaises(n_exc.InvalidInput, self.core_plugin.create_router, self.context, {'router': {'tenant_id': self.project_id, 'router_type': 'exclusive'}}) def test_get_network(self): self._test_call_get('network') def test_get_subnet(self): self._test_call_get_with_net('subnet') def test_get_port(self): self._test_call_get_with_net('port') def test_get_router(self): self._test_call_get('router') def test_get_floatingip(self): self._test_call_get_with_net( 'floatingip', field_name='floating_network_id') def test_get_security_group(self): self._test_call_get('security_group') def test_add_router_interface(self): rtr_id = _uuid() port_id = _uuid() net_id = _uuid() with mock.patch.object(self.sub_plugin, 'add_router_interface') as sub_func,\ mock.patch.object(self.core_plugin, '_get_router', return_value={'tenant_id': self.project_id}),\ mock.patch.object(self.core_plugin, '_get_port', return_value={'network_id': net_id}),\ mock.patch.object(self.core_plugin, '_get_network', return_value={'tenant_id': self.project_id}),\ mock.patch.object(self.core_plugin, '_validate_interface_info', return_value=(True, False)): self.core_plugin.add_router_interface(self.context, rtr_id, {'port_id': port_id}) sub_func.assert_called_once() def test_add_invalid_router_interface(self): # Test that the plugin prevents adding interface from one plugin # to a router of another plugin rtr_id = _uuid() port_id = _uuid() net_id = _uuid() another_tenant_id = _uuid() another_plugin = 'nsx-v' if self.plugin_type == 'nsx-t' else 'nsx-t' self.core_plugin.create_project_plugin_map(self.context, {'project_plugin_map': {'plugin': another_plugin, 'project': another_tenant_id}}) with mock.patch.object(self.core_plugin, '_get_router', 
return_value={'tenant_id': self.project_id}),\ mock.patch.object(self.core_plugin, '_get_port', return_value={'network_id': net_id}),\ mock.patch.object(self.core_plugin, '_get_network', return_value={'tenant_id': another_tenant_id}),\ mock.patch.object(self.core_plugin, '_validate_interface_info', return_value=(True, False)): self.assertRaises(n_exc.InvalidInput, self.core_plugin.add_router_interface, self.context, rtr_id, {'port_id': port_id}) def test_remove_router_interface(self): rtr_id = _uuid() with mock.patch.object(self.sub_plugin, 'remove_router_interface') as sub_func,\ mock.patch.object(self.core_plugin, '_get_router', return_value={'tenant_id': self.project_id}): self.core_plugin.remove_router_interface(self.context, rtr_id, {}) sub_func.assert_called_once() def test_disassociate_floatingips(self): port_id = _uuid() net_id = _uuid() with mock.patch.object(self.sub_plugin, 'disassociate_floatingips') as sub_func,\ mock.patch.object(self.core_plugin, '_get_port', return_value={'network_id': net_id}),\ mock.patch.object(self.core_plugin, '_get_network', return_value={'tenant_id': self.project_id}): self.core_plugin.disassociate_floatingips(self.context, port_id) sub_func.assert_called_once() def test_new_user(self): project_id = _uuid() self._test_call_create('network', project_id=project_id) class TestPluginWithNsxv(TestPluginWithDefaultPlugin): """Test TVD plugin with the NSX-V sub plugin""" @property def plugin_type(self): return 'nsx-v' def test_plugin_initialized(self): self._test_plugin_initialized() # no unsupported extensions for the nsx_v plugin self.assertEqual( [], self.core_plugin._unsupported_fields[self.plugin_type]['router']) self.assertEqual( [], self.core_plugin._unsupported_fields[self.plugin_type]['port']) def test_unsupported_extensions(self): self.skipTest('No unsupported extensions in this plugin') class TestPluginWithDvs(TestPluginWithDefaultPlugin): """Test TVD plugin with the DVS sub plugin""" @property def plugin_type(self): 
return 'dvs' def test_plugin_initialized(self): self._test_plugin_initialized() # no unsupported extensions for the dvs plugin self.assertItemsEqual( ['mac_learning_enabled', 'provider_security_groups'], self.core_plugin._unsupported_fields[self.plugin_type]['port']) def test_unsupported_extensions(self): net_id = _uuid() with mock.patch.object(self.core_plugin, '_get_network', return_value={'tenant_id': self.project_id}): self.assertRaises(n_exc.InvalidInput, self.core_plugin.create_port, self.context, {'port': {'tenant_id': self.project_id, 'network_id': net_id, 'mac_learning_enabled': True}}) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2382548 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_v/0000755000175000017500000000000000000000000022646 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_v/__init__.py0000644000175000017500000000000000000000000024745 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2382548 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_v/housekeeper/0000755000175000017500000000000000000000000025165 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_v/housekeeper/__init__.py0000644000175000017500000000000000000000000027264 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_v/housekeeper/test_error_backup_edge.py0000644000175000017500000000625000000000000032243 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. 
# All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.tests import base from neutron_lib.plugins import constants from vmware_nsx.plugins.common.housekeeper import base_job from vmware_nsx.plugins.nsx_v.housekeeper import error_backup_edge FAKE_ROUTER_BINDINGS = [ { 'router_id': 'backup-3b0b1fe1-c984', 'status': 'ERROR', 'availability_zone': 'default', 'edge_id': 'edge-782', 'edge_type': 'service', 'appliance_size': 'compact'}] class ErrorBackupEdgeTestCaseReadOnly(base.BaseTestCase): def setUp(self): def get_plugin_mock(alias=constants.CORE): if alias in (constants.CORE, constants.L3): return self.plugin super(ErrorBackupEdgeTestCaseReadOnly, self).setUp() self.plugin = mock.Mock() self.context = mock.Mock() self.context.session = mock.Mock() mock.patch('neutron_lib.plugins.directory.get_plugin', side_effect=get_plugin_mock).start() self.log = mock.Mock() base_job.LOG = self.log self.job = error_backup_edge.ErrorBackupEdgeJob(True, []) def run_job(self): self.job.run(self.context, readonly=True) def test_clean_run(self): mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings', return_value=[]).start() self.run_job() self.log.warning.assert_not_called() def test_broken_backup_edge(self): mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings', return_value=FAKE_ROUTER_BINDINGS).start() self.run_job() self.log.warning.assert_called_once() class ErrorBackupEdgeTestCaseReadWrite(ErrorBackupEdgeTestCaseReadOnly): def run_job(self): 
self.job.run(self.context, readonly=False) def test_broken_backup_edge(self): upd_binding = mock.patch( 'vmware_nsx.db.nsxv_db.update_nsxv_router_binding').start() upd_edge = mock.patch.object(self.plugin.nsx_v, 'update_edge').start() self.job.azs = mock.Mock() az = mock.Mock() mock.patch.object(self.job.azs, 'get_availability_zone', return_value=az).start() super(ErrorBackupEdgeTestCaseReadWrite, self ).test_broken_backup_edge() upd_binding.assert_has_calls( [mock.call(mock.ANY, r['router_id'], status='ACTIVE') for r in FAKE_ROUTER_BINDINGS]) upd_edge.assert_called_with( self.context, 'backup-3b0b1fe1-c984', 'edge-782', 'backup-3b0b1fe1-c984', None, appliance_size='compact', availability_zone=az, dist=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_v/housekeeper/test_error_dhcp_edge.py0000644000175000017500000006007100000000000031715 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import datetime import mock from neutron.tests import base from neutron_lib.plugins import constants from vmware_nsx.plugins.common.housekeeper import base_job from vmware_nsx.plugins.nsx_v.housekeeper import error_dhcp_edge FAKE_ROUTER_BINDINGS = [ { 'router_id': 'dhcp-16c224dd-7c2b-4241-a447-4fc07a3', 'status': 'ERROR', 'availability_zone': 'default', 'edge_id': 'edge-752'}, { 'router_id': 'dhcp-31341032-6911-4596-8b64-afce92f', 'status': 'ERROR', 'availability_zone': 'default', 'edge_id': 'edge-752'}, { 'router_id': 'dhcp-51c97abb-8ac9-4f24-b914-cc30cf8', 'status': 'ERROR', 'availability_zone': 'default', 'edge_id': 'edge-752'}, { 'router_id': 'dhcp-5d01cea4-58f8-4a16-9be0-11012ca', 'status': 'ERROR', 'availability_zone': 'default', 'edge_id': 'edge-752'}, { 'router_id': 'dhcp-65a5335c-4c72-4721-920e-5abdc9e', 'status': 'ERROR', 'availability_zone': 'default', 'edge_id': 'edge-752'}, { 'router_id': 'dhcp-83bce421-b72c-4744-9285-a0fcc25', 'status': 'ERROR', 'availability_zone': 'default', 'edge_id': 'edge-752'}, { 'router_id': 'dhcp-9d2f5b66-c252-4681-86af-9460484', 'status': 'ERROR', 'availability_zone': 'default', 'edge_id': 'edge-752'}, { 'router_id': 'dhcp-aea44408-0448-42dd-9ae6-ed940da', 'status': 'ERROR', 'availability_zone': 'default', 'edge_id': 'edge-752'}] BAD_ROUTER_BINDING = { 'router_id': 'dhcp-11111111-1111-1111-aaaa-aaaaaaa', 'status': 'ERROR', 'availability_zone': 'default', 'edge_id': 'edge-752'} FAKE_EDGE_VNIC_BINDS = [ { 'network_id': '7c0b6fb5-d86c-4e5e-a2af-9ce36971764b', 'vnic_index': 1, 'edge_id': 'edge-752', 'tunnel_index': 1}, { 'network_id': '16c224dd-7c2b-4241-a447-4fc07a38dc80', 'vnic_index': 2, 'edge_id': 'edge-752', 'tunnel_index': 4}, { 'network_id': '65a5335c-4c72-4721-920e-5abdc9e09ba4', 'vnic_index': 2, 'edge_id': 'edge-752', 'tunnel_index': 6}, { 'network_id': 'aea44408-0448-42dd-9ae6-ed940dac564a', 'vnic_index': 4, 'edge_id': 'edge-752', 'tunnel_index': 10}, { 'network_id': '5d01cea4-58f8-4a16-9be0-11012cadbf55', 
'vnic_index': 4, 'edge_id': 'edge-752', 'tunnel_index': 12}, { 'network_id': '51c97abb-8ac9-4f24-b914-cc30cf8e856a', 'vnic_index': 6, 'edge_id': 'edge-752', 'tunnel_index': 16}, { 'network_id': '31341032-6911-4596-8b64-afce92f46bf4', 'vnic_index': 6, 'edge_id': 'edge-752', 'tunnel_index': 18}, { 'network_id': '9d2f5b66-c252-4681-86af-946048414a1f', 'vnic_index': 8, 'edge_id': 'edge-752', 'tunnel_index': 22}, { 'network_id': '83bce421-b72c-4744-9285-a0fcc25b001a', 'vnic_index': 8, 'edge_id': 'edge-752', 'tunnel_index': 24}] BAD_VNIC_BINDING = { 'network_id': '11111111-1111-1111-aaaa-aaaaaaabbaac', 'vnic_index': 8, 'edge_id': 'edge-752', 'tunnel_index': 21} FAKE_INTERNAL_NETWORKS = [ {'availability_zone': u'default', 'network_id': u'7c0b6fb5-d86c-4e5e-a2af-9ce36971764b', 'network_purpose': 'inter_edge_net', 'updated_at': None, '_rev_bumped': False, 'created_at': datetime.datetime(2017, 12, 13, 12, 28, 18)}] FAKE_NETWORK_RESULTS = [{'id': 'e3a02b46-b9c9-4f2f-bcea-7978355a7dca'}, {'id': '031eaf4b-49b8-4003-9369-8a0dd5d7a163'}, {'id': '16c224dd-7c2b-4241-a447-4fc07a38dc80'}, {'id': '1a3b570c-c8b5-411e-8e13-d4dc0b3e56b2'}, {'id': '24b31d2c-fcec-45e5-bdcb-aa089d3713ae'}, {'id': '31341032-6911-4596-8b64-afce92f46bf4'}, {'id': '51c97abb-8ac9-4f24-b914-cc30cf8e856a'}, {'id': '5484b39b-ec6e-43f4-b900-fc1b2c49c71a'}, {'id': '54eae237-3516-4f82-b46f-f955e91c989c'}, {'id': '5a859fa0-bea0-41be-843a-9f9bf39e2509'}, {'id': '5d01cea4-58f8-4a16-9be0-11012cadbf55'}, {'id': '65a5335c-4c72-4721-920e-5abdc9e09ba4'}, {'id': '708f11d4-00d0-48ea-836f-01273cbf36cc'}, {'id': '7c0b6fb5-d86c-4e5e-a2af-9ce36971764b'}, {'id': '83bce421-b72c-4744-9285-a0fcc25b001a'}, {'id': '9d2f5b66-c252-4681-86af-946048414a1f'}, {'id': 'aea44408-0448-42dd-9ae6-ed940dac564a'}, {'id': 'b0cee4e3-266b-48d3-a651-04f1985fe4b0'}, {'id': 'be82b8c5-96a9-4e08-a965-bb09d48ec161'}, {'id': 'e69279c6-9a1e-4f7b-b421-b8b3eb92c54b'}] BACKEND_EDGE_VNICS = {'vnics': [ {'label': 'vNic_0', 'name': 'external', 'addressGroups': 
{'addressGroups': []}, 'mtu': 1500, 'type': 'uplink', 'isConnected': True, 'index': 0, 'portgroupId': 'network-13', 'fenceParameters': [], 'enableProxyArp': False, 'enableSendRedirects': True}, {'label': 'vNic_1', 'name': 'internal1', 'addressGroups': { 'addressGroups': [ {'primaryAddress': '169.254.128.14', 'secondaryAddresses': { 'type': 'secondary_addresses', 'ipAddress': ['169.254.169.254']}, 'subnetMask': '255.255.128.0', 'subnetPrefixLength': '17'}]}, 'mtu': 1500, 'type': 'internal', 'isConnected': True, 'index': 1, 'portgroupId': 'virtualwire-472', 'fenceParameters': [], 'enableProxyArp': False, 'enableSendRedirects': True}, {'label': 'vNic_2', 'name': 'internal2', 'addressGroups': {'addressGroups': []}, 'mtu': 1500, 'type': 'trunk', 'subInterfaces': {'subInterfaces': [ {'isConnected': True, 'label': 'vNic_10', 'name': '1639ff40-8137-4803-a29f-dcf0efc35b34', 'index': 10, 'tunnelId': 4, 'logicalSwitchId': 'virtualwire-497', 'logicalSwitchName': '16c224dd-7c2b-4241-a447-4fc07a38dc80', 'enableSendRedirects': True, 'mtu': 1500, 'addressGroups': {'addressGroups': [{ 'primaryAddress': '10.24.0.2', 'subnetMask': '255.255.255.0', 'subnetPrefixLength': '24'}]}, 'virtualNetworkId': 5025, 'subInterfaceBackingType': 'NETWORK'}, {'isConnected': True, 'label': 'vNic_12', 'name': 'd1515746-a21a-442d-8347-62b36f5791d6', 'index': 12, 'tunnelId': 6, 'logicalSwitchId': 'virtualwire-499', 'logicalSwitchName': '65a5335c-4c72-4721-920e-5abdc9e09ba4', 'enableSendRedirects': True, 'mtu': 1500, 'addressGroups': {'addressGroups': [ {'primaryAddress': '10.26.0.2', 'subnetMask': '255.255.255.0', 'subnetPrefixLength': '24'}]}, 'virtualNetworkId': 5027, 'subInterfaceBackingType': 'NETWORK'}]}, 'isConnected': True, 'index': 2, 'portgroupId': 'dvportgroup-1550', 'fenceParameters': [], 'enableProxyArp': False, 'enableSendRedirects': True}, {'label': 'vNic_3', 'name': 'vnic3', 'addressGroups': {'addressGroups': []}, 'mtu': 1500, 'type': 'internal', 'isConnected': False, 'index': 3, 
'fenceParameters': [], 'enableProxyArp': False, 'enableSendRedirects': True}, {'label': 'vNic_4', 'name': 'internal4', 'addressGroups': {'addressGroups': []}, 'mtu': 1500, 'type': 'trunk', 'subInterfaces': {'subInterfaces': [ {'isConnected': True, 'label': 'vNic_16', 'name': 'e2405dc6-21d7-4421-a70c-3eecf675b286', 'index': 16, 'tunnelId': 10, 'logicalSwitchId': 'virtualwire-503', 'logicalSwitchName': 'aea44408-0448-42dd-9ae6-ed940dac564a', 'enableSendRedirects': True, 'mtu': 1500, 'addressGroups': {'addressGroups': [ {'primaryAddress': '10.30.0.2', 'subnetMask': '255.255.255.0', 'subnetPrefixLength': '24'}]}, 'virtualNetworkId': 5031, 'subInterfaceBackingType': 'NETWORK'}, {'isConnected': True, 'label': 'vNic_18', 'name': 'a10fb348-30e4-477f-817f-bb3c9c9fd3f5', 'index': 18, 'tunnelId': 12, 'logicalSwitchId': 'virtualwire-505', 'logicalSwitchName': '5d01cea4-58f8-4a16-9be0-11012cadbf55', 'enableSendRedirects': True, 'mtu': 1500, 'addressGroups': {'addressGroups': [ {'primaryAddress': '10.32.0.2', 'subnetMask': '255.255.255.0', 'subnetPrefixLength': '24'}]}, 'virtualNetworkId': 5033, 'subInterfaceBackingType': 'NETWORK'}]}, 'isConnected': True, 'index': 4, 'portgroupId': 'dvportgroup-1559', 'fenceParameters': [], 'enableProxyArp': False, 'enableSendRedirects': True}, {'label': 'vNic_5', 'name': 'vnic5', 'addressGroups': {'addressGroups': []}, 'mtu': 1500, 'type': 'internal', 'isConnected': False, 'index': 5, 'fenceParameters': [], 'enableProxyArp': False, 'enableSendRedirects': True}, {'label': 'vNic_6', 'name': 'internal6', 'addressGroups': {'addressGroups': []}, 'mtu': 1500, 'type': 'trunk', 'subInterfaces': {'subInterfaces': [ {'isConnected': True, 'label': 'vNic_22', 'name': '2da534c8-3d9b-4677-aa14-2e66efd09e3f', 'index': 22, 'tunnelId': 16, 'logicalSwitchId': 'virtualwire-509', 'logicalSwitchName': '51c97abb-8ac9-4f24-b914-cc30cf8e856a', 'enableSendRedirects': True, 'mtu': 1500, 'addressGroups': {'addressGroups': [ {'primaryAddress': '10.36.0.2', 'subnetMask': 
'255.255.255.0', 'subnetPrefixLength': '24'}]}, 'virtualNetworkId': 5037, 'subInterfaceBackingType': 'NETWORK'}, {'isConnected': True, 'label': 'vNic_24', 'name': 'd25f00c2-eb82-455c-87b9-d2d510d42917', 'index': 24, 'tunnelId': 18, 'logicalSwitchId': 'virtualwire-511', 'logicalSwitchName': '31341032-6911-4596-8b64-afce92f46bf4', 'enableSendRedirects': True, 'mtu': 1500, 'addressGroups': {'addressGroups': [ {'primaryAddress': '10.38.0.2', 'subnetMask': '255.255.255.0', 'subnetPrefixLength': '24'}]}, 'virtualNetworkId': 5039, 'subInterfaceBackingType': 'NETWORK'}]}, 'isConnected': True, 'index': 6, 'portgroupId': 'dvportgroup-1567', 'fenceParameters': [], 'enableProxyArp': False, 'enableSendRedirects': True}, {'label': 'vNic_7', 'name': 'vnic7', 'addressGroups': {'addressGroups': []}, 'mtu': 1500, 'type': 'internal', 'isConnected': False, 'index': 7, 'fenceParameters': [], 'enableProxyArp': False, 'enableSendRedirects': True}, {'label': 'vNic_8', 'name': 'internal8', 'addressGroups': {'addressGroups': []}, 'mtu': 1500, 'type': 'trunk', 'subInterfaces': {'subInterfaces': [ {'isConnected': True, 'label': 'vNic_28', 'name': 'cf4cc867-e958-4f86-acea-d8a52a4c26c8', 'index': 28, 'tunnelId': 22, 'logicalSwitchId': 'virtualwire-515', 'logicalSwitchName': '9d2f5b66-c252-4681-86af-946048414a1f', 'enableSendRedirects': True, 'mtu': 1500, 'addressGroups': {'addressGroups': [ {'primaryAddress': '10.42.0.2', 'subnetMask': '255.255.255.0', 'subnetPrefixLength': '24'}]}, 'virtualNetworkId': 5043, 'subInterfaceBackingType': 'NETWORK'}, {'isConnected': True, 'label': 'vNic_30', 'name': 'ceab3d83-3ee2-4372-b5d7-f1d47be76e9d', 'index': 30, 'tunnelId': 24, 'logicalSwitchId': 'virtualwire-517', 'logicalSwitchName': '83bce421-b72c-4744-9285-a0fcc25b001a', 'enableSendRedirects': True, 'mtu': 1500, 'addressGroups': {'addressGroups': [ {'primaryAddress': '10.44.0.2', 'subnetMask': '255.255.255.0', 'subnetPrefixLength': '24'}]}, 'virtualNetworkId': 5045, 'subInterfaceBackingType': 
'NETWORK'}]}, 'isConnected': True, 'index': 8, 'portgroupId': 'dvportgroup-1575', 'fenceParameters': [], 'enableProxyArp': False, 'enableSendRedirects': True}, {'label': 'vNic_9', 'name': 'vnic9', 'addressGroups': {'addressGroups': []}, 'mtu': 1500, 'type': 'internal', 'isConnected': False, 'index': 9, 'fenceParameters': [], 'enableProxyArp': False, 'enableSendRedirects': True}]} BAD_SUBINTERFACE = { 'isConnected': True, 'label': 'vNic_31', 'name': '11111111-2222-3333-4444-555555555555', 'index': 31, 'tunnelId': 25, 'logicalSwitchId': 'virtualwire-518', 'logicalSwitchName': '55555555-4444-3333-2222-111111111111', 'enableSendRedirects': True, 'mtu': 1500, 'addressGroups': { 'addressGroups': [ {'primaryAddress': '10.99.0.2', 'subnetMask': '255.255.255.0', 'subnetPrefixLength': '24'}]}, 'virtualNetworkId': 5045, 'subInterfaceBackingType': 'NETWORK'} BAD_INTERFACE = { 'label': 'vNic_8', 'name': 'vnic8', 'addressGroups': {'addressGroups': []}, 'mtu': 1500, 'type': 'internal', 'isConnected': False, 'index': 8, 'fenceParameters': [], 'enableProxyArp': False, 'enableSendRedirects': True} class ErrorDhcpEdgeTestCaseReadOnly(base.BaseTestCase): def setUp(self): def get_plugin_mock(alias=constants.CORE): if alias in (constants.CORE, constants.L3): return self.plugin super(ErrorDhcpEdgeTestCaseReadOnly, self).setUp() self.plugin = mock.Mock() self.context = mock.Mock() self.context.session = mock.Mock() mock.patch('neutron_lib.plugins.directory.get_plugin', side_effect=get_plugin_mock).start() self.plugin.edge_manager = mock.Mock() self.plugin.nsx_v = mock.Mock() self.plugin.nsx_v.vcns = mock.Mock() mock.patch.object(self.plugin, 'get_availability_zone_name_by_edge', return_value='default').start() self.log = mock.Mock() base_job.LOG = self.log self.job = error_dhcp_edge.ErrorDhcpEdgeJob(True, []) def run_job(self): self.job.run(self.context, readonly=True) def test_clean_run(self): mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings', return_value=[]).start() 
self.run_job() self.log.warning.assert_not_called() def test_invalid_router_binding(self): router_binds = copy.deepcopy(FAKE_ROUTER_BINDINGS) router_binds.append(BAD_ROUTER_BINDING) mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings', return_value=router_binds).start() mock.patch('vmware_nsx.db.nsxv_db.get_edge_vnic_bindings_by_edge', return_value=FAKE_EDGE_VNIC_BINDS).start() mock.patch.object(self.plugin, 'get_networks', return_value=FAKE_NETWORK_RESULTS).start() mock.patch.object(self.plugin.nsx_v.vcns, 'get_interfaces', return_value=(None, BACKEND_EDGE_VNICS)).start() mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_internal_networks', return_value=FAKE_INTERNAL_NETWORKS).start() self.run_job() self.log.warning.assert_called_once() def test_invalid_edge_vnic_bindings(self): def fake_vnic_bind(*args, **kwargs): # The DB content is manipulated by the housekeeper. Therefore # get_edge_vnic_bindings_by_edge() output should be altered if fake_vnic_bind.ctr < 2: ret = fake_vnic_bind.vnic_binds else: ret = FAKE_EDGE_VNIC_BINDS fake_vnic_bind.ctr += 1 return ret fake_vnic_bind.ctr = 0 fake_vnic_bind.vnic_binds = copy.deepcopy(FAKE_EDGE_VNIC_BINDS) fake_vnic_bind.vnic_binds.append(BAD_VNIC_BINDING) mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings', return_value=FAKE_ROUTER_BINDINGS).start() mock.patch('vmware_nsx.db.nsxv_db.get_edge_vnic_bindings_by_edge', side_effect=fake_vnic_bind).start() mock.patch.object(self.plugin, 'get_networks', return_value=FAKE_NETWORK_RESULTS).start() mock.patch.object(self.plugin.nsx_v.vcns, 'get_interfaces', return_value=(None, BACKEND_EDGE_VNICS)).start() mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_internal_networks', return_value=FAKE_INTERNAL_NETWORKS).start() self.run_job() self.log.warning.assert_called_once() def test_invalid_edge_sub_if(self): backend_vnics = copy.deepcopy(BACKEND_EDGE_VNICS) backend_vnics['vnics'][8]['subInterfaces']['subInterfaces'].append( BAD_SUBINTERFACE) 
mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings', return_value=FAKE_ROUTER_BINDINGS).start() mock.patch('vmware_nsx.db.nsxv_db.get_edge_vnic_bindings_by_edge', return_value=FAKE_EDGE_VNIC_BINDS).start() mock.patch.object(self.plugin, 'get_networks', return_value=FAKE_NETWORK_RESULTS).start() mock.patch.object(self.plugin.nsx_v.vcns, 'get_interfaces', return_value=(None, backend_vnics)).start() mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_internal_networks', return_value=FAKE_INTERNAL_NETWORKS).start() self.run_job() self.log.warning.assert_called_once() def test_missing_edge_sub_if(self): backend_vnics = copy.deepcopy(BACKEND_EDGE_VNICS) del backend_vnics['vnics'][8]['subInterfaces']['subInterfaces'][1] mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings', return_value=FAKE_ROUTER_BINDINGS).start() mock.patch('vmware_nsx.db.nsxv_db.get_edge_vnic_bindings_by_edge', return_value=FAKE_EDGE_VNIC_BINDS).start() mock.patch.object(self.plugin, 'get_networks', return_value=FAKE_NETWORK_RESULTS).start() mock.patch.object(self.plugin.nsx_v.vcns, 'get_interfaces', return_value=(None, backend_vnics)).start() mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_internal_networks', return_value=FAKE_INTERNAL_NETWORKS).start() self.run_job() self.log.warning.assert_called_once() def test_missing_edge_interface(self): backend_vnics = copy.deepcopy(BACKEND_EDGE_VNICS) backend_vnics['vnics'][8] = BAD_INTERFACE mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings', return_value=FAKE_ROUTER_BINDINGS).start() mock.patch('vmware_nsx.db.nsxv_db.get_edge_vnic_bindings_by_edge', return_value=FAKE_EDGE_VNIC_BINDS).start() mock.patch.object(self.plugin, 'get_networks', return_value=FAKE_NETWORK_RESULTS).start() mock.patch.object(self.plugin.nsx_v.vcns, 'get_interfaces', return_value=(None, backend_vnics)).start() mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_internal_networks', return_value=FAKE_INTERNAL_NETWORKS).start() self.run_job() self.assertEqual(2, 
self.log.warning.call_count) class ErrorDhcpEdgeTestCaseReadWrite(ErrorDhcpEdgeTestCaseReadOnly): def run_job(self): self.job.run(self.context, readonly=False) def test_invalid_router_binding(self): del_binding = mock.patch( 'vmware_nsx.db.nsxv_db.delete_nsxv_router_binding').start() mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings_by_edge', return_value=FAKE_ROUTER_BINDINGS).start() upd_binding = mock.patch( 'vmware_nsx.db.nsxv_db.update_nsxv_router_binding').start() super(ErrorDhcpEdgeTestCaseReadWrite, self ).test_invalid_router_binding() del_binding.assert_called_with(mock.ANY, BAD_ROUTER_BINDING['router_id']) upd_binding.assert_has_calls( [mock.call(mock.ANY, r['router_id'], status='ACTIVE') for r in FAKE_ROUTER_BINDINGS]) def test_invalid_edge_vnic_bindings(self): del_binding = mock.patch( 'vmware_nsx.db.nsxv_db.free_edge_vnic_by_network').start() mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings_by_edge', return_value=FAKE_ROUTER_BINDINGS).start() upd_binding = mock.patch( 'vmware_nsx.db.nsxv_db.update_nsxv_router_binding').start() super(ErrorDhcpEdgeTestCaseReadWrite, self ).test_invalid_edge_vnic_bindings() del_binding.assert_called_with(mock.ANY, BAD_VNIC_BINDING['edge_id'], BAD_VNIC_BINDING['network_id']) upd_binding.assert_has_calls( [mock.call(mock.ANY, r['router_id'], status='ACTIVE') for r in FAKE_ROUTER_BINDINGS]) def test_invalid_edge_sub_if(self): mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings_by_edge', return_value=FAKE_ROUTER_BINDINGS).start() upd_binding = mock.patch( 'vmware_nsx.db.nsxv_db.update_nsxv_router_binding').start() upd_if = mock.patch.object(self.plugin.nsx_v.vcns, 'update_interface').start() super(ErrorDhcpEdgeTestCaseReadWrite, self ).test_invalid_edge_sub_if() upd_binding.assert_has_calls( [mock.call(mock.ANY, r['router_id'], status='ACTIVE') for r in FAKE_ROUTER_BINDINGS]) upd_if.assert_called_with('edge-752', BACKEND_EDGE_VNICS['vnics'][8]) def test_missing_edge_sub_if(self): deleted_sub_if = 
BACKEND_EDGE_VNICS['vnics'][8]['subInterfaces'][ 'subInterfaces'][1] mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings_by_edge', return_value=FAKE_ROUTER_BINDINGS).start() mock.patch.object( self.plugin.edge_manager, '_create_sub_interface', return_value=('dvportgroup-1575', deleted_sub_if)).start() upd_binding = mock.patch( 'vmware_nsx.db.nsxv_db.update_nsxv_router_binding').start() upd_if = mock.patch.object(self.plugin.nsx_v.vcns, 'update_interface').start() super(ErrorDhcpEdgeTestCaseReadWrite, self ).test_missing_edge_sub_if() upd_binding.assert_has_calls( [mock.call(mock.ANY, r['router_id'], status='ACTIVE') for r in FAKE_ROUTER_BINDINGS]) upd_if.assert_called_with('edge-752', BACKEND_EDGE_VNICS['vnics'][8]) def test_missing_edge_interface(self): def fake_create_subif(*args, **kwargs): deleted_sub_if = BACKEND_EDGE_VNICS['vnics'][8]['subInterfaces'][ 'subInterfaces'][fake_create_subif.ctr] fake_create_subif.ctr += 1 return (BACKEND_EDGE_VNICS['vnics'][8]['portgroupId'], deleted_sub_if) fake_create_subif.ctr = 0 mock.patch('vmware_nsx.db.nsxv_db.get_nsxv_router_bindings_by_edge', return_value=FAKE_ROUTER_BINDINGS).start() mock.patch.object( self.plugin.edge_manager, '_create_sub_interface', side_effect=fake_create_subif).start() upd_binding = mock.patch( 'vmware_nsx.db.nsxv_db.update_nsxv_router_binding').start() upd_if = mock.patch.object(self.plugin.nsx_v.vcns, 'update_interface').start() super(ErrorDhcpEdgeTestCaseReadWrite, self ).test_missing_edge_interface() upd_binding.assert_has_calls( [mock.call(mock.ANY, r['router_id'], status='ACTIVE') for r in FAKE_ROUTER_BINDINGS]) upd_if.assert_called_with('edge-752', BACKEND_EDGE_VNICS['vnics'][8]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_v/test_availability_zones.py0000644000175000017500000003011600000000000030150 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. 
# All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from neutron.tests import base from vmware_nsx.common import config from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az DEF_AZ_POOL = ['service:compact:1:2', 'vdr:compact:1:2'] DEF_GLOBAL_POOL = ['service:compact:4:10', 'vdr:compact:4:10'] class NsxvAvailabilityZonesTestCase(base.BaseTestCase): def setUp(self): super(NsxvAvailabilityZonesTestCase, self).setUp() self.az_name = 'zone1' self.group_name = 'az:%s' % self.az_name config.register_nsxv_azs(cfg.CONF, [self.az_name]) cfg.CONF.set_override("ha_placement_random", True, group="nsxv") cfg.CONF.set_override("mgt_net_proxy_ips", ["2.2.2.2"], group="nsxv") cfg.CONF.set_override("dvs_id", "dvs-1", group="nsxv") def _config_az(self, resource_pool_id="respool", datastore_id="datastore", edge_ha=True, ha_datastore_id="hastore", backup_edge_pool=DEF_AZ_POOL, ha_placement_random=False, datacenter_moid="datacenter", mgt_net_moid="portgroup-407", mgt_net_proxy_ips=["1.1.1.1"], mgt_net_proxy_netmask="255.255.255.0", mgt_net_default_gateway="2.2.2.2", external_network="network-17", vdn_scope_id="vdnscope-1", dvs_id="dvs-2"): cfg.CONF.set_override("resource_pool_id", resource_pool_id, group=self.group_name) cfg.CONF.set_override("datastore_id", datastore_id, group=self.group_name) if edge_ha is not None: cfg.CONF.set_override("edge_ha", edge_ha, group=self.group_name) 
cfg.CONF.set_override("ha_datastore_id", ha_datastore_id, group=self.group_name) if ha_placement_random is not None: cfg.CONF.set_override("ha_placement_random", ha_placement_random, group=self.group_name) if datacenter_moid is not None: cfg.CONF.set_override("datacenter_moid", datacenter_moid, group=self.group_name) if backup_edge_pool is not None: cfg.CONF.set_override("backup_edge_pool", backup_edge_pool, group=self.group_name) if mgt_net_moid is not None: cfg.CONF.set_override("mgt_net_moid", mgt_net_moid, group=self.group_name) if mgt_net_proxy_ips is not None: cfg.CONF.set_override("mgt_net_proxy_ips", mgt_net_proxy_ips, group=self.group_name) if mgt_net_proxy_netmask is not None: cfg.CONF.set_override("mgt_net_proxy_netmask", mgt_net_proxy_netmask, group=self.group_name) if mgt_net_default_gateway is not None: cfg.CONF.set_override("mgt_net_default_gateway", mgt_net_default_gateway, group=self.group_name) if external_network is not None: cfg.CONF.set_override("external_network", external_network, group=self.group_name) if vdn_scope_id is not None: cfg.CONF.set_override("vdn_scope_id", vdn_scope_id, group=self.group_name) if dvs_id is not None: cfg.CONF.set_override("dvs_id", dvs_id, group=self.group_name) def test_simple_availability_zone(self): self._config_az() az = nsx_az.NsxVAvailabilityZone(self.az_name) self.assertEqual(self.az_name, az.name) self.assertEqual("respool", az.resource_pool) self.assertEqual("datastore", az.datastore_id) self.assertTrue(az.edge_ha) self.assertEqual("hastore", az.ha_datastore_id) self.assertFalse(az.ha_placement_random) self.assertEqual("datacenter", az.datacenter_moid) self.assertEqual(DEF_AZ_POOL, az.backup_edge_pool) self.assertEqual("portgroup-407", az.mgt_net_moid) self.assertEqual(["1.1.1.1"], az.mgt_net_proxy_ips) self.assertEqual("255.255.255.0", az.mgt_net_proxy_netmask) self.assertEqual("2.2.2.2", az.mgt_net_default_gateway) self.assertEqual("network-17", az.external_network) self.assertEqual("vdnscope-1", 
az.vdn_scope_id) self.assertEqual("dvs-2", az.dvs_id) self.assertTrue(az.az_metadata_support) def test_availability_zone_no_edge_ha(self): self._config_az(edge_ha=False) az = nsx_az.NsxVAvailabilityZone(self.az_name) self.assertEqual(self.az_name, az.name) self.assertEqual("respool", az.resource_pool) self.assertEqual("datastore", az.datastore_id) self.assertFalse(az.edge_ha) self.assertIsNone(az.ha_datastore_id) self.assertFalse(az.ha_placement_random) def test_availability_zone_no_ha_datastore(self): self._config_az(ha_datastore_id=None) az = nsx_az.NsxVAvailabilityZone(self.az_name) self.assertEqual(self.az_name, az.name) self.assertEqual("respool", az.resource_pool) self.assertEqual("datastore", az.datastore_id) self.assertTrue(az.edge_ha) self.assertIsNone(az.ha_datastore_id) self.assertFalse(az.ha_placement_random) def test_missing_group_section(self): self.assertRaises( nsx_exc.NsxInvalidConfiguration, nsx_az.NsxVAvailabilityZone, "doesnt_exist") def test_availability_zone_missing_respool(self): self._config_az(resource_pool_id=None) self.assertRaises( nsx_exc.NsxInvalidConfiguration, nsx_az.NsxVAvailabilityZone, self.az_name) def test_availability_zone_missing_datastore(self): self._config_az(datastore_id=None) self.assertRaises( nsx_exc.NsxInvalidConfiguration, nsx_az.NsxVAvailabilityZone, self.az_name) def test_availability_zone_missing_edge_ha(self): self._config_az(edge_ha=None) az = nsx_az.NsxVAvailabilityZone(self.az_name) self.assertEqual(self.az_name, az.name) self.assertEqual("respool", az.resource_pool) self.assertEqual("datastore", az.datastore_id) self.assertFalse(az.edge_ha) self.assertIsNone(az.ha_datastore_id) self.assertFalse(az.ha_placement_random) def test_availability_zone_missing_edge_placement(self): self._config_az(ha_placement_random=None) az = nsx_az.NsxVAvailabilityZone(self.az_name) self.assertEqual(self.az_name, az.name) self.assertEqual("respool", az.resource_pool) self.assertEqual("datastore", az.datastore_id) 
self.assertTrue(az.edge_ha) self.assertEqual("hastore", az.ha_datastore_id) # ha_placement_random should have the global value self.assertTrue(az.ha_placement_random) def test_availability_zone_missing_backup_pool(self): self._config_az(backup_edge_pool=None) az = nsx_az.NsxVAvailabilityZone(self.az_name) self.assertEqual(self.az_name, az.name) # Should use the global configuration instead self.assertEqual(DEF_GLOBAL_POOL, az.backup_edge_pool) def test_availability_zone_missing_metadata(self): self._config_az(mgt_net_proxy_ips=None) az = nsx_az.NsxVAvailabilityZone(self.az_name) self.assertIsNone(az.mgt_net_moid) self.assertEqual([], az.mgt_net_proxy_ips) self.assertIsNone(az.mgt_net_proxy_netmask) self.assertIsNone(az.mgt_net_default_gateway) self.assertFalse(az.az_metadata_support) def test_availability_zone_same_metadata(self): self._config_az(mgt_net_proxy_ips=["2.2.2.2"]) self.assertRaises( nsx_exc.NsxInvalidConfiguration, nsx_az.NsxVAvailabilityZone, self.az_name) self._config_az(mgt_net_proxy_ips=["2.2.2.2", "3.3.3.3"]) self.assertRaises( nsx_exc.NsxInvalidConfiguration, nsx_az.NsxVAvailabilityZone, self.az_name) class NsxvAvailabilityZonesOldTestCase(base.BaseTestCase): """Test old way of configuring the availability zones using a one-line configuration instead of different dynamic sections """ def setUp(self): super(NsxvAvailabilityZonesOldTestCase, self).setUp() cfg.CONF.set_override("mgt_net_proxy_ips", ["2.2.2.2"], group="nsxv") cfg.CONF.set_override("dvs_id", "dvs-1", group="nsxv") def test_simple_availability_zone(self): az = nsx_az.NsxVAvailabilityZone( "name:respool:datastore:true:hastore") self.assertEqual("name", az.name) self.assertEqual("respool", az.resource_pool) self.assertEqual("datastore", az.datastore_id) self.assertTrue(az.edge_ha) self.assertEqual("hastore", az.ha_datastore_id) self.assertFalse(az.ha_placement_random) self.assertEqual(DEF_GLOBAL_POOL, az.backup_edge_pool) # should get the global configuration (which is empty now) 
self.assertIsNone(az.external_network) self.assertIsNone(az.vdn_scope_id) self.assertEqual("dvs-1", az.dvs_id) # no metadata per az support self.assertFalse(az.az_metadata_support) self.assertIsNone(az.mgt_net_moid) self.assertEqual([], az.mgt_net_proxy_ips) self.assertIsNone(az.mgt_net_proxy_netmask) self.assertIsNone(az.mgt_net_default_gateway) def test_availability_zone_without_ha_datastore(self): az = nsx_az.NsxVAvailabilityZone( "name:respool:datastore:true") self.assertEqual("name", az.name) self.assertEqual("respool", az.resource_pool) self.assertEqual("datastore", az.datastore_id) self.assertTrue(az.edge_ha) self.assertIsNone(az.ha_datastore_id) def test_availability_zone_without_edge_ha(self): az = nsx_az.NsxVAvailabilityZone( "name:respool:datastore:FALSE") self.assertEqual("name", az.name) self.assertEqual("respool", az.resource_pool) self.assertEqual("datastore", az.datastore_id) self.assertFalse(az.edge_ha) self.assertIsNone(az.ha_datastore_id) def test_availability_fail_long_name(self): self.assertRaises( nsx_exc.NsxInvalidConfiguration, nsx_az.NsxVAvailabilityZone, "very-very-very-very-very-longest-name:respool:da:true:ha") def test_availability_fail_few_args(self): self.assertRaises( nsx_exc.NsxInvalidConfiguration, nsx_az.NsxVAvailabilityZone, "name:respool") def test_availability_fail_many_args(self): self.assertRaises( nsx_exc.NsxInvalidConfiguration, nsx_az.NsxVAvailabilityZone, "name:1:2:3:4:5:6") def test_availability_fail_bad_edge_ha(self): self.assertRaises( nsx_exc.NsxInvalidConfiguration, nsx_az.NsxVAvailabilityZone, "name:respool:datastore:truex:hastore") def test_availability_fail_no_ha_datastore(self): self.assertRaises( nsx_exc.NsxInvalidConfiguration, nsx_az.NsxVAvailabilityZone, "name:respool:datastore:false:hastore") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 
vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_v/test_edge_loadbalancer_driver_v2.py0000644000175000017500000015160300000000000031642 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.services.flavors import flavors_plugin from neutron.tests import base from neutron_lib import context from neutron_lib import exceptions as n_exc from oslo_config import cfg from vmware_nsx.db import nsxv_db from vmware_nsx.services.lbaas import base_mgr from vmware_nsx.services.lbaas.nsx_v.implementation import healthmon_mgr from vmware_nsx.services.lbaas.nsx_v.implementation import l7policy_mgr from vmware_nsx.services.lbaas.nsx_v.implementation import l7rule_mgr from vmware_nsx.services.lbaas.nsx_v.implementation import listener_mgr from vmware_nsx.services.lbaas.nsx_v.implementation import loadbalancer_mgr from vmware_nsx.services.lbaas.nsx_v.implementation import member_mgr from vmware_nsx.services.lbaas.nsx_v.implementation import pool_mgr from vmware_nsx.services.lbaas.nsx_v import lbaas_common as lb_common from vmware_nsx.services.lbaas.octavia import octavia_listener from vmware_nsx.tests.unit.services.lbaas import lb_data_models as lb_models from vmware_nsx.tests.unit.services.lbaas import lb_translators # TODO(asarfaty): Use octavia api for those tests LB_VIP = '10.0.0.10' LB_SUBNET = 'some-subnet' LB_EDGE_ID = 'edge-x' LB_ID = 'xxx-xxx' LB_TENANT_ID = 'yyy-yyy' LB_VIP_FWR_ID = 
'fwr-1' LB_BINDING = {'loadbalancer_id': LB_ID, 'edge_id': LB_EDGE_ID, 'edge_fw_rule_id': LB_VIP_FWR_ID, 'vip_address': LB_VIP} LISTENER_ID = 'xxx-111' EDGE_APP_PROFILE_ID = 'appp-x' EDGE_APP_PROF_DEF = {'sslPassthrough': False, 'insertXForwardedFor': False, 'serverSslEnabled': False, 'name': LISTENER_ID, 'template': 'http', 'persistence': { 'cookieMode': 'insert', 'cookieName': 'default_cookie_name', 'method': 'cookie'}} EDGE_VIP_ID = 'vip-aaa' EDGE_VIP_DEF = {'protocol': 'http', 'name': 'vip_' + LISTENER_ID, 'connectionLimit': 0, 'defaultPoolId': None, 'ipAddress': LB_VIP, 'port': 80, 'accelerationEnabled': False, 'applicationProfileId': EDGE_APP_PROFILE_ID, 'description': '', 'enabled': True} LISTENER_BINDING = {'loadbalancer_id': LB_ID, 'listener_id': LISTENER_ID, 'app_profile_id': EDGE_APP_PROFILE_ID, 'vse_id': EDGE_VIP_ID} POOL_ID = 'ppp-qqq' EDGE_POOL_ID = 'pool-xx' EDGE_POOL_DEF = {'transparent': False, 'name': 'pool_' + POOL_ID, 'algorithm': 'round-robin', 'description': ''} POOL_BINDING = {'loadbalancer_id': LB_ID, 'pool_id': POOL_ID, 'edge_pool_id': EDGE_POOL_ID} MEMBER_ID = 'mmm-mmm' MEMBER_ADDRESS = '10.0.0.200' EDGE_MEMBER_DEF = {'monitorPort': 80, 'name': 'member-' + MEMBER_ID, 'weight': 1, 'ipAddress': MEMBER_ADDRESS, 'port': 80, 'condition': 'disabled'} POOL_FW_SECT = '10001' HM_ID = 'hhh-mmm' EDGE_HM_ID = 'hm-xx' EDGE_HM_DEF = {'maxRetries': 1, 'interval': 3, 'type': 'icmp', 'name': HM_ID, 'timeout': 3} HM_BINDING = {'loadbalancer_id': LB_ID, 'pool_id': POOL_ID, 'hm_id': HM_ID, 'edge_id': LB_EDGE_ID, 'edge_mon_id': EDGE_HM_ID} L7POL_ID = 'l7pol-l7pol' EDGE_RULE_ID = 'app-rule-xx' L7POL_BINDING = {'policy_id': L7POL_ID, 'edge_id': LB_EDGE_ID, 'edge_app_rule_id': EDGE_RULE_ID} EDGE_L7POL_DEF = {'script': 'http-request deny if TRUE', 'name': 'pol_' + L7POL_ID} L7RULE_ID1 = 'l7rule-111' L7RULE_ID2 = 'l7rule-222' class BaseTestEdgeLbaasV2(base.BaseTestCase): def _tested_entity(self): return None def completor(self, success=True): 
self.last_completor_succees = success self.last_completor_called = True def setUp(self): super(BaseTestEdgeLbaasV2, self).setUp() self.last_completor_succees = False self.last_completor_called = False self.context = context.get_admin_context() self.nsx_v = mock.Mock() octavia_objects = { 'loadbalancer': loadbalancer_mgr.EdgeLoadBalancerManagerFromDict( self.nsx_v), 'listener': listener_mgr.EdgeListenerManagerFromDict(self.nsx_v), 'pool': pool_mgr.EdgePoolManagerFromDict(self.nsx_v), 'member': member_mgr.EdgeMemberManagerFromDict(self.nsx_v), 'healthmonitor': healthmon_mgr.EdgeHealthMonitorManagerFromDict( self.nsx_v), 'l7policy': l7policy_mgr.EdgeL7PolicyManagerFromDict(self.nsx_v), 'l7rule': l7rule_mgr.EdgeL7RuleManagerFromDict(self.nsx_v)} self.edge_driver = octavia_listener.NSXOctaviaListenerEndpoint( **octavia_objects) self.lbv2_driver = mock.Mock() self.core_plugin = mock.Mock() self.flavor_plugin = flavors_plugin.FlavorsPlugin() base_mgr.LoadbalancerBaseManager._lbv2_driver = self.lbv2_driver base_mgr.LoadbalancerBaseManager._core_plugin = self.core_plugin base_mgr.LoadbalancerBaseManager._flavor_plugin = self.flavor_plugin self._patch_lb_plugin(self.lbv2_driver, self._tested_entity) self.lb = lb_models.LoadBalancer(LB_ID, LB_TENANT_ID, 'lb-name', '', LB_SUBNET, 'port-id', LB_VIP) self.listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID, 'l-name', '', None, LB_ID, 'HTTP', protocol_port=80, loadbalancer=self.lb, admin_state_up=True) self.sess_persist = lb_models.SessionPersistence(type='HTTP_COOKIE') self.pool = lb_models.Pool(POOL_ID, LB_TENANT_ID, 'pool-name', '', None, 'HTTP', 'ROUND_ROBIN', loadbalancer_id=LB_ID, listener=self.listener, listeners=[self.listener], loadbalancer=self.lb, session_persistence=self.sess_persist) self.listener.default_pool = self.pool self.member = lb_models.Member(MEMBER_ID, LB_TENANT_ID, POOL_ID, MEMBER_ADDRESS, 80, 1, pool=self.pool) self.hm = lb_models.HealthMonitor(HM_ID, LB_TENANT_ID, 'PING', 3, 3, 1, pool=self.pool) 
self.l7policy = lb_models.L7Policy(L7POL_ID, LB_TENANT_ID, name='policy-test', description='policy-desc', listener_id=LISTENER_ID, action='REJECT', listener=self.listener, position=1) self.l7rule1 = lb_models.L7Rule(L7RULE_ID1, LB_TENANT_ID, l7policy_id=L7POL_ID, compare_type='EQUAL_TO', invert=False, type='HEADER', key='key1', value='val1', policy=self.l7policy) self.l7rule2 = lb_models.L7Rule(L7RULE_ID2, LB_TENANT_ID, l7policy_id=L7POL_ID, compare_type='STARTS_WITH', invert=True, type='PATH', value='/images', policy=self.l7policy) # Translate LBaaS objects to dictionaries self.lb_dict = lb_translators.lb_loadbalancer_obj_to_dict( self.lb) self.listener_dict = lb_translators.lb_listener_obj_to_dict( self.listener) self.pool_dict = lb_translators.lb_pool_obj_to_dict( self.pool) self.member_dict = lb_translators.lb_member_obj_to_dict( self.member) self.hm_dict = lb_translators.lb_hm_obj_to_dict( self.hm) self.l7policy_dict = lb_translators.lb_l7policy_obj_to_dict( self.l7policy) self.l7rule1_dict = lb_translators.lb_l7rule_obj_to_dict( self.l7rule1) self.l7rule2_dict = lb_translators.lb_l7rule_obj_to_dict( self.l7rule2) def tearDown(self): self._unpatch_lb_plugin(self.lbv2_driver, self._tested_entity) super(BaseTestEdgeLbaasV2, self).tearDown() def _patch_lb_plugin(self, lb_plugin, manager): self.real_manager = getattr(lb_plugin, manager) lb_manager = mock.patch.object(lb_plugin, manager).start() mock.patch.object(lb_manager, 'create').start() mock.patch.object(lb_manager, 'update').start() mock.patch.object(lb_manager, 'delete').start() mock.patch.object(lb_manager, 'successful_completion').start() def _unpatch_lb_plugin(self, lb_plugin, manager): setattr(lb_plugin, manager, self.real_manager) class TestEdgeLbaasV2LoadbalancerOnRtr(BaseTestEdgeLbaasV2): def setUp(self): super(TestEdgeLbaasV2LoadbalancerOnRtr, self).setUp() cfg.CONF.set_override('use_routers_as_lbaas_platform', self._deploy_on_router, group="nsxv") @property def _tested_entity(self): return 
'load_balancer' @property def _edge_getter(self): return 'get_lbaas_edge_id_for_subnet' @property def _deploy_on_router(self): return True def test_create(self): with mock.patch.object(lb_common, self._edge_getter ) as mock_get_edge, \ mock.patch.object(lb_common, 'add_vip_as_secondary_ip' ) as mock_vip_sec_ip, \ mock.patch.object(lb_common, 'add_vip_fw_rule' ) as mock_add_vip_fwr, \ mock.patch.object(lb_common, 'set_lb_firewall_default_rule' ) as mock_set_fw_rule, \ mock.patch.object(lb_common, 'enable_edge_acceleration' ) as mock_enable_edge_acceleration, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding_by_edge' ) as mock_get_lb_binding_by_edge, \ mock.patch.object(nsxv_db, 'add_nsxv_lbaas_loadbalancer_binding' ) as mock_db_binding: mock_get_edge.return_value = LB_EDGE_ID mock_add_vip_fwr.return_value = LB_VIP_FWR_ID mock_get_lb_binding_by_edge.return_value = [] self.edge_driver.loadbalancer.create( self.context, self.lb_dict, self.completor) if self._deploy_on_router: mock_vip_sec_ip.assert_called_with(self.edge_driver.pool.vcns, LB_EDGE_ID, LB_VIP) mock_get_edge.assert_called_with(mock.ANY, mock.ANY, LB_SUBNET, LB_TENANT_ID) else: mock_set_fw_rule.assert_called_with( self.edge_driver.pool.vcns, LB_EDGE_ID, 'accept') mock_get_edge.assert_called_with(mock.ANY, mock.ANY, LB_ID, LB_VIP, mock.ANY, LB_TENANT_ID, 'compact') mock_add_vip_fwr.assert_called_with(self.edge_driver.pool.vcns, LB_EDGE_ID, LB_ID, LB_VIP) mock_db_binding.assert_called_with(self.context.session, LB_ID, LB_EDGE_ID, LB_VIP_FWR_ID, LB_VIP) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) mock_enable_edge_acceleration.assert_called_with( self.edge_driver.pool.vcns, LB_EDGE_ID) def test_update(self): new_lb = lb_models.LoadBalancer(LB_ID, 'yyy-yyy', 'lb-name', 'heh-huh', LB_SUBNET, 'port-id', LB_VIP) new_lb_dict = lb_translators.lb_loadbalancer_obj_to_dict(new_lb) self.edge_driver.loadbalancer.update( self.context, self.lb_dict, new_lb_dict, 
self.completor) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) def test_delete_old(self): with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' ) as mock_get_binding, \ mock.patch.object(lb_common, 'del_vip_fw_rule') as mock_del_fwr, \ mock.patch.object(lb_common, 'del_vip_as_secondary_ip' ) as mock_vip_sec_ip, \ mock.patch.object(lb_common, 'set_lb_firewall_default_rule' ) as mock_set_fw_rule, \ mock.patch.object(nsxv_db, 'del_nsxv_lbaas_loadbalancer_binding', ) as mock_del_binding, \ mock.patch.object(self.core_plugin, 'get_ports' ) as mock_get_ports, \ mock.patch.object(self.core_plugin, 'get_router', return_value={'router_type': 'exclusive'}), \ mock.patch.object(nsxv_db, 'get_nsxv_router_binding_by_edge' ) as mock_get_r_binding: mock_get_binding.return_value = LB_BINDING mock_get_ports.return_value = [] mock_get_r_binding.return_value = {'router_id': 'xxxx'} self.edge_driver.loadbalancer.delete( self.context, self.lb_dict, self.completor) mock_del_fwr.assert_called_with(self.edge_driver.pool.vcns, LB_EDGE_ID, LB_VIP_FWR_ID) mock_vip_sec_ip.assert_called_with(self.edge_driver.pool.vcns, LB_EDGE_ID, LB_VIP) mock_del_binding.assert_called_with(self.context.session, LB_ID) mock_set_fw_rule.assert_called_with( self.edge_driver.pool.vcns, LB_EDGE_ID, 'deny') self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) def test_delete_new(self): with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' ) as mock_get_binding, \ mock.patch.object(lb_common, 'set_lb_firewall_default_rule' ) as mock_set_fw_rule, \ mock.patch.object(nsxv_db, 'del_nsxv_lbaas_loadbalancer_binding', ) as mock_del_binding, \ mock.patch.object(self.core_plugin, 'get_ports' ) as mock_get_ports, \ mock.patch.object(self.core_plugin.edge_manager, 'delete_lrouter' ) as mock_delete_lrouter, \ mock.patch.object(nsxv_db, 'get_nsxv_router_binding_by_edge' ) as mock_get_r_binding: 
mock_get_binding.return_value = LB_BINDING mock_get_ports.return_value = [] router_id = 'lbaas-xxxx' mock_get_r_binding.return_value = {'router_id': router_id} self.edge_driver.loadbalancer.delete( self.context, self.lb_dict, self.completor) mock_del_binding.assert_called_with(self.context.session, LB_ID) mock_set_fw_rule.assert_called_with( self.edge_driver.pool.vcns, LB_EDGE_ID, 'deny') mock_delete_lrouter.assert_called_with( mock.ANY, 'lbaas-' + LB_ID, dist=False) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) def test_stats(self): pass def test_refresh(self): pass class TestEdgeLbaasV2LoadbalancerOnEdge(TestEdgeLbaasV2LoadbalancerOnRtr): @property def _edge_getter(self): return 'get_lbaas_edge_id' @property def _deploy_on_router(self): return False def setUp(self): super(TestEdgeLbaasV2LoadbalancerOnEdge, self).setUp() def test_create_with_flavor(self): flavor_name = 'large' with mock.patch.object(lb_common, 'get_lbaas_edge_id' ) as mock_get_edge, \ mock.patch.object(lb_common, 'add_vip_fw_rule' ) as mock_add_vip_fwr, \ mock.patch.object(lb_common, 'set_lb_firewall_default_rule' ) as mock_set_fw_rule, \ mock.patch.object(lb_common, 'enable_edge_acceleration' ) as mock_enable_edge_acceleration, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding_by_edge' ) as mock_get_lb_binding_by_edge, \ mock.patch.object(nsxv_db, 'add_nsxv_lbaas_loadbalancer_binding' ) as mock_db_binding,\ mock.patch('neutron.services.flavors.flavors_plugin.FlavorsPlugin.' 
'get_flavor', return_value={'name': flavor_name}): mock_get_edge.return_value = LB_EDGE_ID mock_add_vip_fwr.return_value = LB_VIP_FWR_ID mock_get_lb_binding_by_edge.return_value = [] self.lb.flavor_id = 'dummy' lb_dict = lb_translators.lb_loadbalancer_obj_to_dict(self.lb) self.edge_driver.loadbalancer.create( self.context, lb_dict, self.completor) mock_add_vip_fwr.assert_called_with(self.edge_driver.pool.vcns, LB_EDGE_ID, LB_ID, LB_VIP) mock_db_binding.assert_called_with(self.context.session, LB_ID, LB_EDGE_ID, LB_VIP_FWR_ID, LB_VIP) mock_set_fw_rule.assert_called_with( self.edge_driver.pool.vcns, LB_EDGE_ID, 'accept') mock_get_edge.assert_called_with( mock.ANY, mock.ANY, LB_ID, LB_VIP, mock.ANY, LB_TENANT_ID, flavor_name) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) mock_enable_edge_acceleration.assert_called_with( self.edge_driver.pool.vcns, LB_EDGE_ID) self.lb.flavor_id = None def test_create_with_illegal_flavor(self): flavor_name = 'no_size' with mock.patch.object(lb_common, 'get_lbaas_edge_id' ) as mock_get_edge, \ mock.patch.object(lb_common, 'add_vip_fw_rule' ) as mock_add_vip_fwr, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding_by_edge' ) as mock_get_lb_binding_by_edge, \ mock.patch('neutron.services.flavors.flavors_plugin.FlavorsPlugin.' 
'get_flavor', return_value={'name': flavor_name}): mock_get_edge.return_value = LB_EDGE_ID mock_add_vip_fwr.return_value = LB_VIP_FWR_ID mock_get_lb_binding_by_edge.return_value = [] self.lb.flavor_id = 'dummy' lb_dict = lb_translators.lb_loadbalancer_obj_to_dict(self.lb) self.assertRaises( n_exc.InvalidInput, self.edge_driver.loadbalancer.create, self.context, lb_dict, self.completor) self.lb.flavor_id = None class TestEdgeLbaasV2Listener(BaseTestEdgeLbaasV2): def setUp(self): super(TestEdgeLbaasV2Listener, self).setUp() @property def _tested_entity(self): return 'listener' def test_create(self): with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' ) as mock_get_lb_binding, \ mock.patch.object(self.edge_driver.pool.vcns, 'create_app_profile' ) as mock_create_app_prof, \ mock.patch.object(self.edge_driver.pool.vcns, 'create_vip' ) as mock_create_vip, \ mock.patch.object(nsxv_db, 'add_nsxv_lbaas_listener_binding' ) as mock_add_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_pool_binding', return_value=None): mock_get_lb_binding.return_value = LB_BINDING mock_create_app_prof.return_value = ( {'location': 'x/' + EDGE_APP_PROFILE_ID}, None) mock_create_vip.return_value = ( {'location': 'x/' + EDGE_VIP_ID}, None) self.edge_driver.listener.create( self.context, self.listener_dict, self.completor) mock_create_app_prof.assert_called_with(LB_EDGE_ID, EDGE_APP_PROF_DEF) mock_create_vip.assert_called_with(LB_EDGE_ID, EDGE_VIP_DEF) mock_add_binding.assert_called_with( self.context.session, LB_ID, LISTENER_ID, EDGE_APP_PROFILE_ID, EDGE_VIP_ID) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) def test_update(self): new_listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID, 'l-name', '', None, LB_ID, 'HTTP', protocol_port=8000, loadbalancer=self.lb, admin_state_up=True) new_listener.default_pool = self.pool new_listener_dict = lb_translators.lb_listener_obj_to_dict( new_listener) with mock.patch.object(nsxv_db, 
'get_nsxv_lbaas_listener_binding' ) as mock_get_listener_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' ) as mock_get_lb_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_pool_binding', return_value=None), \ mock.patch.object(self.edge_driver.pool.vcns, 'update_app_profile' ) as mock_upd_app_prof, \ mock.patch.object(self.edge_driver.pool.vcns, 'update_vip' ) as mock_upd_vip: mock_get_listener_binding.return_value = LISTENER_BINDING mock_get_lb_binding.return_value = LB_BINDING self.edge_driver.listener.update( self.context, self.listener_dict, new_listener_dict, self.completor) mock_upd_app_prof.assert_called_with(LB_EDGE_ID, EDGE_APP_PROFILE_ID, EDGE_APP_PROF_DEF) edge_vip_def = EDGE_VIP_DEF.copy() edge_vip_def['port'] = 8000 mock_upd_vip.assert_called_with(LB_EDGE_ID, EDGE_VIP_ID, edge_vip_def) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) def test_delete(self): with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_listener_binding' ) as mock_get_listener_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' ) as mock_get_lb_binding, \ mock.patch.object(self.edge_driver.pool.vcns, 'delete_vip' ) as mock_del_vip, \ mock.patch.object(self.edge_driver.pool.vcns, 'delete_app_profile' ) as mock_del_app_prof, \ mock.patch.object(nsxv_db, 'del_nsxv_lbaas_listener_binding' ) as mock_del_binding: mock_get_listener_binding.return_value = LISTENER_BINDING mock_get_lb_binding.return_value = LB_BINDING self.edge_driver.listener.delete( self.context, self.listener_dict, self.completor) mock_del_vip.assert_called_with(LB_EDGE_ID, EDGE_VIP_ID) mock_del_app_prof.assert_called_with(LB_EDGE_ID, EDGE_APP_PROFILE_ID) mock_del_binding.assert_called_with(self.context.session, LB_ID, LISTENER_ID) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) class TestEdgeLbaasV2Pool(BaseTestEdgeLbaasV2): def setUp(self): super(TestEdgeLbaasV2Pool, self).setUp() 
@property def _tested_entity(self): return 'pool' def test_create(self): with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_listener_binding' ) as mock_get_listener_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' ) as mock_get_lb_binding, \ mock.patch.object(self.edge_driver.pool.vcns, 'create_pool' ) as mock_create_pool, \ mock.patch.object(nsxv_db, 'add_nsxv_lbaas_pool_binding' ) as mock_add_binding, \ mock.patch.object(self.edge_driver.pool.vcns, 'update_vip' ) as mock_upd_vip,\ mock.patch.object(self.edge_driver.pool.vcns, 'update_app_profile' ) as mock_upd_app_prof: mock_get_listener_binding.return_value = LISTENER_BINDING mock_get_lb_binding.return_value = LB_BINDING mock_create_pool.return_value = ( {'location': 'x/' + EDGE_POOL_ID}, None) self.edge_driver.pool.create( self.context, self.pool_dict, self.completor) mock_create_pool.assert_called_with(LB_EDGE_ID, EDGE_POOL_DEF.copy()) mock_add_binding.assert_called_with(self.context.session, LB_ID, POOL_ID, EDGE_POOL_ID) edge_vip_def = EDGE_VIP_DEF.copy() edge_vip_def['defaultPoolId'] = EDGE_POOL_ID mock_upd_vip.assert_called_with(LB_EDGE_ID, EDGE_VIP_ID, edge_vip_def) mock_upd_app_prof.assert_called_with(LB_EDGE_ID, EDGE_APP_PROFILE_ID, EDGE_APP_PROF_DEF) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) def test_update(self): new_pool = lb_models.Pool(POOL_ID, LB_TENANT_ID, 'pool-name', '', None, 'HTTP', 'LEAST_CONNECTIONS', listener=self.listener) new_pool_dict = lb_translators.lb_pool_obj_to_dict(new_pool) list_bind = {'app_profile_id': EDGE_APP_PROFILE_ID} with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' ) as mock_get_lb_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_pool_binding' ) as mock_get_pool_binding,\ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_listener_binding', return_value=list_bind),\ mock.patch.object(self.edge_driver.pool.vcns, 'update_pool' ) as mock_upd_pool,\ 
mock.patch.object(self.edge_driver.pool.vcns, 'get_pool' ) as mock_get_pool,\ mock.patch.object(self.edge_driver.pool.vcns, 'update_app_profile' ) as mock_upd_app_prof: mock_get_lb_binding.return_value = LB_BINDING mock_get_pool_binding.return_value = POOL_BINDING fake_edge = EDGE_POOL_DEF.copy() fake_edge['monitorId'] = 'monitor-7' fake_edge['member'] = ['member1', 'member2'] mock_get_pool.return_value = (None, fake_edge) self.edge_driver.pool.update( self.context, self.pool_dict, new_pool_dict, self.completor) edge_pool_def = EDGE_POOL_DEF.copy() edge_pool_def['algorithm'] = 'leastconn' edge_pool_def['monitorId'] = 'monitor-7' edge_pool_def['member'] = ['member1', 'member2'] mock_upd_pool.assert_called_with( LB_EDGE_ID, EDGE_POOL_ID, edge_pool_def) mock_upd_app_prof.assert_called_with(LB_EDGE_ID, EDGE_APP_PROFILE_ID, EDGE_APP_PROF_DEF) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) def test_delete(self): with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' ) as mock_get_lb_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_pool_binding' ) as mock_get_pool_binding,\ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_listener_binding' ) as mock_get_listener_binding, \ mock.patch.object(self.edge_driver.pool.vcns, 'update_vip' ) as mock_upd_vip, \ mock.patch.object(self.edge_driver.pool.vcns, 'delete_pool' ) as mock_del_pool, \ mock.patch.object(nsxv_db, 'del_nsxv_lbaas_pool_binding' ) as mock_del_binding,\ mock.patch.object(lb_common, 'is_lb_on_router_edge' ) as mock_lb_router, \ mock.patch.object(self.edge_driver.pool.vcns, 'update_app_profile' ): mock_get_lb_binding.return_value = LB_BINDING mock_get_pool_binding.return_value = POOL_BINDING mock_get_listener_binding.return_value = LISTENER_BINDING mock_lb_router.return_value = False self.edge_driver.pool.delete( self.context, self.pool_dict, self.completor) mock_upd_vip.assert_called_with(LB_EDGE_ID, EDGE_VIP_ID, EDGE_VIP_DEF) 
mock_del_pool.assert_called_with(LB_EDGE_ID, EDGE_POOL_ID) mock_del_binding.assert_called_with( self.context.session, LB_ID, POOL_ID) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) class TestEdgeLbaasV2Member(BaseTestEdgeLbaasV2): def setUp(self): super(TestEdgeLbaasV2Member, self).setUp() @property def _tested_entity(self): return 'member' def test_create(self): with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' ) as mock_get_lb_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_router_binding_by_edge' ), \ mock.patch.object(self.edge_driver.pool.vcns, 'get_pool' ) as mock_get_pool, \ mock.patch.object(self.edge_driver.pool.vcns, 'update_pool' ) as mock_update_pool: mock_get_lb_binding.return_value = LB_BINDING mock_get_pool_binding.return_value = POOL_BINDING mock_get_pool.return_value = (None, EDGE_POOL_DEF.copy()) self.edge_driver.member.create( self.context, self.member_dict, self.completor) edge_pool_def = EDGE_POOL_DEF.copy() edge_pool_def['member'] = [EDGE_MEMBER_DEF] mock_update_pool.assert_called_with( LB_EDGE_ID, EDGE_POOL_ID, edge_pool_def) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) def test_update(self): new_member = lb_models.Member(MEMBER_ID, LB_TENANT_ID, POOL_ID, MEMBER_ADDRESS, 8000, 1, True, pool=self.pool) new_member_dict = lb_translators.lb_member_obj_to_dict(new_member) with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' ) as mock_get_lb_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(self.edge_driver.pool.vcns, 'get_pool' ) as mock_get_pool, \ mock.patch.object(self.edge_driver.pool.vcns, 'update_pool' ) as mock_update_pool: mock_get_lb_binding.return_value = LB_BINDING mock_get_pool_binding.return_value = POOL_BINDING edge_pool_def = EDGE_POOL_DEF.copy() 
edge_pool_def['member'] = [EDGE_MEMBER_DEF] mock_get_pool.return_value = (None, edge_pool_def) self.edge_driver.member.update( self.context, self.member_dict, new_member_dict, self.completor) edge_member_def = EDGE_MEMBER_DEF.copy() edge_member_def['port'] = 8000 edge_member_def['monitorPort'] = 8000 edge_member_def['condition'] = 'enabled' edge_pool_def['member'] = [edge_member_def] mock_update_pool.assert_called_with( LB_EDGE_ID, EDGE_POOL_ID, edge_pool_def) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) def test_delete(self): with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' ) as mock_get_lb_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(self.edge_driver.pool.vcns, 'get_pool' ) as mock_get_pool, \ mock.patch.object(self.core_plugin, 'get_ports' ) as mock_get_ports, \ mock.patch.object(lb_common, 'is_lb_on_router_edge' ) as mock_lb_router, \ mock.patch.object(lb_common, 'delete_lb_interface' ) as mock_del_lb_iface, \ mock.patch.object(self.edge_driver.pool.vcns, 'update_pool' ) as mock_update_pool: mock_get_lb_binding.return_value = LB_BINDING mock_get_pool_binding.return_value = POOL_BINDING mock_lb_router.return_value = False edge_pool_def = EDGE_POOL_DEF.copy() edge_pool_def['member'] = [EDGE_MEMBER_DEF] mock_get_pool.return_value = (None, edge_pool_def) mock_get_ports.return_value = [] self.edge_driver.member.delete( self.context, self.member_dict, self.completor) edge_pool_def['member'] = [] mock_update_pool.assert_called_with( LB_EDGE_ID, EDGE_POOL_ID, edge_pool_def) mock_del_lb_iface.assert_called_with( self.context, self.core_plugin, LB_ID, None) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) class TestEdgeLbaasV2HealthMonitor(BaseTestEdgeLbaasV2): def setUp(self): super(TestEdgeLbaasV2HealthMonitor, self).setUp() @property def _tested_entity(self): return 'health_monitor' def 
test_create(self): with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' ) as mock_get_lb_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_monitor_binding' ) as mock_get_mon_binding, \ mock.patch.object(self.edge_driver.pool.vcns, 'create_health_monitor') as mock_create_hm, \ mock.patch.object(nsxv_db, 'add_nsxv_lbaas_monitor_binding' ) as mock_add_hm_binding, \ mock.patch.object(self.edge_driver.pool.vcns, 'get_pool' ) as mock_get_pool, \ mock.patch.object(self.edge_driver.pool.vcns, 'update_pool' ) as mock_update_pool: mock_get_lb_binding.return_value = LB_BINDING mock_get_pool_binding.return_value = POOL_BINDING mock_get_mon_binding.return_value = None mock_create_hm.return_value = ( {'location': 'x/' + EDGE_HM_ID}, None) mock_get_pool.return_value = (None, EDGE_POOL_DEF.copy()) self.edge_driver.healthmonitor.create( self.context, self.hm_dict, self.completor) mock_create_hm.assert_called_with(LB_EDGE_ID, EDGE_HM_DEF) mock_add_hm_binding.assert_called_with( self.context.session, LB_ID, POOL_ID, HM_ID, LB_EDGE_ID, EDGE_HM_ID) edge_pool_def = EDGE_POOL_DEF.copy() edge_pool_def['monitorId'] = [EDGE_HM_ID] mock_update_pool.assert_called_with( LB_EDGE_ID, EDGE_POOL_ID, edge_pool_def) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) def test_update(self): new_hm = lb_models.HealthMonitor(HM_ID, LB_TENANT_ID, 'PING', 3, 3, 3, pool=self.pool) new_hm_dict = lb_translators.lb_hm_obj_to_dict(new_hm) with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' ) as mock_get_lb_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_monitor_binding' ) as mock_get_mon_binding, \ mock.patch.object(self.edge_driver.pool.vcns, 'update_health_monitor') as mock_upd_hm: mock_get_lb_binding.return_value = LB_BINDING 
mock_get_pool_binding.return_value = POOL_BINDING mock_get_mon_binding.return_value = HM_BINDING self.edge_driver.healthmonitor.update( self.context, self.hm_dict, new_hm_dict, self.completor) edge_hm_def = EDGE_HM_DEF.copy() edge_hm_def['maxRetries'] = 3 mock_upd_hm.assert_called_with(LB_EDGE_ID, EDGE_HM_ID, edge_hm_def) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) def test_delete(self): with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' ) as mock_get_lb_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_monitor_binding' ) as mock_get_mon_binding, \ mock.patch.object(self.edge_driver.pool.vcns, 'delete_health_monitor') as mock_del_hm, \ mock.patch.object(self.edge_driver.pool.vcns, 'get_pool' ) as mock_get_pool, \ mock.patch.object(self.edge_driver.pool.vcns, 'update_pool' ) as mock_update_pool, \ mock.patch.object(nsxv_db, 'del_nsxv_lbaas_monitor_binding' ) as mock_del_binding: mock_get_lb_binding.return_value = LB_BINDING mock_get_pool_binding.return_value = POOL_BINDING mock_get_mon_binding.return_value = HM_BINDING edge_pool_def = EDGE_POOL_DEF.copy() edge_pool_def['monitorId'] = [EDGE_HM_ID] mock_get_pool.return_value = (None, edge_pool_def) self.edge_driver.healthmonitor.delete( self.context, self.hm_dict, self.completor) mock_del_hm.assert_called_with(LB_EDGE_ID, EDGE_HM_ID) edge_pool_def['monitorId'] = [] mock_update_pool.assert_called_with( LB_EDGE_ID, EDGE_POOL_ID, edge_pool_def) mock_del_binding.assert_called_with(self.context.session, LB_ID, POOL_ID, HM_ID, LB_EDGE_ID) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) class TestEdgeLbaasV2L7Policy(BaseTestEdgeLbaasV2): def setUp(self): super(TestEdgeLbaasV2L7Policy, self).setUp() @property def _tested_entity(self): return 'l7policy' def test_create(self): with mock.patch.object(nsxv_db, 
'get_nsxv_lbaas_l7policy_binding' ) as mock_get_l7policy_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' ) as mock_get_lb_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_listener_binding' ) as mock_get_listener_binding, \ mock.patch.object(nsxv_db, 'add_nsxv_lbaas_l7policy_binding' ) as mock_add_l7policy_binding,\ mock.patch.object(self.edge_driver.pool.vcns, 'create_app_rule' ) as mock_create_rule, \ mock.patch.object(self.edge_driver.pool.vcns, 'get_vip' ) as mock_get_vip, \ mock.patch.object(self.edge_driver.pool.vcns, 'update_vip' ) as mock_upd_vip: mock_get_lb_binding.return_value = LB_BINDING mock_get_l7policy_binding.return_value = L7POL_BINDING mock_get_listener_binding.return_value = LISTENER_BINDING mock_create_rule.return_value = ( {'location': 'x/' + EDGE_RULE_ID}, None) mock_get_vip.return_value = (None, EDGE_VIP_DEF.copy()) self.edge_driver.l7policy.create( self.context, self.l7policy_dict, self.completor) mock_create_rule.assert_called_with(LB_EDGE_ID, EDGE_L7POL_DEF.copy()) mock_add_l7policy_binding.assert_called_with( self.context.session, L7POL_ID, LB_EDGE_ID, EDGE_RULE_ID) edge_vip_def = EDGE_VIP_DEF.copy() edge_vip_def['applicationRuleId'] = [EDGE_RULE_ID] mock_upd_vip.assert_called_with(LB_EDGE_ID, EDGE_VIP_ID, edge_vip_def) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) def test_update(self): url = 'http://www.test.com' new_pol = lb_models.L7Policy(L7POL_ID, LB_TENANT_ID, name='policy-test', description='policy-desc', listener_id=LISTENER_ID, action='REDIRECT_TO_URL', redirect_url=url, listener=self.listener, position=2) new_pol_dict = lb_translators.lb_l7policy_obj_to_dict(new_pol) with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_l7policy_binding' ) as mock_get_l7policy_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' ) as mock_get_lb_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_listener_binding' ) as mock_get_listener_binding, \ 
mock.patch.object(self.edge_driver.pool.vcns, 'get_vip' ) as mock_get_vip, \ mock.patch.object(self.edge_driver.pool.vcns, 'update_vip' ) as mock_upd_vip, \ mock.patch.object(self.edge_driver.pool.vcns, 'update_app_rule' ) as mock_update_rule: mock_get_lb_binding.return_value = LB_BINDING mock_get_l7policy_binding.return_value = L7POL_BINDING mock_get_listener_binding.return_value = LISTENER_BINDING edge_vip_def = EDGE_VIP_DEF.copy() edge_vip_def['applicationRuleId'] = [EDGE_RULE_ID] mock_get_vip.return_value = (None, edge_vip_def) self.edge_driver.l7policy.update( self.context, self.l7policy_dict, new_pol_dict, self.completor) edge_rule_def = EDGE_L7POL_DEF.copy() edge_rule_def['script'] = "redirect location %s if TRUE" % url mock_update_rule.assert_called_with( LB_EDGE_ID, EDGE_RULE_ID, edge_rule_def) mock_upd_vip.assert_called() self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) def test_delete(self): with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_l7policy_binding' ) as mock_get_l7policy_binding, \ mock.patch.object(nsxv_db, 'del_nsxv_lbaas_l7policy_binding' ) as mock_del_l7policy_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_loadbalancer_binding' ) as mock_get_lb_binding, \ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_pool_binding' ) as mock_get_pool_binding,\ mock.patch.object(nsxv_db, 'get_nsxv_lbaas_listener_binding' ) as mock_get_listener_binding, \ mock.patch.object(self.edge_driver.pool.vcns, 'delete_app_rule' ) as mock_del_app_rule, \ mock.patch.object(self.edge_driver.pool.vcns, 'get_vip' ) as mock_get_vip, \ mock.patch.object(self.edge_driver.pool.vcns, 'update_vip' ) as mock_upd_vip: mock_get_lb_binding.return_value = LB_BINDING mock_get_pool_binding.return_value = POOL_BINDING mock_get_listener_binding.return_value = LISTENER_BINDING mock_get_l7policy_binding.return_value = L7POL_BINDING edge_vip_def = EDGE_VIP_DEF.copy() edge_vip_def['applicationRuleId'] = [EDGE_RULE_ID] mock_get_vip.return_value = (None, 
edge_vip_def) self.edge_driver.l7policy.delete( self.context, self.l7policy_dict, self.completor) edge_vip_def2 = EDGE_VIP_DEF.copy() edge_vip_def2['applicationRuleId'] = [] mock_upd_vip.assert_called_with(LB_EDGE_ID, EDGE_VIP_ID, edge_vip_def2) mock_del_app_rule.assert_called_with(LB_EDGE_ID, EDGE_RULE_ID) mock_del_l7policy_binding.assert_called_with( self.context.session, L7POL_ID) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) class TestEdgeLbaasV2L7Rule(BaseTestEdgeLbaasV2): def setUp(self): super(TestEdgeLbaasV2L7Rule, self).setUp() @property def _tested_entity(self): return 'l7rule' def test_create(self): with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_l7policy_binding' ) as mock_get_l7policy_binding, \ mock.patch.object(self.edge_driver.pool.vcns, 'update_app_rule' ) as mock_update_rule: mock_get_l7policy_binding.return_value = L7POL_BINDING # Create the first rule self.l7rule1.policy.rules = [self.l7rule1] rule1_dict = lb_translators.lb_l7rule_obj_to_dict(self.l7rule1) self.edge_driver.l7rule.create( self.context, rule1_dict, self.completor) edge_rule_def = EDGE_L7POL_DEF.copy() edge_rule_def['script'] = ( "acl %(rule_id)s hdr(key1) -i val1\n" "http-request deny if %(rule_id)s" % {'rule_id': L7RULE_ID1}) mock_update_rule.assert_called_with( LB_EDGE_ID, EDGE_RULE_ID, edge_rule_def) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) # Create the 2nd rule self.l7rule2.policy.rules = [self.l7rule1, self.l7rule2] rule2_dict = lb_translators.lb_l7rule_obj_to_dict(self.l7rule2) self.edge_driver.l7rule.create( self.context, rule2_dict, self.completor) edge_rule_def = EDGE_L7POL_DEF.copy() edge_rule_def['script'] = ( "acl %(rule_id1)s hdr(key1) -i val1\n" "acl %(rule_id2)s path_beg -i /images\n" "http-request deny if %(rule_id1)s !%(rule_id2)s" % {'rule_id1': L7RULE_ID1, 'rule_id2': L7RULE_ID2}) mock_update_rule.assert_called_with( LB_EDGE_ID, EDGE_RULE_ID, edge_rule_def) 
self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) def test_update(self): new_rule = lb_models.L7Rule(L7RULE_ID1, LB_TENANT_ID, l7policy_id=L7POL_ID, compare_type='EQUAL_TO', invert=False, type='HEADER', key='key2', value='val1', policy=self.l7policy) with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_l7policy_binding' ) as mock_get_l7policy_binding, \ mock.patch.object(self.edge_driver.pool.vcns, 'update_app_rule' ) as mock_update_rule: mock_get_l7policy_binding.return_value = L7POL_BINDING new_rule.policy.rules = [new_rule] new_rule_dict = lb_translators.lb_l7rule_obj_to_dict(new_rule) self.edge_driver.l7rule.update( self.context, self.l7rule1_dict, new_rule_dict, self.completor) edge_rule_def = EDGE_L7POL_DEF.copy() edge_rule_def['script'] = ( "acl %(rule_id)s hdr(key2) -i val1\n" "http-request deny if %(rule_id)s" % {'rule_id': L7RULE_ID1}) mock_update_rule.assert_called_with( LB_EDGE_ID, EDGE_RULE_ID, edge_rule_def) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) def test_delete(self): with mock.patch.object(nsxv_db, 'get_nsxv_lbaas_l7policy_binding' ) as mock_get_l7policy_binding, \ mock.patch.object(self.edge_driver.pool.vcns, 'update_app_rule' ) as mock_update_rule: mock_get_l7policy_binding.return_value = L7POL_BINDING self.l7rule1.policy.rules = [] rule_dict = lb_translators.lb_l7rule_obj_to_dict(self.l7rule1) self.edge_driver.l7rule.delete( self.context, rule_dict, self.completor) edge_rule_def = EDGE_L7POL_DEF.copy() edge_rule_def['script'] = ( "http-request deny if TRUE") mock_update_rule.assert_called_with( LB_EDGE_ID, EDGE_RULE_ID, edge_rule_def) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_v/test_fwaas_v2_driver.py0000644000175000017500000004447000000000000027353 
0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import mock from neutron_lib.plugins import directory from vmware_nsx.db import nsxv_models from vmware_nsx.plugins.nsx_v.vshield import edge_firewall_driver from vmware_nsx.plugins.nsx_v.vshield import edge_utils from vmware_nsx.services.fwaas.nsx_v import edge_fwaas_driver_v2 from vmware_nsx.services.fwaas.nsx_v import fwaas_callbacks_v2 from vmware_nsx.tests.unit.nsx_v import test_plugin as test_v_plugin FAKE_FW_ID = 'fake_fw_uuid' FAKE_ROUTER_ID = 'fake_rtr_uuid' FAKE_PORT_ID = 'fake_port_uuid' FAKE_NET_ID = 'fake_net_uuid' FAKE_DB_OBJ = nsxv_models.NsxvEdgeVnicBinding(vnic_index='1') class NsxvFwaasTestCase(test_v_plugin.NsxVPluginV2TestCase): def setUp(self): super(NsxvFwaasTestCase, self).setUp() self.firewall = edge_fwaas_driver_v2.EdgeFwaasVDriverV2() self.plugin = directory.get_plugin() self.plugin.fwaas_callbacks = fwaas_callbacks_v2.\ NsxvFwaasCallbacksV2(False) self.plugin.fwaas_callbacks.fwaas_enabled = True self.plugin.fwaas_callbacks.fwaas_driver = self.firewall self.plugin.fwaas_callbacks.internal_driver = self.firewall self.plugin.init_is_complete = True self.plugin.metadata_proxy_handler = None # Start some mocks self.router = {'id': FAKE_ROUTER_ID, 'external_gateway_info': {'network_id': 'external'}, 'nsx_attributes': {'distributed': False, 'router_type': 'exclusive'}} self.distributed_router = {'id': FAKE_ROUTER_ID, 
'external_gateway_info': {'network_id': 'external'}, 'nsx_attributes': {'distributed': True, 'router_type': 'exclusive'}} mock.patch.object(self.plugin, '_get_router', return_value=self.router).start() mock.patch.object(self.plugin, 'get_router', return_value=self.router).start() self.port = {'id': FAKE_PORT_ID, 'network_id': FAKE_NET_ID} mock.patch.object(self.plugin, '_get_router_interfaces', return_value=[self.port]).start() mock.patch.object(self.plugin, 'get_port', return_value=self.port).start() mock.patch.object(self.plugin, '_get_subnet_fw_rules', return_value=[]).start() mock.patch.object(self.plugin, '_get_dnat_fw_rule', return_value=[]).start() mock.patch.object(self.plugin, '_get_allocation_pools_fw_rule', return_value=[]).start() mock.patch.object(self.plugin, '_get_nosnat_subnets_fw_rules', return_value=[]).start() def _fake_rules_v4(self, is_ingress=True, is_conflict=False, cidr='10.24.4.0/24'): rule1 = {'enabled': True, 'action': 'allow', 'ip_version': 4, 'protocol': 'tcp', 'destination_port': '80', 'id': 'fake-fw-rule1', 'description': 'first rule', 'position': '0'} rule2 = {'enabled': True, 'action': 'reject', 'ip_version': 4, 'protocol': 'tcp', 'destination_port': '22:24', 'source_port': '1:65535', 'id': 'fake-fw-rule2', 'position': '1'} rule3 = {'enabled': True, 'action': 'deny', 'ip_version': 4, 'protocol': 'icmp', 'id': 'fake-fw-rule3', 'position': '2'} rule4 = {'enabled': True, 'action': 'deny', 'ip_version': 4, 'id': 'fake-fw-rule4', 'position': '3'} if is_ingress: if not is_conflict: rule1['source_ip_address'] = cidr else: rule1['destination_ip_address'] = cidr else: if not is_conflict: rule1['destination_ip_address'] = cidr else: rule1['source_ip_address'] = cidr return [rule1, rule2, rule3, rule4] def _fake_translated_rules(self, rules_list, nsx_port_id, is_ingress=True, logged=False): translated_rules = copy.copy(rules_list) for rule in translated_rules: if logged: rule['logged'] = True if is_ingress: if (not 
rule.get('destination_ip_address') or rule['destination_ip_address'].startswith('0.0.0.0')): if nsx_port_id: rule['destination_vnic_groups'] = [nsx_port_id] else: if (not rule.get('source_ip_address') or rule['source_ip_address'].startswith('0.0.0.0')): if nsx_port_id: rule['source_vnic_groups'] = [nsx_port_id] if rule.get('destination_ip_address'): if rule['destination_ip_address'].startswith('0.0.0.0'): del rule['destination_ip_address'] else: rule['destination_ip_address'] = [ rule['destination_ip_address']] if rule.get('source_ip_address'): if rule['source_ip_address'].startswith('0.0.0.0'): del rule['source_ip_address'] else: rule['source_ip_address'] = [ rule['source_ip_address']] rule['name'] = (fwaas_callbacks_v2.RULE_NAME_PREFIX + (rule.get('name') or rule['id']))[:30] if rule.get('id'): if is_ingress: rule['id'] = ('ingress-%s' % rule['id'])[:36] else: rule['id'] = ('egress-%s' % rule['id'])[:36] return translated_rules def _fake_empty_firewall_group(self): fw_inst = {'id': FAKE_FW_ID, 'admin_state_up': True, 'tenant_id': 'tenant-uuid', 'ingress_rule_list': [], 'egress_rule_list': []} return fw_inst def _fake_firewall_group(self, rule_list, is_ingress=True, admin_state_up=True): _rule_list = copy.deepcopy(rule_list) for rule in _rule_list: rule['position'] = str(_rule_list.index(rule)) fw_inst = {'id': FAKE_FW_ID, 'admin_state_up': admin_state_up, 'tenant_id': 'tenant-uuid', 'ingress_rule_list': [], 'egress_rule_list': []} if is_ingress: fw_inst['ingress_rule_list'] = _rule_list else: fw_inst['egress_rule_list'] = _rule_list return fw_inst def _fake_firewall_group_with_admin_down(self, rule_list, is_ingress=True): return self._fake_firewall_group( rule_list, is_ingress=is_ingress, admin_state_up=False) def _fake_apply_list_template(self, router): router_inst = router router_info_inst = mock.Mock() router_info_inst.router = router_inst router_info_inst.router_id = FAKE_ROUTER_ID apply_list = [(router_info_inst, FAKE_PORT_ID)] return apply_list def 
_fake_apply_list(self): return self._fake_apply_list_template(self.router) def _fake_distributed_apply_list(self): return self._fake_apply_list_template(self.distributed_router) def test_create_firewall_no_rules(self): apply_list = self._fake_apply_list() firewall = self._fake_empty_firewall_group() with mock.patch.object(self.plugin.fwaas_callbacks, 'get_port_fwg', return_value=firewall),\ mock.patch.object(self.plugin.fwaas_callbacks, '_get_port_firewall_group_id', return_value=FAKE_FW_ID),\ mock.patch.object(self.plugin.fwaas_callbacks, '_get_fw_group_from_plugin', return_value=firewall),\ mock.patch("vmware_nsx.db.nsxv_db.get_edge_vnic_binding", return_value=FAKE_DB_OBJ),\ mock.patch.object(edge_utils, "update_firewall") as update_fw,\ mock.patch.object(edge_utils, 'get_router_edge_id', return_value='edge-1'): self.firewall.create_firewall_group('nsx', apply_list, firewall) # expecting 2 block rules for the logical port (egress & ingress) # and last default allow all rule expected_rules = [ {'name': "Block port ingress", 'action': edge_firewall_driver.FWAAS_DENY, 'destination_vnic_groups': ['vnic-index-1'], 'logged': False}, {'name': "Block port egress", 'action': edge_firewall_driver.FWAAS_DENY, 'source_vnic_groups': ['vnic-index-1'], 'logged': False}] update_fw.assert_called_once_with( self.plugin.nsx_v, mock.ANY, FAKE_ROUTER_ID, {'firewall_rule_list': expected_rules}) def _setup_firewall_with_rules(self, func, is_ingress=True, is_conflict=False, cidr='10.24.4.0/24'): apply_list = self._fake_apply_list() rule_list = self._fake_rules_v4(is_ingress=is_ingress, is_conflict=is_conflict, cidr=cidr) firewall = self._fake_firewall_group(rule_list, is_ingress=is_ingress) with mock.patch.object(self.plugin.fwaas_callbacks, 'get_port_fwg', return_value=firewall),\ mock.patch.object(self.plugin.fwaas_callbacks, '_get_port_firewall_group_id', return_value=FAKE_FW_ID),\ mock.patch.object(self.plugin.fwaas_callbacks, '_get_fw_group_from_plugin', return_value=firewall),\ 
mock.patch("vmware_nsx.db.nsxv_db.get_edge_vnic_binding", return_value=FAKE_DB_OBJ),\ mock.patch.object(edge_utils, "update_firewall") as update_fw,\ mock.patch.object(edge_utils, 'get_router_edge_id', return_value='edge-1'): func('nsx', apply_list, firewall) expected_rules = self._fake_translated_rules( rule_list, 'vnic-index-1', is_ingress=is_ingress) + [ {'name': "Block port ingress", 'action': edge_firewall_driver.FWAAS_DENY, 'destination_vnic_groups': ['vnic-index-1'], 'logged': False}, {'name': "Block port egress", 'action': edge_firewall_driver.FWAAS_DENY, 'source_vnic_groups': ['vnic-index-1'], 'logged': False}] update_fw.assert_called_once_with( self.plugin.nsx_v, mock.ANY, FAKE_ROUTER_ID, {'firewall_rule_list': expected_rules}) def test_create_firewall_with_ingress_rules(self): self._setup_firewall_with_rules(self.firewall.create_firewall_group) def test_update_firewall_with_ingress_rules(self): self._setup_firewall_with_rules(self.firewall.update_firewall_group) def test_create_firewall_with_egress_rules(self): self._setup_firewall_with_rules(self.firewall.create_firewall_group, is_ingress=False) def test_create_firewall_with_illegal_cidr(self): self._setup_firewall_with_rules(self.firewall.create_firewall_group, cidr='0.0.0.0/24') def test_update_firewall_with_egress_rules(self): self._setup_firewall_with_rules(self.firewall.update_firewall_group, is_ingress=False) def test_update_firewall_with_egress_conflicting_rules(self): self._setup_firewall_with_rules(self.firewall.update_firewall_group, is_ingress=False, is_conflict=True) def test_update_firewall_with_ingress_conflicting_rules(self): self._setup_firewall_with_rules(self.firewall.update_firewall_group, is_ingress=True, is_conflict=True) def test_delete_firewall(self): apply_list = self._fake_apply_list() firewall = self._fake_empty_firewall_group() with mock.patch.object(self.plugin.fwaas_callbacks, 'get_port_fwg', return_value=None),\ mock.patch("vmware_nsx.db.db.get_nsx_switch_and_port_id", 
return_value=('vnic-index-1', 0)),\ mock.patch.object(edge_utils, "update_firewall") as update_fw,\ mock.patch.object(self.plugin.fwaas_callbacks, '_get_port_firewall_group_id', return_value=None),\ mock.patch.object(edge_utils, 'get_router_edge_id', return_value='edge-1'): self.firewall.delete_firewall_group('nsx', apply_list, firewall) update_fw.assert_called_once_with( self.plugin.nsx_v, mock.ANY, FAKE_ROUTER_ID, {'firewall_rule_list': []}) def test_create_firewall_with_admin_down(self): apply_list = self._fake_apply_list() rule_list = self._fake_rules_v4() firewall = self._fake_firewall_group_with_admin_down(rule_list) with mock.patch.object(edge_utils, "update_firewall") as update_fw,\ mock.patch.object(self.plugin.fwaas_callbacks, '_get_port_firewall_group_id', return_value=None),\ mock.patch.object(edge_utils, 'get_router_edge_id', return_value='edge-1'): self.firewall.create_firewall_group('nsx', apply_list, firewall) update_fw.assert_called_once_with( self.plugin.nsx_v, mock.ANY, FAKE_ROUTER_ID, {'firewall_rule_list': []}) def _setup_dist_router_firewall_with_rules(self, func, is_ingress=True, is_conflict=False, cidr='10.24.4.0/24'): apply_list = self._fake_distributed_apply_list() rule_list = self._fake_rules_v4(is_ingress=is_ingress, is_conflict=is_conflict, cidr=cidr) firewall = self._fake_firewall_group(rule_list, is_ingress=is_ingress) with mock.patch.object(self.plugin.fwaas_callbacks, 'get_port_fwg', return_value=firewall),\ mock.patch.object(self.plugin.fwaas_callbacks, '_get_port_firewall_group_id', return_value=FAKE_FW_ID),\ mock.patch.object(self.plugin.fwaas_callbacks, '_get_fw_group_from_plugin', return_value=firewall),\ mock.patch.object(edge_utils, "update_firewall") as update_fw,\ mock.patch.object(edge_utils, 'get_router_edge_id', return_value='edge-1'),\ mock.patch.object(self.plugin.edge_manager, 'get_plr_by_tlr_id', return_value=FAKE_ROUTER_ID),\ mock.patch.object(self.plugin, '_get_router', return_value=self.distributed_router),\ 
mock.patch.object(self.plugin, 'get_router', return_value=self.distributed_router): func('nsx', apply_list, firewall) expected_rules = self._fake_translated_rules( rule_list, None, is_ingress=is_ingress) + [ {'name': "Block port ingress", 'action': edge_firewall_driver.FWAAS_DENY, 'logged': False}, {'name': "Block port egress", 'action': edge_firewall_driver.FWAAS_DENY, 'logged': False}] update_fw.assert_called_once_with( self.plugin.nsx_v, mock.ANY, FAKE_ROUTER_ID, {'firewall_rule_list': expected_rules}) def test_create_dist_router_firewall_with_ingress_rules(self): self._setup_dist_router_firewall_with_rules( self.firewall.create_firewall_group) def test_update_dist_router_firewall_with_ingress_rules(self): self._setup_dist_router_firewall_with_rules( self.firewall.update_firewall_group) def test_create_dist_router_firewall_with_egress_rules(self): self._setup_dist_router_firewall_with_rules( self.firewall.create_firewall_group, is_ingress=False) def test_create_dist_router_firewall_with_illegal_cidr(self): self._setup_dist_router_firewall_with_rules( self.firewall.create_firewall_group, cidr='0.0.0.0/24') def test_update_dist_router_firewall_with_egress_rules(self): self._setup_dist_router_firewall_with_rules( self.firewall.update_firewall_group, is_ingress=False) def test_update_dist_router_firewall_with_egress_conflicting_rules(self): self._setup_dist_router_firewall_with_rules( self.firewall.update_firewall_group, is_ingress=False, is_conflict=True) def test_update_dist_router_firewall_with_ingress_conflicting_rules(self): self._setup_dist_router_firewall_with_rules( self.firewall.update_firewall_group, is_ingress=True, is_conflict=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_v/test_lbaas_common.py0000644000175000017500000001211500000000000026711 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. 
# All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.tests import base from vmware_nsx.plugins.nsx_v.vshield import vcns_driver from vmware_nsx.services.lbaas.nsx_v import lbaas_common as lb_common EDGE_ID = 'edge-x' POOL_ID = 'b3dfb476-6fdf-4ddd-b6bd-e86ae78dc30b' def firewall_section_maker(if_ip_list, vip_ip_list): return ( '
' + POOL_ID + 'allow' 'Ipv4Address' + ','.join(if_ip_list) + '' 'Ipv4Address' + ','.join(vip_ip_list) + '' '
') def if_maker(ip_list): intf = { 'index': 1, 'name': 'internal1', 'addressGroups': { 'addressGroups': [ {'subnetPrefixLength': '24', 'secondaryAddresses': { 'ipAddress': ip_list, 'type': 'secondary_addresses'}, 'primaryAddress': '10.0.0.1', 'subnetMask': '255.255.255.0'}]}, 'portgroupName': 'pg1234', 'label': 'vNic_1', 'type': 'internal', 'portgroupId': 'virtualwire-31'} return intf def if_list_maker(ip_list): if_list = { 'vnics': [ {'index': 0, 'name': 'external', 'addressGroups': { 'addressGroups': [ {'subnetMask': '255.255.255.0', 'primaryAddress': '172.24.4.2', 'subnetPrefixLength': '24'}]}, 'portgroupName': 'VM Network', 'label': 'vNic_0', 'type': 'uplink', 'portgroupId': 'network-13'}, {'index': 1, 'name': 'internal1', 'addressGroups': { 'addressGroups': [ {'subnetPrefixLength': '24', 'secondaryAddresses': { 'ipAddress': ip_list, 'type': 'secondary_addresses'}, 'primaryAddress': '10.0.0.1', 'subnetMask': '255.255.255.0'}]}, 'portgroupName': 'pg1234', 'label': 'vNic_1', 'type': 'internal', 'portgroupId': 'virtualwire-31'}, {'index': 2, 'name': 'vnic2', 'addressGroups': {'addressGroups': []}, 'label': 'vNic_2', 'type': 'internal'}, {'index': 3, 'name': 'vnic3', 'addressGroups': {'addressGroups': []}, 'label': 'vNic_3', 'type': 'internal'}]} return if_list class TestLbaasCommon(base.BaseTestCase): def setUp(self): super(TestLbaasCommon, self).setUp() callbacks = mock.Mock() callbacks.plugin = mock.Mock() self.edge_driver = vcns_driver.VcnsDriver(callbacks) self.edge_driver._lb_driver_prop = mock.Mock() def _mock_edge_driver_vcns(self, attr): return mock.patch.object(self.edge_driver.vcns, attr) def test_add_vip_as_secondary_ip(self): update_if = if_maker(['10.0.0.6', '10.0.0.8']) with self._mock_edge_driver_vcns('get_interfaces') as mock_get_if,\ self._mock_edge_driver_vcns( 'update_interface') as mock_update_if: mock_get_if.return_value = (None, if_list_maker(['10.0.0.6'])) lb_common.add_vip_as_secondary_ip( self.edge_driver.vcns, EDGE_ID, '10.0.0.8') 
mock_update_if.assert_called_with(EDGE_ID, update_if) def test_del_vip_as_secondary_ip(self): update_if = if_maker(['10.0.0.6']) with self._mock_edge_driver_vcns('get_interfaces') as mock_get_if,\ self._mock_edge_driver_vcns( 'update_interface') as mock_update_if: mock_get_if.return_value = (None, if_list_maker(['10.0.0.6', '10.0.0.8'])) lb_common.del_vip_as_secondary_ip( self.edge_driver.vcns, EDGE_ID, '10.0.0.8') mock_update_if.assert_called_with(EDGE_ID, update_if) def test_get_edge_ip_addresses(self): get_if_list = if_list_maker(['10.0.0.6']) with mock.patch.object(self.edge_driver.vcns, 'get_interfaces', return_value=(None, get_if_list)): ip_list = lb_common.get_edge_ip_addresses(self.edge_driver.vcns, EDGE_ID) self.assertEqual(['172.24.4.2', '10.0.0.1'], ip_list) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_v/test_misc.py0000644000175000017500000000632700000000000025222 0ustar00coreycorey00000000000000# Copyright (c) 2014 VMware. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from neutron.tests import base from vmware_nsx.plugins.nsx_v.vshield.common import exceptions from vmware_nsx.plugins.nsx_v.vshield import vcns def raise_until_attempt(attempt, exception): def raises_until(): if raises_until.current_attempt < attempt: raises_until.current_attempt += 1 raise exception else: return raises_until.current_attempt raises_until.current_attempt = 1 return raises_until class TestMisc(base.BaseTestCase): response = """
Dummy
1 core-services
""" def test_retry_on_exception_one_attempt(self): success_on_first_attempt = raise_until_attempt( 1, exceptions.RequestBad(uri='', response='')) should_return_one = vcns.retry_upon_exception( exceptions.RequestBad, max_attempts=1)(success_on_first_attempt) self.assertEqual(1, should_return_one()) def test_retry_on_exception_five_attempts(self): success_on_fifth_attempt = raise_until_attempt( 5, exceptions.RequestBad(uri='', response='')) should_return_five = vcns.retry_upon_exception( exceptions.RequestBad, max_attempts=10)(success_on_fifth_attempt) self.assertEqual(5, should_return_five()) def test_retry_on_exception_exceed_attempts(self): success_on_fifth_attempt = raise_until_attempt( 5, exceptions.RequestBad(uri='', response='')) should_raise = vcns.retry_upon_exception( exceptions.RequestBad, max_attempts=4)(success_on_fifth_attempt) self.assertRaises(exceptions.RequestBad, should_raise) def test_retry_on_exception_exclude_error_codes_retry(self): success_on_fifth_attempt = raise_until_attempt( 5, exceptions.RequestBad(uri='', response=self.response)) # excluding another error code, so should retry should_return_five = vcns.retry_upon_exception_exclude_error_codes( exceptions.RequestBad, [2], max_attempts=10)(success_on_fifth_attempt) self.assertEqual(5, should_return_five()) def test_retry_on_exception_exclude_error_codes_raise(self): success_on_fifth_attempt = raise_until_attempt( 5, exceptions.RequestBad(uri='', response=self.response)) # excluding the returned error code, so no retries are expected should_raise = vcns.retry_upon_exception_exclude_error_codes( exceptions.RequestBad, [1], max_attempts=10)(success_on_fifth_attempt) self.assertRaises(exceptions.RequestBad, should_raise) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_v/test_nsxv_loadbalancer.py0000644000175000017500000001107200000000000027745 0ustar00coreycorey00000000000000# 
Copyright 2014 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_serialization import jsonutils from neutron.tests import base from vmware_nsx.plugins.nsx_v.vshield import nsxv_loadbalancer from vmware_nsx.plugins.nsx_v.vshield import vcns class NsxvLoadbalancerTestCase(base.BaseTestCase): EDGE_OBJ_JSON = ( '{"accelerationEnabled":false,"applicationProfile":[{' '"applicationProfileId":"applicationProfile-1","insertXForwardedFor":' 'false,"name":"MDSrvProxy","persistence":{"cookieMode":"insert",' '"cookieName":"JSESSIONID","expire":"30","method":"cookie"},' '"serverSslEnabled":false,"sslPassthrough":false,"template":"HTTP"}],' '"applicationRule":[],"enableServiceInsertion":false,"enabled":true,' '"featureType":"loadbalancer_4.0","logging":{"enable":false,' '"logLevel":"info"},"monitor":[{"interval":10,"maxRetries":3,"method":' '"GET","monitorId":"monitor-1","name":"MDSrvMon","timeout":15,"type":' '"http","url":"/"}],"pool":[{"algorithm":"round-robin",' '"applicationRuleId":[],"member":[{"condition":"enabled","ipAddress":' '"192.168.0.39","maxConn":0,"memberId":"member-1","minConn":0,' '"monitorPort":8775,"name":"Member-1","port":8775,"weight":1}],' '"monitorId":["monitor-1"],"name":"MDSrvPool","poolId":"pool-1",' '"transparent":false}],"version":6,"virtualServer":[{' '"accelerationEnabled":false,"applicationProfileId":' '"applicationProfile-1","applicationRuleId":[],"connectionLimit":0,' 
'"defaultPoolId":"pool-1","enableServiceInsertion":false,' '"enabled":true,"ipAddress":"169.254.0.3","name":"MdSrv",' '"port":"8775","protocol":"http","virtualServerId":' '"virtualServer-1"}]}') OUT_OBJ_JSON = ( '{"accelerationEnabled": false, "applicationProfile": [{' '"applicationProfileId": "applicationProfile-1", ' '"insertXForwardedFor": false, "name": "MDSrvProxy", "persistence": ' '{"expire": "30", "method": "cookie"}, "serverSslEnabled": false, ' '"sslPassthrough": false, "template": "HTTP"}],' ' "enableServiceInsertion": false, "enabled": true, "featureType": ' '"loadbalancer_4.0", "monitor": [{"interval": 10, "maxRetries": 3, ' '"method": "GET", "monitorId": "monitor-1", "name": "MDSrvMon", ' '"timeout": 15, "type": "http", "url": "/"}], "pool": [{"algorithm":' ' "round-robin", "member": [{"condition": "enabled", "ipAddress": ' '"192.168.0.39", "maxConn": 0, "memberId": "member-1", "minConn": 0, ' '"monitorPort": 8775, "name": "Member-1", "port": 8775, "weight": 1}],' ' "monitorId": ["monitor-1"], "name": "MDSrvPool", "poolId": "pool-1",' ' "transparent": false}], "virtualServer": [{"accelerationEnabled": ' 'false, "applicationProfileId": "applicationProfile-1", ' '"connectionLimit": 0, "defaultPoolId": "pool-1", ' '"enableServiceInsertion": false, "enabled": true, "ipAddress": ' '"169.254.0.3", "name": "MdSrv", "port": "8775", "protocol": ' '"http", "virtualServerId": "virtualServer-1"}]}') LB_URI = '/api/4.0/edges/%s/loadbalancer/config' EDGE_1 = 'edge-x' EDGE_2 = 'edge-y' def setUp(self): super(NsxvLoadbalancerTestCase, self).setUp() self._lb = nsxv_loadbalancer.NsxvLoadbalancer() self._vcns = vcns.Vcns(None, None, None, None, True) def test_get_edge_loadbalancer(self): h = None v = jsonutils.loads(self.EDGE_OBJ_JSON) with mock.patch.object(self._vcns, 'do_request', return_value=(h, v)) as mock_do_request: lb = nsxv_loadbalancer.NsxvLoadbalancer.get_loadbalancer( self._vcns, self.EDGE_1) lb.submit_to_backend(self._vcns, self.EDGE_2) 
mock_do_request.assert_called_with( vcns.HTTP_PUT, self.LB_URI % self.EDGE_2, self.OUT_OBJ_JSON, format='json', encode=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_v/test_plugin.py0000644000175000017500000111203200000000000025555 0ustar00coreycorey00000000000000# Copyright (c) 2012 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import contextlib import copy import re import decorator from eventlet import greenthread import mock import netaddr from neutron.db import securitygroups_db as sg_db from neutron.extensions import address_scope from neutron.extensions import l3 from neutron.extensions import securitygroup as secgrp from neutron.tests.unit import _test_extension_portbindings as test_bindings import neutron.tests.unit.db.test_allowedaddresspairs_db as test_addr_pair import neutron.tests.unit.db.test_db_base_plugin_v2 as test_plugin from neutron.tests.unit.extensions import base as extension from neutron.tests.unit.extensions import test_address_scope from neutron.tests.unit.extensions import test_extra_dhcp_opt as test_dhcpopts import neutron.tests.unit.extensions.test_l3 as test_l3_plugin import neutron.tests.unit.extensions.test_l3_ext_gw_mode as test_ext_gw_mode import neutron.tests.unit.extensions.test_portsecurity as test_psec import neutron.tests.unit.extensions.test_securitygroup as ext_sg from neutron.tests.unit import 
testlib_api from neutron_lib.api.definitions import allowedaddresspairs as addrp_apidef from neutron_lib.api.definitions import dvr as dvr_apidef from neutron_lib.api.definitions import external_net as extnet_apidef from neutron_lib.api.definitions import extra_dhcp_opt as edo_ext from neutron_lib.api.definitions import l3 as l3_apidef from neutron_lib.api.definitions import l3_ext_gw_mode as l3_egm_apidef from neutron_lib.api.definitions import l3_flavors as l3fav_apidef from neutron_lib.api.definitions import port_security as psec from neutron_lib.api.definitions import portbindings from neutron_lib.api.definitions import provider_net as pnet from neutron_lib.api.definitions import router_availability_zone as raz_apidef from neutron_lib.api import validators from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants from neutron_lib import context from neutron_lib.db import resource_extend from neutron_lib import exceptions as n_exc from neutron_lib.plugins import constants as plugin_const from neutron_lib.plugins import directory from neutron_lib.plugins import utils from neutron_lib.services.qos import constants as qos_consts from neutron_lib.utils import helpers from oslo_config import cfg from oslo_utils import uuidutils import six from testtools import matchers import webob.exc from vmware_nsx._i18n import _ from vmware_nsx.common import config from vmware_nsx.common import exceptions as nsxv_exc from vmware_nsx.common import nsx_constants from vmware_nsx.common import nsxv_constants from vmware_nsx.common import utils as c_utils from vmware_nsx.db import nsxv_db from vmware_nsx.dvs import dvs from vmware_nsx.dvs import dvs_utils from vmware_nsx.extensions import projectpluginmap from vmware_nsx.extensions import routersize as router_size from vmware_nsx.extensions import routertype as router_type from vmware_nsx.extensions import vnicindex as ext_vnic_idx 
from vmware_nsx.plugins.common import plugin as com_plugin from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az from vmware_nsx.plugins.nsx_v.drivers import ( distributed_router_driver as dist_router_driver) from vmware_nsx.plugins.nsx_v.drivers import ( exclusive_router_driver as ex_router_driver) from vmware_nsx.plugins.nsx_v.drivers import ( shared_router_driver as router_driver) from vmware_nsx.plugins.nsx_v import md_proxy from vmware_nsx.plugins.nsx_v.vshield.common import constants as vcns_const from vmware_nsx.plugins.nsx_v.vshield.common import exceptions as vcns_exc from vmware_nsx.plugins.nsx_v.vshield import edge_appliance_driver from vmware_nsx.plugins.nsx_v.vshield import edge_firewall_driver from vmware_nsx.plugins.nsx_v.vshield import edge_utils from vmware_nsx.services.qos.nsx_v import utils as qos_utils from vmware_nsx.tests import unit as vmware from vmware_nsx.tests.unit.extensions import test_vnic_index from vmware_nsx.tests.unit.nsx_v.vshield import fake_vcns from vmware_nsx.tests.unit import test_utils PLUGIN_NAME = 'vmware_nsx.plugin.NsxVPlugin' _uuid = uuidutils.generate_uuid def set_az_in_config(name, resource_pool_id="respool-7", datastore_id="datastore-7", edge_ha=False, ha_datastore_id=None): group_name = 'az:%s' % name cfg.CONF.set_override('availability_zones', [name], group="nsxv") config.register_nsxv_azs(cfg.CONF, [name]) cfg.CONF.set_override("resource_pool_id", resource_pool_id, group=group_name) cfg.CONF.set_override("datastore_id", datastore_id, group=group_name) cfg.CONF.set_override("edge_ha", edge_ha, group=group_name) cfg.CONF.set_override("ha_datastore_id", ha_datastore_id, group=group_name) # Override subnet creation in some tests to create a subnet with dhcp # disabled @decorator.decorator def with_no_dhcp_subnet(f, *args, **kwargs): obj = args[0] obj.subnet = obj.no_dhcp_subnet result = f(*args, **kwargs) obj.subnet = obj.original_subnet return result class 
NsxVPluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase): def _create_network(self, fmt, name, admin_state_up, arg_list=None, providernet_args=None, set_context=False, tenant_id=None, **kwargs): tenant_id = tenant_id or self._tenant_id data = {'network': {'name': name, 'admin_state_up': admin_state_up, 'tenant_id': tenant_id}} # Fix to allow the router:external attribute and any other # attributes containing a colon to be passed with # a double underscore instead kwargs = dict((k.replace('__', ':'), v) for k, v in kwargs.items()) if extnet_apidef.EXTERNAL in kwargs: arg_list = (extnet_apidef.EXTERNAL, ) + (arg_list or ()) attrs = kwargs if providernet_args: attrs.update(providernet_args) for arg in (('admin_state_up', 'tenant_id', 'shared') + (arg_list or ())): # Arg must be present and not empty if arg in kwargs: data['network'][arg] = kwargs[arg] network_req = self.new_create_request('networks', data, fmt) if set_context and tenant_id: # create a specific auth context for this request network_req.environ['neutron.context'] = context.Context( '', tenant_id) return network_req.get_response(self.api) @contextlib.contextmanager def subnet(self, network=None, **kwargs): # Override the subnet method to automatically disable dhcp on external # subnets or ipv6 subnets, unless specified. 
set_context = kwargs.get('set_context', False) with test_plugin.optional_ctx( network, self.network, set_context=set_context, tenant_id=kwargs.get('tenant_id')) as network_to_use: if 'enable_dhcp' not in kwargs: if kwargs.get('ip_version') == 6: kwargs['enable_dhcp'] = False else: # Read the network itself, as the network in the args # does not content this value net = self._show('networks', network_to_use['network']['id']) if net['network']['router:external']: kwargs['enable_dhcp'] = False subnet = self._make_subnet(self.fmt, network_to_use, kwargs.get( 'gateway_ip', constants.ATTR_NOT_SPECIFIED), kwargs.get('cidr', '10.0.0.0/24'), kwargs.get('subnetpool_id'), kwargs.get('allocation_pools'), kwargs.get('ip_version', 4), kwargs.get('enable_dhcp', True), kwargs.get('dns_nameservers'), kwargs.get('host_routes'), segment_id=kwargs.get('segment_id'), shared=kwargs.get('shared'), ipv6_ra_mode=kwargs.get('ipv6_ra_mode'), ipv6_address_mode=kwargs.get( 'ipv6_address_mode'), tenant_id=kwargs.get('tenant_id'), set_context=set_context) yield subnet @mock.patch.object(edge_utils.EdgeManager, '_deploy_edge') def setUp(self, mock_deploy_edge, plugin=PLUGIN_NAME, ext_mgr=None, service_plugins=None, with_md_proxy=True, with_octavia=False, **kwargs): test_utils.override_nsx_ini_test() mock_vcns = mock.patch(vmware.VCNS_NAME, autospec=True) mock_vcns_instance = mock_vcns.start() self.fc2 = fake_vcns.FakeVcns() mock_vcns_instance.return_value = self.fc2 edge_utils.query_dhcp_service_config = mock.Mock(return_value=[]) self.mock_create_dhcp_service = mock.patch("%s.%s" % ( vmware.EDGE_MANAGE_NAME, 'create_dhcp_edge_service')) self.mock_create_dhcp_service.start() mock_update_dhcp_service = mock.patch("%s.%s" % ( vmware.EDGE_MANAGE_NAME, 'update_dhcp_edge_service')) mock_update_dhcp_service.start() mock_delete_dhcp_service = mock.patch("%s.%s" % ( vmware.EDGE_MANAGE_NAME, 'delete_dhcp_edge_service')) mock_delete_dhcp_service.start() mock_check_backup_edge_pools = mock.patch("%s.%s" % ( 
vmware.EDGE_MANAGE_NAME, '_check_backup_edge_pools')) mock_check_backup_edge_pools.start() mock_deploy_backup_edges_at_backend = mock.patch("%s.%s" % ( vmware.EDGE_MANAGE_NAME, '_deploy_backup_edges_at_backend')) mock_deploy_backup_edges_at_backend.start() mock.patch( 'neutron_lib.rpc.Connection.consume_in_threads', return_value=[]).start() self.default_res_pool = 'respool-28' cfg.CONF.set_override("resource_pool_id", self.default_res_pool, group="nsxv") set_az_in_config('az7') # Add the metadata configuration self.with_md_proxy = with_md_proxy if self.with_md_proxy: cfg.CONF.set_override('mgt_net_moid', 'net-1', group="nsxv") cfg.CONF.set_override('mgt_net_proxy_ips', ['2.2.2.2'], group="nsxv") cfg.CONF.set_override('mgt_net_proxy_netmask', '255.255.255.0', group="nsxv") cfg.CONF.set_override('mgt_net_default_gateway', '1.1.1.1', group="nsxv") cfg.CONF.set_override('nova_metadata_ips', ['3.3.3.3'], group="nsxv") # Add some mocks required for the md code mock.patch.object(edge_utils, "update_internal_interface").start() # Skip Octavia init because of RPC conflicts if not with_octavia: mock.patch("vmware_nsx.services.lbaas.octavia.octavia_listener." "NSXOctaviaListener.__init__", return_value=None).start() mock.patch("vmware_nsx.services.lbaas.octavia.octavia_listener." 
"NSXOctaviaStatisticsCollector.__init__", return_value=None).start() if service_plugins is not None: # override the service plugins only if specified directly super(NsxVPluginV2TestCase, self).setUp( plugin=plugin, service_plugins=service_plugins, ext_mgr=ext_mgr) else: super(NsxVPluginV2TestCase, self).setUp( plugin=plugin, ext_mgr=ext_mgr) self.addCleanup(self.fc2.reset_all) plugin_instance = directory.get_plugin() # handle TVD plugin case if plugin_instance.is_tvd_plugin(): plugin_instance = plugin_instance.get_plugin_by_type( projectpluginmap.NsxPlugins.NSX_V) plugin_instance.real_get_edge = plugin_instance._get_edge_id_by_rtr_id plugin_instance._get_edge_id_by_rtr_id = mock.Mock() plugin_instance._get_edge_id_by_rtr_id.return_value = False plugin_instance._get_edge_id_and_az_by_rtr_id = mock.Mock() plugin_instance._get_edge_id_and_az_by_rtr_id.return_value = ( False, False) # call init_complete manually. The event is not called in unit tests plugin_instance.init_complete(None, None, {}) self.context = context.get_admin_context() self.original_subnet = self.subnet self.internal_net_id = None if self.with_md_proxy: self.internal_net_id = nsxv_db.get_nsxv_internal_network_for_az( self.context.session, vcns_const.InternalEdgePurposes.INTER_EDGE_PURPOSE, 'default')['network_id'] def no_dhcp_subnet(self, *args, **kwargs): if 'enable_dhcp' in kwargs: return self.original_subnet(*args, **kwargs) return self.original_subnet(*args, enable_dhcp=False, **kwargs) def _get_core_plugin_with_dvs(self): # enable dvs features to allow policy with QOS cfg.CONF.set_default('use_dvs_features', True, 'nsxv') plugin = directory.get_plugin() with mock.patch.object(dvs_utils, 'dvs_create_session'): plugin._vcm = dvs.VCManager() return plugin def _remove_md_proxy_from_list(self, items): for r in items[:]: if (r.get('tenant_id') == nsxv_constants.INTERNAL_TENANT_ID or r.get('name') == 'inter-edge-net'): items.remove(r) def deserialize(self, content_type, response): """Override list 
actions to skip metadata internal objects This will allow most tests to run with mdproxy """ ctype = 'application/%s' % content_type data = self._deserializers[ctype].deserialize(response.body)['body'] for resource in ['networks', 'subnets', 'ports']: if data.get(resource): self._remove_md_proxy_from_list(data[resource]) return data def _list(self, resource, fmt=None, neutron_context=None, query_params=None, expected_code=webob.exc.HTTPOk.code): fmt = fmt or self.fmt req = self.new_list_request(resource, fmt, query_params) if neutron_context: req.environ['neutron.context'] = neutron_context res = req.get_response(self._api_for_resource(resource)) self.assertEqual(expected_code, res.status_int) if query_params and '_id=' in query_params: # Do not remove objects if their id was requested specifically return super(NsxVPluginV2TestCase, self).deserialize(fmt, res) else: return self.deserialize(fmt, res) def _test_list_with_pagination(self, resource, items, sort, limit, expected_page_num, resources=None, query_params='', verify_key='id'): """Override list actions to skip metadata internal objects This will allow most tests to run with mdproxy """ if not resources: resources = '%ss' % resource query_str = query_params + '&' if query_params else '' query_str = query_str + ("limit=%s&sort_key=%s&" "sort_dir=%s") % (limit, sort[0], sort[1]) req = self.new_list_request(resources, params=query_str) items_res = [] page_num = 0 api = self._api_for_resource(resources) resource = resource.replace('-', '_') resources = resources.replace('-', '_') while req: page_num = page_num + 1 res = super(NsxVPluginV2TestCase, self).deserialize( self.fmt, req.get_response(api)) self.assertThat(len(res[resources]), matchers.LessThan(limit + 1)) items_res = items_res + res[resources] req = None if '%s_links' % resources in res: for link in res['%s_links' % resources]: if link['rel'] == 'next': content_type = 'application/%s' % self.fmt req = testlib_api.create_request(link['href'], '', 
content_type) self.assertEqual(len(res[resources]), limit) # skip md-proxy objects orig_items_num = len(items_res) self._remove_md_proxy_from_list(items_res) # Test number of pages only if no mdproxy entries were removed if orig_items_num == len(items_res): self.assertEqual(expected_page_num, page_num) self.assertEqual([item[resource][verify_key] for item in items], [n[verify_key] for n in items_res]) def _test_list_with_pagination_reverse(self, resource, items, sort, limit, expected_page_num, resources=None, query_params=''): """Override list actions to skip metadata internal objects This will allow most tests to run with mdproxy """ if not resources: resources = '%ss' % resource resource = resource.replace('-', '_') api = self._api_for_resource(resources) marker = items[-1][resource]['id'] query_str = query_params + '&' if query_params else '' query_str = query_str + ("limit=%s&page_reverse=True&" "sort_key=%s&sort_dir=%s&" "marker=%s") % (limit, sort[0], sort[1], marker) req = self.new_list_request(resources, params=query_str) item_res = [items[-1][resource]] page_num = 0 resources = resources.replace('-', '_') while req: page_num = page_num + 1 res = super(NsxVPluginV2TestCase, self).deserialize( self.fmt, req.get_response(api)) self.assertThat(len(res[resources]), matchers.LessThan(limit + 1)) res[resources].reverse() item_res = item_res + res[resources] req = None if '%s_links' % resources in res: for link in res['%s_links' % resources]: if link['rel'] == 'previous': content_type = 'application/%s' % self.fmt req = testlib_api.create_request(link['href'], '', content_type) self.assertEqual(len(res[resources]), limit) # skip md-proxy objects orig_items_num = len(item_res) self._remove_md_proxy_from_list(item_res) # Test number of pages only if no mdproxy entries were removed if orig_items_num == len(item_res): self.assertEqual(expected_page_num, page_num) expected_res = [item[resource]['id'] for item in items] expected_res.reverse() 
self.assertEqual(expected_res, [n['id'] for n in item_res]) class TestNetworksV2(test_plugin.TestNetworksV2, NsxVPluginV2TestCase): def _test_create_bridge_network(self, vlan_id=0): net_type = vlan_id and 'vlan' or 'flat' name = 'bridge_net' expected = [('subnets', []), ('name', name), ('admin_state_up', True), ('status', 'ACTIVE'), ('shared', False), (pnet.NETWORK_TYPE, net_type), (pnet.PHYSICAL_NETWORK, 'tzuuid'), (pnet.SEGMENTATION_ID, vlan_id)] providernet_args = {pnet.NETWORK_TYPE: net_type, pnet.PHYSICAL_NETWORK: 'tzuuid'} if vlan_id: providernet_args[pnet.SEGMENTATION_ID] = vlan_id with self.network(name=name, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, pnet.SEGMENTATION_ID)) as net: for k, v in expected: self.assertEqual(net['network'][k], v) def test_create_bridge_network(self): self._test_create_bridge_network() def test_create_bridge_vlan_network(self): self._test_create_bridge_network(vlan_id=123) def test_get_vlan_network_name(self): p = directory.get_plugin() net_id = uuidutils.generate_uuid() dvs_id = 'dvs-10' net = {'name': '', 'id': net_id} # Empty net['name'] should yield dvs_id-net_id as a name for the # port group. expected = '%s-%s' % (dvs_id, net_id) self.assertEqual(expected, p._get_vlan_network_name(net, dvs_id)) # If network name is provided then it should yield # dvs_id-net_name-net_id as a name for the port group. net = {'name': 'pele', 'id': net_id} expected = '%s-%s-%s' % (dvs_id, 'pele', net_id) self.assertEqual(expected, p._get_vlan_network_name(net, dvs_id)) name = 'X' * 500 net = {'name': name, 'id': net_id} expected = '%s-%s-%s' % (dvs_id, name[:36], net_id) self.assertEqual(expected, p._get_vlan_network_name(net, dvs_id)) def test_get_vlan_network_name_with_net_name_missing(self): p = directory.get_plugin() net_id = uuidutils.generate_uuid() dvs_id = 'dvs-10' net = {'id': net_id} # Missing net['name'] should yield dvs_id-net_id as a name for the # port group. 
expected = '%s-%s' % (dvs_id, net_id) self.assertEqual(expected, p._get_vlan_network_name(net, dvs_id)) def _test_generate_tag(self, vlan_id): net_type = 'vlan' name = 'bridge_net' plugin = directory.get_plugin() plugin._network_vlans = utils.parse_network_vlan_ranges( cfg.CONF.nsxv.network_vlan_ranges) expected = [('subnets', []), ('name', name), ('admin_state_up', True), ('status', 'ACTIVE'), ('shared', False), (pnet.NETWORK_TYPE, net_type), (pnet.PHYSICAL_NETWORK, 'dvs-70'), (pnet.SEGMENTATION_ID, vlan_id)] providernet_args = {pnet.NETWORK_TYPE: net_type, pnet.PHYSICAL_NETWORK: 'dvs-70'} with self.network(name=name, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK)) as net: for k, v in expected: self.assertEqual(net['network'][k], v) def test_create_bridge_vlan_generate(self): cfg.CONF.set_default('network_vlan_ranges', 'dvs-70', 'nsxv') self._test_generate_tag(1) def test_create_bridge_vlan_generate_range(self): cfg.CONF.set_default('network_vlan_ranges', 'dvs-70:100:110', 'nsxv') self._test_generate_tag(100) def test_create_bridge_vlan_network_outofrange_returns_400(self): with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: self._test_create_bridge_network(vlan_id=5000) self.assertEqual(ctx_manager.exception.code, 400) def test_create_external_portgroup_network(self): name = 'ext_net' expected = [('subnets', []), ('name', name), ('admin_state_up', True), ('status', 'ACTIVE'), ('shared', False), (extnet_apidef.EXTERNAL, True), (pnet.NETWORK_TYPE, 'portgroup'), (pnet.PHYSICAL_NETWORK, 'tzuuid')] providernet_args = {pnet.NETWORK_TYPE: 'portgroup', pnet.PHYSICAL_NETWORK: 'tzuuid', extnet_apidef.EXTERNAL: True} with self.network(name=name, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, extnet_apidef.EXTERNAL)) as net: for k, v in expected: self.assertEqual(net['network'][k], v) def test_create_portgroup_network(self): name = 'pg_net' expected = [('subnets', []), 
('name', name), ('admin_state_up', True), ('status', 'ACTIVE'), ('shared', False), (pnet.NETWORK_TYPE, 'portgroup'), (pnet.PHYSICAL_NETWORK, 'tzuuid')] providernet_args = {pnet.NETWORK_TYPE: 'portgroup', pnet.PHYSICAL_NETWORK: 'tzuuid'} with self.network(name=name, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK)) as net: for k, v in expected: self.assertEqual(net['network'][k], v) # try to create another one on the same physical net will failure res = self._create_network( self.fmt, name, True, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK)) data = self.deserialize(self.fmt, res) self.assertIn('NeutronError', data) def test_delete_network_after_removing_subnet(self): gateway_ip = '10.0.0.1' cidr = '10.0.0.0/24' fmt = 'json' # Create new network res = self._create_network(fmt=fmt, name='net', admin_state_up=True) network = self.deserialize(fmt, res) subnet = self._make_subnet(fmt, network, gateway_ip, cidr, ip_version=4) req = self.new_delete_request('subnets', subnet['subnet']['id']) sub_del_res = req.get_response(self.api) self.assertEqual(sub_del_res.status_int, 204) req = self.new_delete_request('networks', network['network']['id']) net_del_res = req.get_response(self.api) self.assertEqual(net_del_res.status_int, 204) def test_list_networks_with_shared(self): with self.network(name='net1'): with self.network(name='net2', shared=True): req = self.new_list_request('networks') res = self.deserialize('json', req.get_response(self.api)) ###self._remove_md_proxy_from_list(res['networks']) self.assertEqual(len(res['networks']), 2) req_2 = self.new_list_request('networks') req_2.environ['neutron.context'] = context.Context('', 'somebody') res = self.deserialize('json', req_2.get_response(self.api)) ###self._remove_md_proxy_from_list(res['networks']) # tenant must see a single network self.assertEqual(len(res['networks']), 1) def test_create_network_name_exceeds_40_chars(self): name = 
'this_is_a_network_whose_name_is_longer_than_40_chars' with self.network(name=name) as net: # Assert neutron name is not truncated self.assertEqual(net['network']['name'], name) def test_create_update_network_allow_multiple_addresses_spoofguard(self): # allow_multiple_addresses flag is True, first step is to check that # when port-security-allowed is false - spoofguard policy is not # created. # next step is to update port-security-allowed to true - spoofguard # policy is now created for this network. q_context = context.Context('', 'tenant_1') providernet_args = {psec.PORTSECURITY: False} cfg.CONF.set_default('allow_multiple_ip_addresses', True, 'nsxv') res = self._create_network(fmt='json', name='net-1', admin_state_up=True, providernet_args=providernet_args, arg_list=(psec.PORTSECURITY,)) network1 = self.deserialize(self.fmt, res) net1_id = network1['network']['id'] # not creating spoofguard policy self.assertIsNone(nsxv_db.get_spoofguard_policy_id(q_context.session, net1_id)) args = {'network': {psec.PORTSECURITY: True}} req = self.new_update_request('networks', args, network1['network']['id'], fmt='json') res = self.deserialize('json', req.get_response(self.api)) net1_id = res['network']['id'] # creating spoofguard policy self.assertIsNotNone(nsxv_db.get_spoofguard_policy_id( q_context.session, net1_id)) def test_update_network_with_admin_false(self): data = {'network': {'admin_state_up': False}} with self.network() as net: plugin = directory.get_plugin() self.assertRaises(NotImplementedError, plugin.update_network, context.get_admin_context(), net['network']['id'], data) def test_create_extend_dvs_provider_network(self): name = 'provider_net' expected = [('subnets', []), ('name', name), ('admin_state_up', True), ('status', 'ACTIVE'), ('shared', False), (pnet.NETWORK_TYPE, 'flat'), (pnet.PHYSICAL_NETWORK, 'dvs-uuid')] providernet_args = {pnet.NETWORK_TYPE: 'flat', pnet.PHYSICAL_NETWORK: 'dvs-uuid'} with self.network(name=name, 
providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK)) as net: for k, v in expected: self.assertEqual(net['network'][k], v) def test_create_same_vlan_network_with_different_dvs(self): name = 'dvs-provider-net' expected = [('subnets', []), ('name', name), ('admin_state_up', True), ('status', 'ACTIVE'), ('shared', False), (pnet.NETWORK_TYPE, 'vlan'), (pnet.SEGMENTATION_ID, 43), (pnet.PHYSICAL_NETWORK, 'dvs-uuid-1')] providernet_args = {pnet.NETWORK_TYPE: 'vlan', pnet.SEGMENTATION_ID: 43, pnet.PHYSICAL_NETWORK: 'dvs-uuid-1'} with self.network(name=name, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID, pnet.PHYSICAL_NETWORK)) as net: for k, v in expected: self.assertEqual(net['network'][k], v) expected_same_vlan = [(pnet.NETWORK_TYPE, 'vlan'), (pnet.SEGMENTATION_ID, 43), (pnet.PHYSICAL_NETWORK, 'dvs-uuid-2')] providernet_args_1 = {pnet.NETWORK_TYPE: 'vlan', pnet.SEGMENTATION_ID: 43, pnet.PHYSICAL_NETWORK: 'dvs-uuid-2'} with self.network(name=name, providernet_args=providernet_args_1, arg_list=(pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID, pnet.PHYSICAL_NETWORK)) as net1: for k, v in expected_same_vlan: self.assertEqual(net1['network'][k], v) def test_create_vlan_network_with_multiple_dvs(self): name = 'multi-dvs-vlan-net' providernet_args = {pnet.NETWORK_TYPE: 'vlan', pnet.SEGMENTATION_ID: 100, pnet.PHYSICAL_NETWORK: 'dvs-1, dvs-2, dvs-3'} p = directory.get_plugin() with mock.patch.object( p, '_create_vlan_network_at_backend', # Return three netmorefs as side effect side_effect=[_uuid(), _uuid(), _uuid()]) as vlan_net_call: with self.network(name=name, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID, pnet.PHYSICAL_NETWORK)): # _create_vlan_network_at_backend is expected to be called # three times since we have three DVS IDs in the physical # network attribute. 
self.assertEqual(3, vlan_net_call.call_count) def test_create_vlan_network_with_multiple_dvs_backend_failure(self): net_data = {'name': 'vlan-net', 'tenant_id': self._tenant_id, pnet.NETWORK_TYPE: 'vlan', pnet.SEGMENTATION_ID: 100, pnet.PHYSICAL_NETWORK: 'dvs-1, dvs-2, dvs-3'} network = {'network': net_data} p = directory.get_plugin() with mock.patch.object( p, '_create_vlan_network_at_backend', # Return two successful netmorefs and fail on the backend # for the third netmoref creation as side effect. side_effect=[_uuid(), _uuid(), nsxv_exc.NsxPluginException(err_msg='')]): with mock.patch.object( p, '_delete_backend_network') as delete_net_call: self.assertRaises(nsxv_exc.NsxPluginException, p.create_network, context.get_admin_context(), network) # Two successfully created port groups should be rolled back # on the failure of third port group creation. self.assertEqual(2, delete_net_call.call_count) def test_create_vlan_network_with_multiple_dvs_not_found_failure(self): net_data = {'name': 'vlan-net', 'tenant_id': self._tenant_id, pnet.NETWORK_TYPE: 'vlan', pnet.SEGMENTATION_ID: 100, pnet.PHYSICAL_NETWORK: 'dvs-1, dvs-2, dvs-3'} network = {'network': net_data} p = directory.get_plugin() with mock.patch.object( p, '_validate_provider_create', side_effect=[nsxv_exc.NsxResourceNotFound(res_id='dvs-2', res_name='dvs_id')]): with mock.patch.object( p, '_create_vlan_network_at_backend') as create_net_call: self.assertRaises(nsxv_exc.NsxResourceNotFound, p.create_network, context.get_admin_context(), network) # Verify no port group is created on the backend. 
self.assertEqual(0, create_net_call.call_count) def test_create_vlan_network_with_multiple_dvs_ignore_duplicate_dvs(self): name = 'multi-dvs-vlan-net' providernet_args = {pnet.NETWORK_TYPE: 'vlan', pnet.SEGMENTATION_ID: 100, pnet.PHYSICAL_NETWORK: 'dvs-1, dvs-2, dvs-1'} p = directory.get_plugin() with mock.patch.object( p, '_create_vlan_network_at_backend', # Return two netmorefs as side effect side_effect=[_uuid(), _uuid()]) as vlan_net_call: with self.network(name=name, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID, pnet.PHYSICAL_NETWORK)): # _create_vlan_network_at_backend is expected to be called # two times since we have only two unique DVS IDs in the # physical network attribute. self.assertEqual(2, vlan_net_call.call_count) def test_update_vlan_network_add_dvs(self): name = 'multi-dvs-vlan-net' providernet_args = {pnet.NETWORK_TYPE: 'vlan', pnet.SEGMENTATION_ID: 100, pnet.PHYSICAL_NETWORK: 'dvs-1, dvs-2'} p = directory.get_plugin() with mock.patch.object( p, '_create_vlan_network_at_backend', # Return 3 netmorefs as side effect side_effect=[_uuid(), _uuid(), _uuid()]) as vlan_net_call: with self.network(name=name, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID, pnet.PHYSICAL_NETWORK)) as net: # _create_vlan_network_at_backend is expected to be called # 2 times since we have 2 DVS IDs in the physical # network attribute. 
self.assertEqual(2, vlan_net_call.call_count) self.assertEqual('dvs-1, dvs-2', net['network'][pnet.PHYSICAL_NETWORK]) # Add another dvs data = {'network': {pnet.PHYSICAL_NETWORK: 'dvs-1, dvs-2, dvs-3'}} req = self.new_update_request('networks', data, net['network']['id']) res = self.deserialize('json', req.get_response(self.api)) self.assertEqual(3, vlan_net_call.call_count) self.assertEqual('dvs-1, dvs-2, dvs-3', res['network'][pnet.PHYSICAL_NETWORK]) # make sure it is updates also in the DB req = self.new_show_request('networks', net['network']['id']) res = self.deserialize('json', req.get_response(self.api)) self.assertEqual('dvs-1, dvs-2, dvs-3', res['network'][pnet.PHYSICAL_NETWORK]) # update again - with no real change req = self.new_update_request('networks', data, net['network']['id']) res = self.deserialize('json', req.get_response(self.api)) self.assertEqual(3, vlan_net_call.call_count) self.assertEqual('dvs-1, dvs-2, dvs-3', res['network'][pnet.PHYSICAL_NETWORK]) def test_update_vlan_network_remove_dvs(self): name = 'multi-dvs-vlan-net' providernet_args = {pnet.NETWORK_TYPE: 'vlan', pnet.SEGMENTATION_ID: 100, pnet.PHYSICAL_NETWORK: 'dvs-1, dvs-2'} p = directory.get_plugin() with mock.patch.object( p, '_create_vlan_network_at_backend', # Return 2 netmorefs as side effect side_effect=[_uuid(), _uuid()]) as vlan_net_call,\ mock.patch.object( p, '_delete_backend_network') as del_net: with self.network(name=name, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID, pnet.PHYSICAL_NETWORK)) as net: # _create_vlan_network_at_backend is expected to be called # 2 times since we have 2 DVS IDs in the physical # network attribute. 
self.assertEqual(2, vlan_net_call.call_count) self.assertEqual('dvs-1, dvs-2', net['network'][pnet.PHYSICAL_NETWORK]) # Keep only dvs-1 (Remove dvs-2) data = {'network': {pnet.PHYSICAL_NETWORK: 'dvs-1'}} req = self.new_update_request('networks', data, net['network']['id']) res = self.deserialize('json', req.get_response(self.api)) self.assertEqual(2, vlan_net_call.call_count) del_net.assert_called_once() self.assertEqual('dvs-1', res['network'][pnet.PHYSICAL_NETWORK]) # make sure it is updates also in the DB req = self.new_show_request('networks', net['network']['id']) res = self.deserialize('json', req.get_response(self.api)) self.assertEqual('dvs-1', res['network'][pnet.PHYSICAL_NETWORK]) def test_get_dvs_ids_for_multiple_dvs_vlan_network(self): p = directory.get_plugin() default_dvs = 'fake_dvs_id' # If no DVS-ID is provided as part of physical network, return # global DVS-ID configured in nsx.ini physical_network = constants.ATTR_NOT_SPECIFIED self.assertEqual(['fake_dvs_id'], p._get_dvs_ids( physical_network, default_dvs)) # If DVS-IDs are provided as part of physical network as a comma # separated string, return them as a list of DVS-IDs. physical_network = 'dvs-1,dvs-2, dvs-3' expected_dvs_ids = ['dvs-1', 'dvs-2', 'dvs-3'] self.assertEqual(expected_dvs_ids, sorted(p._get_dvs_ids(physical_network, default_dvs))) # Ignore extra commas ',' in the physical_network attribute. physical_network = ',,,dvs-1,dvs-2,, dvs-3,' expected_dvs_ids = ['dvs-1', 'dvs-2', 'dvs-3'] self.assertEqual(expected_dvs_ids, sorted(p._get_dvs_ids(physical_network, default_dvs))) # Ignore duplicate DVS-IDs in the physical_network attribute. 
physical_network = ',,,dvs-1,dvs-2,, dvs-2,' expected_dvs_ids = ['dvs-1', 'dvs-2'] self.assertEqual(expected_dvs_ids, sorted(p._get_dvs_ids(physical_network, default_dvs))) def test_create_vxlan_with_tz_provider_network(self): name = 'provider_net_vxlan' expected = [('subnets', []), ('name', name), ('admin_state_up', True), ('status', 'ACTIVE'), ('shared', False), (pnet.NETWORK_TYPE, 'vxlan'), (pnet.PHYSICAL_NETWORK, 'vdnscope-2')] providernet_args = {pnet.NETWORK_TYPE: 'vxlan', pnet.PHYSICAL_NETWORK: 'vdnscope-2'} with self.network(name=name, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK)) as net: for k, v in expected: self.assertEqual(net['network'][k], v) def test_create_vxlan_with_tz_provider_network_not_found_fail(self): name = 'provider_net_vxlan' data = {'network': { 'name': name, 'tenant_id': self._tenant_id, pnet.SEGMENTATION_ID: constants.ATTR_NOT_SPECIFIED, pnet.NETWORK_TYPE: 'vxlan', pnet.PHYSICAL_NETWORK: 'vdnscope-2'}} p = directory.get_plugin() with mock.patch.object(p.nsx_v.vcns, 'validate_vdn_scope', side_effect=[False]): self.assertRaises(nsxv_exc.NsxResourceNotFound, p.create_network, context.get_admin_context(), data) def test_create_network_with_qos_no_dvs_fail(self): # network creation should fail if the qos policy parameter exists, # and no use_dvs_features configured data = {'network': { 'name': 'test-qos', 'tenant_id': self._tenant_id, 'qos_policy_id': _uuid()}} plugin = directory.get_plugin() with mock.patch.object(plugin, '_validate_qos_policy_id'): self.assertRaises(n_exc.InvalidInput, plugin.create_network, context.get_admin_context(), data) def test_update_network_with_qos_no_dvs_fail(self): # network update should fail if the qos policy parameter exists, # and no use_dvs_features configured data = {'network': {'qos_policy_id': _uuid()}} with self.network() as net: plugin = directory.get_plugin() self.assertRaises(n_exc.InvalidInput, plugin.update_network, context.get_admin_context(), 
net['network']['id'], data) @mock.patch.object(dvs.DvsManager, 'update_port_groups_config') @mock.patch.object(qos_utils.NsxVQosRule, '_init_from_policy_id') def test_create_network_with_qos_policy(self, fake_init_from_policy, fake_dvs_update): # enable dvs features to allow policy with QOS plugin = self._get_core_plugin_with_dvs() ctx = context.get_admin_context() # Mark init as complete, as otherwise QoS won't be called plugin.init_is_complete = True # fake policy id policy_id = _uuid() data = {'network': { 'name': 'test-qos', 'tenant_id': self._tenant_id, 'qos_policy_id': policy_id, 'port_security_enabled': False, 'admin_state_up': False, 'shared': False }} with mock.patch('vmware_nsx.services.qos.common.utils.' 'get_network_policy_id', return_value=policy_id),\ mock.patch.object(self.plugin, '_validate_qos_policy_id'): # create the network - should succeed and translate the policy id net = plugin.create_network(ctx, data) self.assertEqual(policy_id, net[qos_consts.QOS_POLICY_ID]) fake_init_from_policy.assert_called_once_with(ctx, policy_id) self.assertTrue(fake_dvs_update.called) # Get network should also return the qos policy id net2 = plugin.get_network(ctx, net['id']) self.assertEqual(policy_id, net2[qos_consts.QOS_POLICY_ID]) @mock.patch.object(dvs.DvsManager, 'update_port_groups_config') @mock.patch.object(qos_utils.NsxVQosRule, '_init_from_policy_id') def test_update_network_with_qos_policy(self, fake_init_from_policy, fake_dvs_update): # enable dvs features to allow policy with QOS plugin = self._get_core_plugin_with_dvs() ctx = context.get_admin_context() # create the network without qos policy data = {'network': { 'name': 'test-qos', 'tenant_id': self._tenant_id, 'port_security_enabled': False, 'admin_state_up': True, 'shared': False }} net = plugin.create_network(ctx, data) # fake policy id policy_id = _uuid() data['network']['qos_policy_id'] = policy_id # update the network - should succeed and translate the policy id with 
mock.patch('vmware_nsx.services.qos.common.utils.' 'get_network_policy_id', return_value=policy_id),\ mock.patch.object(self.plugin, '_validate_qos_policy_id'): res = plugin.update_network(ctx, net['id'], data) self.assertEqual(policy_id, res[qos_consts.QOS_POLICY_ID]) fake_init_from_policy.assert_called_once_with(ctx, policy_id) self.assertTrue(fake_dvs_update.called) # Get network should also return the qos policy id net2 = plugin.get_network(ctx, net['id']) self.assertEqual(policy_id, net2[qos_consts.QOS_POLICY_ID]) def test_create_network_with_bad_az_hint(self): p = directory.get_plugin() ctx = context.get_admin_context() data = {'network': { 'name': 'test-qos', 'tenant_id': self._tenant_id, 'port_security_enabled': False, 'admin_state_up': True, 'shared': False, 'availability_zone_hints': ['bad_hint'] }} self.assertRaises(n_exc.NeutronException, p.create_network, ctx, data) def test_create_network_with_az_hint(self): az_name = 'az7' set_az_in_config(az_name) p = directory.get_plugin() p._availability_zones_data = nsx_az.NsxVAvailabilityZones() ctx = context.get_admin_context() data = {'network': { 'name': 'test-qos', 'tenant_id': self._tenant_id, 'port_security_enabled': False, 'admin_state_up': True, 'shared': False, 'availability_zone_hints': [az_name] }} # network creation should succeed net = p.create_network(ctx, data) self.assertEqual([az_name], net['availability_zone_hints']) # the availability zone is still empty until subnet creation self.assertEqual([], net['availability_zones']) def test_list_networks_with_fields(self): with self.network(name='net1'): req = self.new_list_request('networks', params='fields=name') res = self.deserialize(self.fmt, req.get_response(self.api)) self._remove_md_proxy_from_list(res['networks']) self.assertEqual(1, len(res['networks'])) net = res['networks'][0] self.assertEqual('net1', net['name']) self.assertNotIn('id', net) self.assertNotIn('tenant_id', net) self.assertNotIn('project_id', net) def 
test_list_networks_without_pk_in_fields_pagination_native(self): self.skipTest("The test is not suitable for the metadata test case") def test_cannot_delete_md_net(self): if self.internal_net_id: req = self.new_delete_request('networks', self.internal_net_id) net_del_res = req.get_response(self.api) self.assertEqual(net_del_res.status_int, 400) class TestVnicIndex(NsxVPluginV2TestCase, test_vnic_index.VnicIndexDbTestCase): def test_update_port_twice_with_the_same_index(self): """Tests that updates which does not modify the port vnic index association do not produce any errors """ with self.subnet() as subnet: with self.port(subnet=subnet) as port: res = self._port_index_update(port['port']['id'], 2) self.assertEqual(2, res['port'][ext_vnic_idx.VNIC_INDEX]) res = self._port_index_update(port['port']['id'], 2) self.assertEqual(2, res['port'][ext_vnic_idx.VNIC_INDEX]) class TestPortsV2(NsxVPluginV2TestCase, test_plugin.TestPortsV2, test_bindings.PortBindingsTestCase, test_bindings.PortBindingsHostTestCaseMixin, test_bindings.PortBindingsVnicTestCaseMixin): VIF_TYPE = nsx_constants.VIF_TYPE_DVS HAS_PORT_FILTER = True def test_is_mac_in_use(self): ctx = context.get_admin_context() with self.port() as port: net_id = port['port']['network_id'] mac = port['port']['mac_address'] self.assertTrue(self.plugin._is_mac_in_use(ctx, net_id, mac)) mac2 = '00:22:00:44:00:66' # other mac, same network self.assertFalse(self.plugin._is_mac_in_use(ctx, net_id, mac2)) net_id2 = port['port']['id'] # other net uuid, same mac self.assertTrue(self.plugin._is_mac_in_use(ctx, net_id2, mac)) @with_no_dhcp_subnet def test_duplicate_mac_generation(self): return super(TestPortsV2, self).test_duplicate_mac_generation() def test_get_ports_count(self): with self.port(), self.port(), self.port(), self.port() as p: tenid = p['port']['tenant_id'] ctx = context.Context(user_id=None, tenant_id=tenid, is_admin=False) pl = directory.get_plugin() count = pl.get_ports_count(ctx, filters={'tenant_id': 
[tenid]}) # Each port above has subnet => we have an additional port # for DHCP self.assertEqual(8, count) @with_no_dhcp_subnet def test_requested_ips_only(self): return super(TestPortsV2, self).test_requested_ips_only() def test_delete_network_port_exists_owned_by_network_race(self): self.skipTest('Skip need to address in future') def test_create_port_with_too_many_fixed_ips(self): self.skipTest('DHCP only supports one binding') def test_create_port_invalid_fixed_ip_address_v6_pd_slaac(self): self.skipTest('No DHCP v6 Support yet') def test_update_port_invalid_fixed_ip_address_v6_pd_slaac(self): self.skipTest('No DHCP v6 Support yet') def test_update_port_invalid_subnet_v6_pd_slaac(self): self.skipTest('No DHCP v6 Support yet') def test_update_port_mac_v6_slaac(self): self.skipTest('No DHCP v6 Support yet') def test_update_port_invalid_fixed_ip_address_v6_slaac(self): self.skipTest('No DHCP v6 Support yet') def test_update_port_excluding_ipv6_slaac_subnet_from_fixed_ips(self): self.skipTest('No DHCP v6 Support yet') def test_requested_subnet_id_v6_slaac(self): self.skipTest('No DHCP v6 Support yet') def test_ip_allocation_for_ipv6_subnet_slaac_address_mode(self): self.skipTest('No DHCP v6 Support yet') def test_requested_fixed_ip_address_v6_slaac_router_iface(self): self.skipTest('No DHCP v6 Support yet') def test_update_port_with_ipv6_slaac_subnet_in_fixed_ips(self): self.skipTest('No DHCP v6 Support yet') def test_requested_invalid_fixed_ip_address_v6_slaac(self): self.skipTest('No DHCP v6 Support yet') def test_delete_port_with_ipv6_slaac_address(self): self.skipTest('No DHCP v6 Support yet') def test_ip_allocation_for_ipv6_2_subnet_slaac_mode(self): self.skipTest('No DHCP v6 Support yet') def _test_create_port_with_ipv6_subnet_in_fixed_ips(self, addr_mode, ipv6_pd=False): self.skipTest('No DHCP v6 Support yet') def test_update_port_with_new_ipv6_slaac_subnet_in_fixed_ips(self): self.skipTest('No DHCP v6 Support yet') def 
test_create_port_anticipating_allocation(self): self.skipTest('Multiple fixed ips on a port are not supported') @with_no_dhcp_subnet def test_list_ports(self): return super(TestPortsV2, self).test_list_ports() @with_no_dhcp_subnet def test_list_ports_public_network(self): return super(TestPortsV2, self).test_list_ports_public_network() @with_no_dhcp_subnet def test_list_ports_with_pagination_emulated(self): return super(TestPortsV2, self).test_list_ports_with_pagination_emulated() @with_no_dhcp_subnet def test_list_ports_with_pagination_native(self): return super(TestPortsV2, self).test_list_ports_with_pagination_native() @with_no_dhcp_subnet def test_list_ports_with_sort_emulated(self): return super(TestPortsV2, self).test_list_ports_with_sort_emulated() @with_no_dhcp_subnet def test_list_ports_with_sort_native(self): return super(TestPortsV2, self).test_list_ports_with_sort_native() def test_list_ports_filtered_by_security_groups(self): ctx = context.get_admin_context() with self.port() as port1, self.port() as port2: query_params = "security_groups=%s" % ( port1['port']['security_groups'][0]) ports_data = self._list('ports', query_params=query_params) self.assertEqual(set([port1['port']['id'], port2['port']['id']]), set([port['id'] for port in ports_data['ports']])) query_params = "security_groups=%s&id=%s" % ( port1['port']['security_groups'][0], port1['port']['id']) ports_data = self._list('ports', query_params=query_params) self.assertEqual(port1['port']['id'], ports_data['ports'][0]['id']) self.assertEqual(1, len(ports_data['ports'])) temp_sg = {'security_group': {'tenant_id': 'some_tenant', 'name': '', 'description': 's'}} sg_dbMixin = sg_db.SecurityGroupDbMixin() sg = sg_dbMixin.create_security_group(ctx, temp_sg) sg_dbMixin._delete_port_security_group_bindings( ctx, port2['port']['id']) sg_dbMixin._create_port_security_group_binding( ctx, port2['port']['id'], sg['id']) port2['port']['security_groups'][0] = sg['id'] query_params = "security_groups=%s" % ( 
port1['port']['security_groups'][0]) ports_data = self._list('ports', query_params=query_params) self.assertEqual(port1['port']['id'], ports_data['ports'][0]['id']) self.assertEqual(1, len(ports_data['ports'])) query_params = "security_groups=%s" % ( (port2['port']['security_groups'][0])) ports_data = self._list('ports', query_params=query_params) self.assertEqual(port2['port']['id'], ports_data['ports'][0]['id']) def test_update_port_delete_ip(self): # This test case overrides the default because the nsx plugin # implements port_security/security groups and it is not allowed # to remove an ip address from a port unless the security group # is first removed. with self.subnet() as subnet: with self.port(subnet=subnet) as port: data = {'port': {'admin_state_up': False, 'fixed_ips': [], secgrp.SECURITYGROUPS: []}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize('json', req.get_response(self.api)) self.assertEqual(res['port']['admin_state_up'], data['port']['admin_state_up']) self.assertEqual(res['port']['fixed_ips'], data['port']['fixed_ips']) def _update_port_index(self, port_id, device_id, index): data = {'port': {'device_owner': 'compute:None', 'device_id': device_id, 'vnic_index': index}} req = self.new_update_request('ports', data, port_id) res = self.deserialize('json', req.get_response(self.api)) return res def _test_update_port_index_and_spoofguard( self, ip_version, subnet_cidr, port_ip, port_mac, ipv6_lla): q_context = context.Context('', 'tenant_1') device_id = _uuid() with self.subnet(ip_version=ip_version, enable_dhcp=(False if ip_version == 6 else True), cidr=subnet_cidr, gateway_ip=None) as subnet, \ mock.patch.object(edge_utils.EdgeManager, 'delete_dhcp_binding') as delete_dhcp_binding: fixed_ip_data = [{'ip_address': port_ip, 'subnet_id': subnet['subnet']['id']}] with self.port(subnet=subnet, device_id=device_id, mac_address=port_mac, fixed_ips=fixed_ip_data) as port: # set port as compute first res = 
self._update_port_index( port['port']['id'], device_id, None) self.assertIsNone(res['port']['vnic_index']) self.fc2.approve_assigned_addresses = ( mock.Mock().approve_assigned_addresses) self.fc2.publish_assigned_addresses = ( mock.Mock().publish_assigned_addresses) self.fc2.inactivate_vnic_assigned_addresses = ( mock.Mock().inactivate_vnic_assigned_addresses) vnic_index = 3 res = self._update_port_index( port['port']['id'], device_id, vnic_index) self.assertEqual(vnic_index, res['port']['vnic_index']) policy_id = nsxv_db.get_spoofguard_policy_id( q_context.session, port['port']['network_id']) vnic_id = '%s.%03d' % (device_id, vnic_index) # Verify that the spoofguard policy assigned and published expected_ips = [port_ip] if ipv6_lla: expected_ips.append(ipv6_lla) (self.fc2.approve_assigned_addresses. assert_called_once_with(policy_id, vnic_id, port_mac, expected_ips)) (self.fc2.publish_assigned_addresses. assert_called_once_with(policy_id, vnic_id)) # Updating the vnic_index to None implies the vnic does # no longer obtain the addresses associated with this port, # we need to inactivate previous addresses configurations for # this vnic in the context of this network spoofguard policy. res = self._update_port_index(port['port']['id'], '', None) (self.fc2.inactivate_vnic_assigned_addresses. 
assert_called_once_with(policy_id, vnic_id)) self.assertTrue(delete_dhcp_binding.called) def test_update_port_index(self): ip_version = 4 subnet_cidr = '10.0.0.0/24' port_ip = '10.0.0.8' port_mac = '00:00:00:00:00:02' ipv6_lla = None self._test_update_port_index_and_spoofguard( ip_version, subnet_cidr, port_ip, port_mac, ipv6_lla) def test_update_port_index_ipv6(self): ip_version = 6 subnet_cidr = 'ae80::/64' port_mac = '00:00:00:00:00:02' ipv6_lla = 'fe80::200:ff:fe00:2' port_ip = 'ae80::2' self._test_update_port_index_and_spoofguard( ip_version, subnet_cidr, port_ip, port_mac, ipv6_lla) def test_update_port_with_compute_device_owner(self): """ Test that DHCP binding is created when ports 'device_owner' is updated to compute, for example when attaching an interface to a instance with existing port. """ with self.port() as port: with mock.patch(PLUGIN_NAME + '._create_dhcp_static_binding'): update = {'port': {'device_owner'}} self.new_update_request('ports', update, port['port']['id']) @with_no_dhcp_subnet def test_ports_vif_host(self): return super(TestPortsV2, self).test_ports_vif_host() @with_no_dhcp_subnet def test_ports_vif_host_update(self): return super(TestPortsV2, self).test_ports_vif_host_update() @with_no_dhcp_subnet def test_ports_vif_details(self): return super(TestPortsV2, self).test_ports_vif_details() @with_no_dhcp_subnet def test_ports_vnic_type(self): return super(TestPortsV2, self).test_ports_vnic_type() @with_no_dhcp_subnet def test_ports_vnic_type_list(self): cfg.CONF.set_default('allow_overlapping_ips', True) vnic_arg = {portbindings.VNIC_TYPE: self.vnic_type} with self.subnet(enable_dhcp=False) as subnet,\ self.port(subnet, name='name1', arg_list=(portbindings.VNIC_TYPE,), **vnic_arg) as port1,\ self.port(subnet, name='name2') as port2,\ self.port(subnet, name='name3', arg_list=(portbindings.VNIC_TYPE,), **vnic_arg) as port3: self._test_list_resources('port', (port1, port2, port3), query_params='%s=%s' % ( portbindings.VNIC_TYPE, 
self.vnic_type)) def test_port_invalid_vnic_type(self): with self._test_create_direct_network(vlan_id=7) as network: kwargs = {portbindings.VNIC_TYPE: 'invalid', psec.PORTSECURITY: False} net_id = network['network']['id'] res = self._create_port(self.fmt, net_id=net_id, arg_list=(portbindings.VNIC_TYPE, psec.PORTSECURITY), **kwargs) self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code) def test_range_allocation(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_requested_subnet_id_v4_and_v6(self): self.skipTest('Multiple fixed ips on a port are not supported') @with_no_dhcp_subnet def test_update_port_update_ip(self): return super(TestPortsV2, self).test_update_port_update_ip() @with_no_dhcp_subnet def test_update_port_update_ips(self): return super(TestPortsV2, self).test_update_port_update_ips() def test_update_port_update_ip_dhcp(self): #Test updating a port IP when the device owner is DHCP with self.subnet(enable_dhcp=False) as subnet: with self.port(subnet=subnet, device_owner=constants.DEVICE_OWNER_DHCP) as port: data = {'port': {'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': "10.0.0.10"}]}} plugin = directory.get_plugin() ctx = context.get_admin_context() with mock.patch.object( plugin.edge_manager, 'update_dhcp_edge_service') as update_dhcp: plugin.update_port(ctx, port['port']['id'], data) self.assertTrue(update_dhcp.called) def test_update_port_update_ip_compute(self): #Test that updating a port IP succeed if the device owner starts #with compute. 
owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'xxx' with self.subnet(enable_dhcp=False) as subnet: with self.port(subnet=subnet, device_id=_uuid(), device_owner=owner) as port: data = {'port': {'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': "10.0.0.10"}]}} plugin = directory.get_plugin() with mock.patch.object( plugin.edge_manager, 'delete_dhcp_binding') as delete_dhcp: with mock.patch.object( plugin.edge_manager, 'create_static_binding') as create_static: with mock.patch.object( plugin.edge_manager, 'create_dhcp_bindings') as create_dhcp: plugin.update_port(context.get_admin_context(), port['port']['id'], data) self.assertTrue(delete_dhcp.called) self.assertTrue(create_static.called) self.assertTrue(create_dhcp.called) def test_update_port_update_ip_and_owner_fail(self): #Test that updating a port IP and device owner at the same #transaction fails with self.subnet(enable_dhcp=False) as subnet: with self.port(subnet=subnet, device_owner='aaa') as port: data = {'port': {'device_owner': 'bbb', 'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': "10.0.0.10"}]}} plugin = directory.get_plugin() self.assertRaises(n_exc.BadRequest, plugin.update_port, context.get_admin_context(), port['port']['id'], data) def test_update_port_update_ip_router(self): #Test that updating a port IP succeed if the device owner is a router owner = constants.DEVICE_OWNER_ROUTER_GW router_id = _uuid() old_ip = '10.0.0.3' new_ip = '10.0.0.10' with self.subnet(enable_dhcp=False) as subnet: with self.port(subnet=subnet, device_id=router_id, device_owner=owner, fixed_ips=[{'ip_address': old_ip}]) as port: data = {'port': {'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': new_ip}]}} plugin = directory.get_plugin() ctx = context.get_admin_context() router_obj = router_driver.RouterSharedDriver(plugin) with mock.patch.object(plugin, '_find_router_driver', return_value=router_obj): with mock.patch.object( router_obj, 'update_router_interface_ip') as 
update_router: port_id = port['port']['id'] plugin.update_port(ctx, port_id, data) net_id = port['port']['network_id'] update_router.assert_called_once_with( ctx, router_id, port_id, net_id, old_ip, new_ip, "255.255.255.0") def test_update_port_update_ip_unattached_router(self): #Test that updating a port IP succeed if the device owner is a router #and the shared router is not attached to any edge yet owner = constants.DEVICE_OWNER_ROUTER_GW router_id = _uuid() old_ip = '10.0.0.3' new_ip = '10.0.0.10' with self.subnet(enable_dhcp=False) as subnet: with self.port(subnet=subnet, device_id=router_id, device_owner=owner, fixed_ips=[{'ip_address': old_ip}]) as port: data = {'port': {'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': new_ip}]}} plugin = directory.get_plugin() ctx = context.get_admin_context() router_obj = router_driver.RouterSharedDriver(plugin) with mock.patch.object(plugin, '_find_router_driver', return_value=router_obj): # make sure the router will not be attached to an edge with mock.patch.object( edge_utils, 'get_router_edge_id', return_value=None): port_id = port['port']['id'] # The actual test here is that this call does not # raise an exception new_port = plugin.update_port(ctx, port_id, data) ips = new_port['fixed_ips'] self.assertEqual(len(ips), 1) self.assertEqual(ips[0]['ip_address'], new_ip) self.assertEqual(ips[0]['subnet_id'], subnet['subnet']['id']) def test_update_port_delete_ip_router(self): #Test that deleting a port IP succeed if the device owner is a router owner = constants.DEVICE_OWNER_ROUTER_GW router_id = _uuid() old_ip = '10.0.0.3' with self.subnet(enable_dhcp=False) as subnet: with self.port(subnet=subnet, device_id=router_id, device_owner=owner, fixed_ips=[{'ip_address': old_ip}]) as port: data = {'port': {'fixed_ips': []}} plugin = directory.get_plugin() ctx = context.get_admin_context() router_obj = router_driver.RouterSharedDriver(plugin) with mock.patch.object(plugin, '_find_router_driver', 
return_value=router_obj): with mock.patch.object( router_obj, 'update_router_interface_ip') as update_router: port_id = port['port']['id'] plugin.update_port(ctx, port_id, data) net_id = port['port']['network_id'] update_router.assert_called_once_with( ctx, router_id, port_id, net_id, old_ip, None, None) def test_update_port_add_additional_ip(self): """Test update of port with additional IP fails.""" with self.subnet() as subnet: with self.port(subnet=subnet) as port: data = {'port': {'admin_state_up': False, 'fixed_ips': [{'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}]}} req = self.new_update_request('ports', data, port['port']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_create_port_additional_ip(self): """Test that creation of port with additional IP fails.""" with self.subnet() as subnet: data = {'port': {'network_id': subnet['subnet']['network_id'], 'tenant_id': subnet['subnet']['tenant_id'], 'fixed_ips': [{'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}]}} port_req = self.new_create_request('ports', data) res = port_req.get_response(self.api) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_update_port_update_ip_address_only(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_requested_invalid_fixed_ips(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_requested_subnet_id_v4_and_v6_slaac(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_update_dhcp_port_with_exceeding_fixed_ips(self): self.skipTest('Updating dhcp port IP is not supported') def test_create_router_port_ipv4_and_ipv6_slaac_no_fixed_ips(self): self.skipTest('No DHCP v6 Support yet') def test_create_port_with_multiple_ipv4_and_ipv6_subnets(self): # This test should fail as the NSX-v plugin should cause Neutron to # return a 400 status code with 
testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: super(TestPortsV2, self).\ test_create_port_with_multiple_ipv4_and_ipv6_subnets() self.assertEqual(ctx_manager.exception.code, 400) @with_no_dhcp_subnet def test_list_ports_for_network_owner(self): return super(TestPortsV2, self).test_list_ports_for_network_owner() def test_mac_duplication(self): # create 2 networks res = self._create_network(fmt=self.fmt, name='net1', admin_state_up=True) network1 = self.deserialize(self.fmt, res) net1_id = network1['network']['id'] res = self._create_network(fmt=self.fmt, name='net2', admin_state_up=True) network2 = self.deserialize(self.fmt, res) net2_id = network2['network']['id'] # create a port on the first network mac = '33:00:00:00:00:01' res = self._create_port(self.fmt, net_id=net1_id, arg_list=('mac_address',), mac_address=mac) port1 = self.deserialize('json', res) self.assertEqual(mac, port1['port']['mac_address']) # creating another port on a different network with the same mac # should fail res = self._create_port(self.fmt, net_id=net2_id, arg_list=('mac_address',), mac_address=mac) port2 = self.deserialize('json', res) self.assertEqual("MacAddressInUse", port2['NeutronError']['type']) def _test_create_direct_network(self, vlan_id=0): net_type = vlan_id and 'vlan' or 'flat' name = 'direct_net' providernet_args = {pnet.NETWORK_TYPE: net_type, pnet.PHYSICAL_NETWORK: 'tzuuid'} if vlan_id: providernet_args[pnet.SEGMENTATION_ID] = vlan_id return self.network(name=name, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, pnet.SEGMENTATION_ID)) def test_create_port_vnic_direct(self): with self._test_create_direct_network(vlan_id=7) as network: # Check that port security conflicts kwargs = {portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT, psec.PORTSECURITY: True} net_id = network['network']['id'] res = self._create_port(self.fmt, net_id=net_id, arg_list=(portbindings.VNIC_TYPE, psec.PORTSECURITY), **kwargs) 
self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code) # Check that security group conflicts kwargs = {portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT, 'security_groups': [ '4cd70774-cc67-4a87-9b39-7d1db38eb087'], psec.PORTSECURITY: False} net_id = network['network']['id'] res = self._create_port(self.fmt, net_id=net_id, arg_list=(portbindings.VNIC_TYPE, psec.PORTSECURITY), **kwargs) self.assertEqual(res.status_int, webob.exc.HTTPBadRequest.code) # All is kosher so we can create the port kwargs = {portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT} net_id = network['network']['id'] res = self._create_port(self.fmt, net_id=net_id, arg_list=(portbindings.VNIC_TYPE,), **kwargs) port = self.deserialize('json', res) self.assertEqual("direct", port['port'][portbindings.VNIC_TYPE]) def test_create_port_vnic_direct_invalid_network(self): with self.network(name='not vlan/flat') as net: kwargs = {portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT, psec.PORTSECURITY: False} net_id = net['network']['id'] res = self._create_port(self.fmt, net_id=net_id, arg_list=(portbindings.VNIC_TYPE, psec.PORTSECURITY), **kwargs) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_update_vnic_direct(self): with self._test_create_direct_network(vlan_id=7) as network: with self.subnet(network=network) as subnet: with self.port(subnet=subnet) as port: # need to do two updates as the update for port security # disabled requires that it can only change 2 items data = {'port': {psec.PORTSECURITY: False, 'security_groups': []}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize('json', req.get_response(self.api)) self.assertEqual(portbindings.VNIC_NORMAL, res['port'][portbindings.VNIC_TYPE]) data = {'port': {portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize('json', req.get_response(self.api)) self.assertEqual(portbindings.VNIC_DIRECT, 
res['port'][portbindings.VNIC_TYPE]) def test_delete_network_port_exists_owned_by_network_port_not_found(self): """Tests that we continue to gracefully delete the network even if a neutron:dhcp-owned port was deleted concurrently. """ res = self._create_network(fmt=self.fmt, name='net', admin_state_up=True) network = self.deserialize(self.fmt, res) network_id = network['network']['id'] self._create_port(self.fmt, network_id, device_owner=constants.DEVICE_OWNER_DHCP) # Raise PortNotFound when trying to delete the port to simulate a # concurrent delete race; note that we actually have to delete the port # "out of band" otherwise deleting the network will fail because of # constraints in the data model. plugin = directory.get_plugin() orig_delete = plugin.delete_port def fake_delete_port(context, id, force_delete_dhcp=False): # Delete the port for real from the database and then raise # PortNotFound to simulate the race. self.assertIsNone(orig_delete( context, id, force_delete_dhcp=force_delete_dhcp)) raise n_exc.PortNotFound(port_id=id) p = mock.patch.object(plugin, 'delete_port') mock_del_port = p.start() mock_del_port.side_effect = fake_delete_port req = self.new_delete_request('networks', network_id) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int) def test_create_port_sec_disabled_and_provider_rule(self): with self.network() as network: kwargs = {'provider_security_groups': [uuidutils.generate_uuid()], 'port_security_enabled': False} res = self._create_port(self.fmt, network['network']['id'], arg_list=('provider_security_groups', 'port_security_enabled'), **kwargs) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_update_port_sec_disabled_and_provider_rule(self): with self.port() as port: with mock.patch( PLUGIN_NAME + '._get_provider_security_groups_on_port'): data = {'port': {'port_security_enabled': False}} req = self.new_update_request('ports', data, port['port']['id']) res = 
self.deserialize('json', req.get_response(self.api)) self.assertEqual("PortSecurityAndIPRequiredForSecurityGroups", res['NeutronError']['type']) def test_port_add_to_spoofguard_allow_multiple_addresses(self): # allow_multiple_addresses flag is True, first step is to check that # when port-security-allowed is false - spoofguard policy is not # created. # next step is to update port-security-allowed to true - spoofguard # policy is now created for this network. providernet_args = {psec.PORTSECURITY: False} cfg.CONF.set_default('allow_multiple_ip_addresses', True, 'nsxv') res = self._create_network(fmt='json', name='net-1', admin_state_up=True, providernet_args=providernet_args, arg_list=(psec.PORTSECURITY,)) network1 = self.deserialize(self.fmt, res) net1_id = network1['network']['id'] with self.subnet(network=network1, cidr='10.0.0.0/24'): # create a compute port with port security address_pairs = [{'ip_address': '192.168.1.1'}] device_id = _uuid() vnic_index = 3 compute_port_create = self._create_port( 'json', net1_id, arg_list=( 'port_security_enabled', 'device_id', 'device_owner', 'allowed_address_pairs',), port_security_enabled=True, device_id=device_id, device_owner='compute:None', allowed_address_pairs=address_pairs) port = self.deserialize('json', compute_port_create) port = self._update_port_index( port['port']['id'], device_id, vnic_index) # Verify the port is added to the spoofguard policy with mock.patch.object( self.plugin, '_update_vnic_assigned_addresses') as \ update_approved_port: args = {'network': {psec.PORTSECURITY: True}} req = self.new_update_request('networks', args, net1_id, fmt='json') req.get_response(self.api) # The expected vnic-id format by NsxV update_approved_port.assert_called_once_with( mock.ANY, mock.ANY, '%s.%03d' % (device_id, vnic_index)) def test_port_add_to_spoofguard_allow_multiple_addresses_fail(self): # allow_multiple_addresses flag is True, first step is to check that # when port-security-allowed is false - spoofguard policy 
is not # created. # next step is to update port-security-allowed to true but the port # has CIDR defined as a address pair - action is aborted. # policy is now created for this network. providernet_args = {psec.PORTSECURITY: False} cfg.CONF.set_default('allow_multiple_ip_addresses', True, 'nsxv') res = self._create_network(fmt='json', name='net-1', admin_state_up=True, providernet_args=providernet_args, arg_list=(psec.PORTSECURITY,)) network1 = self.deserialize(self.fmt, res) net1_id = network1['network']['id'] with self.subnet(network=network1, cidr='10.0.0.0/24'): # create a compute port with port security address_pairs = [{'ip_address': '192.168.1.0/24'}] device_id = _uuid() vnic_index = 3 compute_port_create = self._create_port( 'json', net1_id, arg_list=( 'port_security_enabled', 'device_id', 'device_owner', 'allowed_address_pairs',), port_security_enabled=True, device_id=device_id, device_owner='compute:None', allowed_address_pairs=address_pairs) port = self.deserialize('json', compute_port_create) port = self._update_port_index( port['port']['id'], device_id, vnic_index) # Action is failed due to CIDR defined in the port. 
args = {'network': {psec.PORTSECURITY: True}} plugin = directory.get_plugin() self.assertRaises(n_exc.BadRequest, plugin.update_network, context.get_admin_context(), net1_id, args) class TestSubnetsV2(NsxVPluginV2TestCase, test_plugin.TestSubnetsV2): def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None, service_plugins=None): super(TestSubnetsV2, self).setUp() self.context = context.get_admin_context() def _test_subnet_update_ipv4_and_ipv6_pd_subnets(self, ra_addr_mode): self.skipTest('No DHCP v6 Support yet') def test__subnet_ipv6_not_supported(self): with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'gateway': 'fe80::1', 'cidr': '2607:f0d0:1002:51::/64', 'ip_version': '6', 'tenant_id': network['network']['tenant_id']}} subnet_req = self.new_create_request('subnets', data) res = subnet_req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) def test_create_subnet_ipv6_gw_is_nw_start_addr(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_ipv6_gw_is_nw_start_addr_canonicalize(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_ipv6_gw_is_nw_end_addr(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_ipv6_first_ip_owned_by_router(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_ipv6_first_ip_owned_by_non_router(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_ipv6_out_of_cidr_global(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_ipv6_pd_gw_values(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_ipv6_slaac_with_port_on_network(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_ipv6_slaac_with_snat_intf_on_network(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_dhcpv6_stateless_with_port_on_network(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_ipv6_slaac_with_dhcp_port_on_network(self): 
self.skipTest('No DHCP v6 Support yet') def test_delete_subnet_ipv6_slaac_port_exists(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_ipv6_slaac_with_router_intf_on_network(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_ipv6_out_of_cidr_lla(self): self.skipTest('No DHCP v6 Support yet') def test_xxxa(self): self.skipTest('No DHCP v6 Support yet') def test_update_subnet_inconsistent_ipv6_hostroute_dst_v4(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_only_ip_version_v6(self): self.skipTest('No DHCP v6 Support yet') def test_update_subnet_ipv6_address_mode_fails(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_with_v6_allocation_pool(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_with_v6_pd_allocation_pool(self): self.skipTest('No DHCP v6 Support yet') def test_update_subnet_ipv6_ra_mode_fails(self): self.skipTest('No DHCP v6 Support yet') def test_delete_subnet_ipv6_slaac_router_port_exists(self): self.skipTest('No DHCP v6 Support yet') def test_update_subnet_inconsistent_ipv6_hostroute_np_v4(self): self.skipTest('No DHCP v6 Support yet') def test_update_subnet_inconsistent_ipv6_gatewayv4(self): self.skipTest('No DHCP v6 Support yet') def test_update_subnet_ipv6_attributes_fails(self): self.skipTest('No DHCP v6 Support yet') def test_update_subnet_ipv6_cannot_disable_dhcp(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_V6_pd_slaac(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_V6_pd_stateless(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_V6_pd_statefull(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_V6_pd_no_mode(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_ipv6_slaac_with_ip_already_allocated(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_dhcpv6_stateless_with_ip_already_allocated(self): self.skipTest('No DHCP v6 
Support yet') def test_create_subnets_bulk_native_ipv6(self): self.skipTest('No DHCP v6 Support yet') def _create_subnet_bulk(self, fmt, number, net_id, name, ip_version=4, **kwargs): base_data = {'subnet': {'network_id': net_id, 'ip_version': ip_version, 'enable_dhcp': False, 'tenant_id': self._tenant_id}} if 'ipv6_mode' in kwargs: base_data['subnet']['ipv6_ra_mode'] = kwargs['ipv6_mode'] base_data['subnet']['ipv6_address_mode'] = kwargs['ipv6_mode'] # auto-generate cidrs as they should not overlap base_cidr = "10.0.%s.0/24" if ip_version == constants.IP_VERSION_6: base_cidr = "fd%s::/64" # auto-generate cidrs as they should not overlap overrides = dict((k, v) for (k, v) in zip(range(number), [{'cidr': base_cidr % num} for num in range(number)])) kwargs.update({'override': overrides}) return self._create_bulk(fmt, number, 'subnet', base_data, **kwargs) @with_no_dhcp_subnet def test_create_subnet_nonzero_cidr(self): return super(TestSubnetsV2, self).test_create_subnet_nonzero_cidr() def test_create_subnet_ipv6_attributes(self): # Expected to fail for now as we don't support IPv6 for NSXv cidr = "fe80::/80" with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: self._test_create_subnet(cidr=cidr) self.assertEqual(ctx_manager.exception.code, 400) def test_create_subnet_with_different_dhcp_server(self): self.mock_create_dhcp_service.stop() name = 'dvs-provider-net' providernet_args = {pnet.NETWORK_TYPE: 'vlan', pnet.SEGMENTATION_ID: 43, pnet.PHYSICAL_NETWORK: 'dvs-uuid'} with self.network(name=name, do_delete=False, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID, pnet.PHYSICAL_NETWORK)) as net: self._test_create_subnet(network=net, cidr='10.0.0.0/24') dhcp_router_id = (vcns_const.DHCP_EDGE_PREFIX + net['network']['id'])[:36] dhcp_server_id = nsxv_db.get_nsxv_router_binding( self.context.session, dhcp_router_id)['edge_id'] providernet_args_1 = {pnet.NETWORK_TYPE: 'vlan', pnet.SEGMENTATION_ID: 43, 
pnet.PHYSICAL_NETWORK: 'dvs-uuid-1'} with self.network(name=name, do_delete=False, providernet_args=providernet_args_1, arg_list=(pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID, pnet.PHYSICAL_NETWORK)) as net1: self._test_create_subnet(network=net1, cidr='10.0.1.0/24') router_id = (vcns_const.DHCP_EDGE_PREFIX + net1['network']['id'])[:36] dhcp_server_id_1 = nsxv_db.get_nsxv_router_binding( self.context.session, router_id)['edge_id'] self.assertNotEqual(dhcp_server_id, dhcp_server_id_1) def test_create_subnet_with_different_dhcp_by_flat_net(self): self.mock_create_dhcp_service.stop() name = 'flat-net' providernet_args = {pnet.NETWORK_TYPE: 'flat', pnet.PHYSICAL_NETWORK: 'dvs-uuid'} with self.network(name=name, do_delete=False, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK)) as net: self._test_create_subnet(network=net, cidr='10.0.0.0/24') dhcp_router_id = (vcns_const.DHCP_EDGE_PREFIX + net['network']['id'])[:36] dhcp_server_id = nsxv_db.get_nsxv_router_binding( self.context.session, dhcp_router_id)['edge_id'] providernet_args_1 = {pnet.NETWORK_TYPE: 'flat', pnet.PHYSICAL_NETWORK: 'dvs-uuid'} with self.network(name=name, do_delete=False, providernet_args=providernet_args_1, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK)) as net1: self._test_create_subnet(network=net1, cidr='10.0.1.0/24') router_id = (vcns_const.DHCP_EDGE_PREFIX + net1['network']['id'])[:36] dhcp_server_id_1 = nsxv_db.get_nsxv_router_binding( self.context.session, router_id)['edge_id'] self.assertNotEqual(dhcp_server_id, dhcp_server_id_1) def test_create_subnets_with_different_tenants_non_shared(self): cfg.CONF.set_override('share_edges_between_tenants', False, group="nsxv") self.mock_create_dhcp_service.stop() # create 2 networks with different tenants with self.network(name='net1', tenant_id='fake1') as net1,\ self.network(name='net2', tenant_id='fake2') as net2: # create 2 non-overlapping subnets self._test_create_subnet(network=net1, cidr='10.0.0.0/24') 
router_id1 = (vcns_const.DHCP_EDGE_PREFIX + net1['network']['id'])[:36] edge1 = nsxv_db.get_nsxv_router_binding( self.context.session, router_id1)['edge_id'] self._test_create_subnet(network=net2, cidr='20.0.0.0/24') router_id2 = (vcns_const.DHCP_EDGE_PREFIX + net2['network']['id'])[:36] edge2 = nsxv_db.get_nsxv_router_binding( self.context.session, router_id2)['edge_id'] # make sure we have 2 separate dhcp edges self.assertNotEqual(edge1, edge2) def test_create_subnets_with_different_tenants_shared(self): cfg.CONF.set_override('share_edges_between_tenants', True, group="nsxv") self.mock_create_dhcp_service.stop() # create 2 networks with different tenants with self.network(name='net1', tenant_id='fake1') as net1,\ self.network(name='net2', tenant_id='fake2') as net2: # create 2 non-overlapping subnets self._test_create_subnet(network=net1, cidr='10.0.0.0/24') router_id1 = (vcns_const.DHCP_EDGE_PREFIX + net1['network']['id'])[:36] edge1 = nsxv_db.get_nsxv_router_binding( self.context.session, router_id1)['edge_id'] self._test_create_subnet(network=net2, cidr='20.0.0.0/24') router_id2 = (vcns_const.DHCP_EDGE_PREFIX + net2['network']['id'])[:36] edge2 = nsxv_db.get_nsxv_router_binding( self.context.session, router_id2)['edge_id'] # make sure we have both networks on the same dhcp edges self.assertEqual(edge1, edge2) def test_create_subnet_ipv6_slaac_with_db_reference_error(self): self.skipTest('Currently not supported') def test_create_subnet_ipv6_slaac_with_port_not_found(self): self.skipTest('Currently not supported') def test_bulk_create_subnet_ipv6_auto_addr_with_port_on_network(self): self.skipTest('Currently not supported') def test_create_subnet_ipv6_gw_values(self): # This test should fail with response code 400 as IPv6 subnets with # DHCP are not supported by this plugin with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: super(TestSubnetsV2, self).test_create_subnet_ipv6_gw_values() self.assertEqual(ctx_manager.exception.code, 
400) def test_create_subnet_only_ip_version_v6_old(self): self.skipTest('Currently not supported') def test_create_subnet_reserved_network(self): self.mock_create_dhcp_service.stop() name = 'overlap-reserved-net' providernet_args = {pnet.NETWORK_TYPE: 'flat', pnet.PHYSICAL_NETWORK: 'dvs-uuid'} with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: with self.network(name=name, do_delete=False, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID, pnet.PHYSICAL_NETWORK)) as net: self._test_create_subnet(network=net, cidr='169.254.128.128/25') self.assertEqual(ctx_manager.exception.code, 400) def test_cannot_delete_md_subnet(self): if self.internal_net_id: query_params = "network_id=%s" % self.internal_net_id res = self._list('subnets', neutron_context=self.context, query_params=query_params) internal_sub = res['subnets'][0]['id'] req = self.new_delete_request('subnets', internal_sub) net_del_res = req.get_response(self.api) self.assertEqual(net_del_res.status_int, 400) class TestSubnetPoolsV2(NsxVPluginV2TestCase, test_plugin.TestSubnetsV2): def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None, service_plugins=None): super(TestSubnetPoolsV2, self).setUp() self.context = context.get_admin_context() def test_subnet_update_ipv4_and_ipv6_pd_slaac_subnets(self): self.skipTest('No DHCP v6 Support yet') def test_subnet_update_ipv4_and_ipv6_pd_v6stateless_subnets(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_ipv6_gw_is_nw_start_addr(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_ipv6_gw_is_nw_start_addr_canonicalize(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_ipv6_gw_is_nw_end_addr(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_ipv6_first_ip_owned_by_router(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_ipv6_first_ip_owned_by_non_router(self): self.skipTest('No DHCP v6 Support yet') def 
test_create_subnet_ipv6_out_of_cidr_global(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_V6_pd_stateless(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_V6_pd_slaac(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_ipv6_slaac_with_ip_already_allocated(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_dhcpv6_stateless_with_ip_already_allocated(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_dhcpv6_stateless_with_port_on_network(self): self.skipTest('Not supported') def test_create_subnet_ipv6_gw_values(self): self.skipTest('Not supported') def test_create_subnet_ipv6_out_of_cidr_lla(self): self.skipTest('Not supported') def test_create_subnet_ipv6_pd_gw_values(self): self.skipTest('Not supported') def test_create_subnet_ipv6_slaac_with_db_reference_error(self): self.skipTest('Not supported') def test_create_subnet_ipv6_slaac_with_port_not_found(self): self.skipTest('Not supported') def test_bulk_create_subnet_ipv6_auto_addr_with_port_on_network(self): self.skipTest('Currently not supported') def test_create_subnet_ipv6_slaac_with_dhcp_port_on_network(self): self.skipTest('Not supported') def test_create_subnet_ipv6_slaac_with_port_on_network(self): self.skipTest('Not supported') def test_create_subnet_ipv6_slaac_with_router_intf_on_network(self): self.skipTest('Not supported') def test_create_subnet_ipv6_slaac_with_snat_intf_on_network(self): self.skipTest('Not supported') def test_create_subnet_only_ip_version_v6(self): self.skipTest('Not supported') def test_create_subnet_with_v6_allocation_pool(self): self.skipTest('Not supported') def test_create_subnet_with_v6_pd_allocation_pool(self): self.skipTest('Not supported') def test_delete_subnet_ipv6_slaac_port_exists(self): self.skipTest('Not supported') def test_delete_subnet_ipv6_slaac_router_port_exists(self): self.skipTest('Not supported') def test_update_subnet_inconsistent_ipv6_gatewayv4(self): 
self.skipTest('Not supported') def test_update_subnet_inconsistent_ipv6_hostroute_dst_v4(self): self.skipTest('Not supported') def test_update_subnet_inconsistent_ipv6_hostroute_np_v4(self): self.skipTest('Not supported') def test_update_subnet_ipv6_address_mode_fails(self): self.skipTest('Not supported') def test_update_subnet_ipv6_attributes_fails(self): self.skipTest('Not supported') def test_update_subnet_ipv6_cannot_disable_dhcp(self): self.skipTest('Not supported') def test_update_subnet_ipv6_ra_mode_fails(self): self.skipTest('Not supported') def test_create_subnet_only_ip_version_v6_old(self): self.skipTest('Currently not supported') def test_create_subnets_bulk_native_ipv6(self): self.skipTest('No DHCP v6 Support yet') class TestBasicGet(test_plugin.TestBasicGet, NsxVPluginV2TestCase): pass class TestV2HTTPResponse(test_plugin.TestV2HTTPResponse, NsxVPluginV2TestCase): pass class TestL3ExtensionManager(object): def get_resources(self): # Simulate extension of L3 attribute map l3.L3().update_attributes_map( l3_egm_apidef.RESOURCE_ATTRIBUTE_MAP) l3.L3().update_attributes_map( dvr_apidef.RESOURCE_ATTRIBUTE_MAP) l3.L3().update_attributes_map( router_type.EXTENDED_ATTRIBUTES_2_0) l3.L3().update_attributes_map( router_size.EXTENDED_ATTRIBUTES_2_0) l3.L3().update_attributes_map( raz_apidef.RESOURCE_ATTRIBUTE_MAP) l3.L3().update_attributes_map( l3fav_apidef.RESOURCE_ATTRIBUTE_MAP) return (l3.L3.get_resources() + address_scope.Address_scope.get_resources()) def get_actions(self): return [] def get_request_extensions(self): return [] class L3NatTest(test_l3_plugin.L3BaseForIntTests, NsxVPluginV2TestCase): def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None, service_plugins=None): cfg.CONF.set_override('task_status_check_interval', 200, group="nsxv") cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH) ext_mgr = ext_mgr or TestL3ExtensionManager() super(L3NatTest, self).setUp( plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins) 
self.plugin_instance = directory.get_plugin() self._plugin_name = "%s.%s" % ( self.plugin_instance.__module__, self.plugin_instance.__class__.__name__) self._plugin_class = self.plugin_instance.__class__ def tearDown(self): plugin = directory.get_plugin() _manager = plugin.nsx_v.task_manager # wait max ~10 seconds for all tasks to be finished for i in range(100): if not _manager.has_pending_task(): break greenthread.sleep(0.1) if _manager.has_pending_task(): _manager.show_pending_tasks() raise Exception(_("Tasks not completed")) _manager.stop() # Ensure the manager thread has been stopped self.assertIsNone(_manager._thread) super(L3NatTest, self).tearDown() def _create_l3_ext_network(self, vlan_id=None): name = 'l3_ext_net' return self.network(name=name, router__external=True) def _create_router(self, fmt, tenant_id, name=None, admin_state_up=None, set_context=False, arg_list=None, **kwargs): tenant_id = tenant_id or _uuid() data = {'router': {'tenant_id': tenant_id}} if name: data['router']['name'] = name if admin_state_up: data['router']['admin_state_up'] = admin_state_up for arg in (('admin_state_up', 'tenant_id') + (arg_list or ())): # Arg must be present and not empty if kwargs.get(arg): data['router'][arg] = kwargs[arg] router_req = self.new_create_request('routers', data, fmt) if set_context and tenant_id: # create a specific auth context for this request router_req.environ['neutron.context'] = context.Context( '', tenant_id) return router_req.get_response(self.ext_api) def _make_router(self, fmt, tenant_id, name=None, admin_state_up=None, external_gateway_info=None, set_context=False, arg_list=None, **kwargs): if external_gateway_info: arg_list = ('external_gateway_info', ) + (arg_list or ()) res = self._create_router(fmt, tenant_id, name, admin_state_up, set_context, arg_list=arg_list, external_gateway_info=external_gateway_info, **kwargs) return self.deserialize(fmt, res) @contextlib.contextmanager def router(self, name=None, admin_state_up=True, 
fmt=None, tenant_id=None, external_gateway_info=None, set_context=False, **kwargs): # avoid name duplication of edge if not name: name = _uuid() router = self._make_router(fmt or self.fmt, tenant_id, name, admin_state_up, external_gateway_info, set_context, **kwargs) yield router def _recursive_sort_list(self, lst): sorted_list = [] for ele in lst: if isinstance(ele, list): sorted_list.append(self._recursive_sort_list(ele)) elif isinstance(ele, dict): sorted_list.append(self._recursive_sort_dict(ele)) else: sorted_list.append(ele) return sorted(sorted_list, key=helpers.safe_sort_key) def _recursive_sort_dict(self, dct): sorted_dict = {} for k, v in dct.items(): if isinstance(v, list): sorted_dict[k] = self._recursive_sort_list(v) elif isinstance(v, dict): sorted_dict[k] = self._recursive_sort_dict(v) else: sorted_dict[k] = v return sorted_dict def _update_router_enable_snat(self, router_id, network_id, enable_snat): return self._update('routers', router_id, {'router': {'external_gateway_info': {'network_id': network_id, 'enable_snat': enable_snat}}}) def test_floatingip_association_on_unowned_router(self): self.skipTest("Currently no support in plugin for this") def test_router_add_gateway_no_subnet(self): self.skipTest('No support for no subnet gateway set') def test_floatingip_create_different_fixed_ip_same_port(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_router_add_interface_multiple_ipv4_subnet_port_returns_400(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_router_add_interface_multiple_ipv6_subnet_port(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_floatingip_update_different_fixed_ip_same_port(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_create_multiple_floatingips_same_fixed_ip_same_port(self): self.skipTest('Multiple fixed ips on a port are not supported') def _set_net_external(self, net_id): self._update('networks', 
net_id, {'network': {extnet_apidef.EXTERNAL: True}}) def _add_external_gateway_to_router(self, router_id, network_id, expected_code=webob.exc.HTTPOk.code, neutron_context=None, ext_ips=None): ext_ips = ext_ips or [] body = {'router': {'external_gateway_info': {'network_id': network_id}}} if ext_ips: body['router']['external_gateway_info'][ 'external_fixed_ips'] = ext_ips return self._update('routers', router_id, body, expected_code=expected_code, neutron_context=neutron_context) def test_router_add_gateway_no_subnet_forbidden(self): with self.router() as r: with self.network() as n: self._set_net_external(n['network']['id']) self._add_external_gateway_to_router( r['router']['id'], n['network']['id'], expected_code=webob.exc.HTTPBadRequest.code) class L3NatTestCaseBase(test_l3_plugin.L3NatTestCaseMixin): def test_create_floatingip_with_specific_ip(self): with self.subnet(cidr='10.0.0.0/24', enable_dhcp=False) as s: network_id = s['subnet']['network_id'] self._set_net_external(network_id) fp = self._make_floatingip(self.fmt, network_id, floating_ip='10.0.0.10') self.assertEqual('10.0.0.10', fp['floatingip']['floating_ip_address']) def test_floatingip_same_external_and_internal(self): # Select router with subnet's gateway_ip for floatingip when # routers connected to same subnet and external network. 
with self.subnet(cidr="10.0.0.0/24", enable_dhcp=False) as exs,\ self.subnet(cidr="12.0.0.0/24", gateway_ip="12.0.0.50", enable_dhcp=False) as ins: network_ex_id = exs['subnet']['network_id'] self._set_net_external(network_ex_id) r2i_fixed_ips = [{'ip_address': '12.0.0.2'}] with self.router() as r1,\ self.router() as r2,\ self.port(subnet=ins, fixed_ips=r2i_fixed_ips) as r2i_port: self._add_external_gateway_to_router( r1['router']['id'], network_ex_id) self._router_interface_action('add', r2['router']['id'], None, r2i_port['port']['id']) self._router_interface_action('add', r1['router']['id'], ins['subnet']['id'], None) self._add_external_gateway_to_router( r2['router']['id'], network_ex_id) with self.port(subnet=ins, fixed_ips=[{'ip_address': '12.0.0.8'}] ) as private_port: fp = self._make_floatingip(self.fmt, network_ex_id, private_port['port']['id']) self.assertEqual(r1['router']['id'], fp['floatingip']['router_id']) def test_floatingip_multi_external_one_internal(self): with self.subnet(cidr="10.0.0.0/24", enable_dhcp=False) as exs1,\ self.subnet(cidr="11.0.0.0/24", enable_dhcp=False) as exs2,\ self.subnet(cidr="12.0.0.0/24", enable_dhcp=False) as ins1: network_ex_id1 = exs1['subnet']['network_id'] network_ex_id2 = exs2['subnet']['network_id'] self._set_net_external(network_ex_id1) self._set_net_external(network_ex_id2) r2i_fixed_ips = [{'ip_address': '12.0.0.2'}] with self.router() as r1,\ self.router() as r2,\ self.port(subnet=ins1, fixed_ips=r2i_fixed_ips) as r2i_port: self._add_external_gateway_to_router( r1['router']['id'], network_ex_id1) self._router_interface_action('add', r1['router']['id'], ins1['subnet']['id'], None) self._add_external_gateway_to_router( r2['router']['id'], network_ex_id2) self._router_interface_action('add', r2['router']['id'], None, r2i_port['port']['id']) with self.port(subnet=ins1, fixed_ips=[{'ip_address': '12.0.0.3'}] ) as private_port: fp1 = self._make_floatingip(self.fmt, network_ex_id1, private_port['port']['id']) fp2 = 
self._make_floatingip(self.fmt, network_ex_id2, private_port['port']['id']) self.assertEqual(fp1['floatingip']['router_id'], r1['router']['id']) self.assertEqual(fp2['floatingip']['router_id'], r2['router']['id']) def _get_md_proxy_fw_rules(self): if not self.with_md_proxy: return [] return md_proxy.get_router_fw_rules() @mock.patch.object(edge_utils, "update_firewall") def test_router_set_gateway_with_nosnat(self, mock): expected_fw = [{'action': 'allow', 'enabled': True, 'name': 'Subnet Rule', 'source_ip_address': [], 'destination_ip_address': []} ] + self._get_md_proxy_fw_rules() nosnat_fw = [{'action': 'allow', 'enabled': True, 'name': 'No SNAT Rule', 'source_vnic_groups': ["external"], 'destination_ip_address': []}] with self.router() as r1,\ self.subnet() as ext_subnet,\ self.subnet(cidr='11.0.0.0/24') as s1,\ self.subnet(cidr='12.0.0.0/24') as s2: self._set_net_external(ext_subnet['subnet']['network_id']) self._router_interface_action( 'add', r1['router']['id'], s1['subnet']['id'], None) expected_fw[0]['source_ip_address'] = ['11.0.0.0/24'] expected_fw[0]['destination_ip_address'] = ['11.0.0.0/24'] fw_rules = mock.call_args[0][3]['firewall_rule_list'] self.assertEqual(self._recursive_sort_list(expected_fw), self._recursive_sort_list(fw_rules)) self._add_external_gateway_to_router( r1['router']['id'], ext_subnet['subnet']['network_id']) fw_rules = mock.call_args[0][3]['firewall_rule_list'] self.assertEqual(self._recursive_sort_list(expected_fw), self._recursive_sort_list(fw_rules)) self._update_router_enable_snat( r1['router']['id'], ext_subnet['subnet']['network_id'], False) nosnat_fw[0]['destination_ip_address'] = ['11.0.0.0/24'] fw_rules = mock.call_args[0][3]['firewall_rule_list'] self.assertEqual( self._recursive_sort_list(expected_fw + nosnat_fw), self._recursive_sort_list(fw_rules)) self._router_interface_action('add', r1['router']['id'], s2['subnet']['id'], None) expected_fw[0]['source_ip_address'] = ['12.0.0.0/24', '11.0.0.0/24'] 
expected_fw[0]['destination_ip_address'] = ['12.0.0.0/24', '11.0.0.0/24'] nosnat_fw[0]['destination_ip_address'] = ['11.0.0.0/24', '12.0.0.0/24'] fw_rules = mock.call_args[0][3]['firewall_rule_list'] self.assertEqual( self._recursive_sort_list(expected_fw + nosnat_fw), self._recursive_sort_list(fw_rules)) self._router_interface_action('remove', r1['router']['id'], s1['subnet']['id'], None) expected_fw[0]['source_ip_address'] = ['12.0.0.0/24'] expected_fw[0]['destination_ip_address'] = ['12.0.0.0/24'] nosnat_fw[0]['destination_ip_address'] = ['12.0.0.0/24'] fw_rules = mock.call_args[0][3]['firewall_rule_list'] self.assertEqual( self._recursive_sort_list(expected_fw + nosnat_fw), self._recursive_sort_list(fw_rules)) self._update_router_enable_snat( r1['router']['id'], ext_subnet['subnet']['network_id'], True) fw_rules = mock.call_args[0][3]['firewall_rule_list'] self.assertEqual( self._recursive_sort_list(expected_fw), self._recursive_sort_list(fw_rules)) self._router_interface_action('remove', r1['router']['id'], s2['subnet']['id'], None) self._remove_external_gateway_from_router( r1['router']['id'], ext_subnet['subnet']['network_id']) def test_router_add_interface_port_bad_tenant_returns_404(self): self.skipTest('TBD') def test_router_add_interface_subnet_with_bad_tenant_returns_404(self): self.skipTest('TBD') def test__notify_gateway_port_ip_changed(self): self.skipTest('not supported') def test_router_add_interface_multiple_ipv6_subnets_same_net(self): """Test router-interface-add for multiple ipv6 subnets on a network. Verify that adding multiple ipv6 subnets from the same network to a router places them all on the same router interface. 
""" with self.router() as r, self.network() as n: with self.subnet( network=n, cidr='fd00::1/64', enable_dhcp=False, ip_version=6) as s1, self.subnet( network=n, cidr='fd01::1/64', ip_version=6, enable_dhcp=False) as s2: body = self._router_interface_action('add', r['router']['id'], s1['subnet']['id'], None) pid1 = body['port_id'] body = self._router_interface_action('add', r['router']['id'], s2['subnet']['id'], None) pid2 = body['port_id'] self.assertEqual(pid1, pid2) port = self._show('ports', pid1) self.assertEqual(2, len(port['port']['fixed_ips'])) port_subnet_ids = [fip['subnet_id'] for fip in port['port']['fixed_ips']] self.assertIn(s1['subnet']['id'], port_subnet_ids) self.assertIn(s2['subnet']['id'], port_subnet_ids) self._router_interface_action('remove', r['router']['id'], s1['subnet']['id'], None) self._router_interface_action('remove', r['router']['id'], s2['subnet']['id'], None) def test_router_add_interface_ipv6_port_existing_network_returns_400(self): """Ensure unique IPv6 router ports per network id. Adding a router port containing one or more IPv6 subnets with the same network id as an existing router port should fail. This is so there is no ambiguity regarding on which port to add an IPv6 subnet when executing router-interface-add with a subnet and no port. 
""" with self.network() as n, self.router() as r: with self.subnet(network=n, cidr='fd00::/64', ip_version=6, enable_dhcp=False) as s1, ( self.subnet(network=n, cidr='fd01::/64', ip_version=6, enable_dhcp=False)) as s2: with self.port(subnet=s1) as p: self._router_interface_action('add', r['router']['id'], s2['subnet']['id'], None) exp_code = webob.exc.HTTPBadRequest.code self._router_interface_action('add', r['router']['id'], None, p['port']['id'], expected_code=exp_code) self._router_interface_action('remove', r['router']['id'], s2['subnet']['id'], None) def test_subnet_dhcp_metadata_with_update(self): self.plugin_instance.metadata_proxy_handler = mock.Mock() with self.subnet(cidr="10.0.0.0/24", enable_dhcp=True) as s1: subnet_id = s1['subnet']['id'] is_dhcp_meta = self.plugin_instance.is_dhcp_metadata( context.get_admin_context(), subnet_id) self.assertTrue(is_dhcp_meta) port_data = {'port': {'tenant_id': s1['subnet']['tenant_id'], 'network_id': s1['subnet']['network_id'], 'device_owner': 'compute:None'}} req = self.new_create_request( 'ports', port_data).get_response(self.api) port_req = self.deserialize(self.fmt, req) subnet_data = {'subnet': {'enable_dhcp': False}} self.new_update_request( 'subnets', subnet_data, s1['subnet']['id']).get_response(self.api) is_dhcp_meta = self.plugin_instance.is_dhcp_metadata( context.get_admin_context(), subnet_id) self.assertFalse(is_dhcp_meta) self.new_delete_request('ports', port_req['port']['id']) def test_router_add_gateway_notifications(self): with self.router() as r,\ self._create_l3_ext_network() as ext_net,\ self.subnet(network=ext_net): with mock.patch.object(registry, 'publish') as publish: self._add_external_gateway_to_router( r['router']['id'], ext_net['network']['id']) expected = [mock.call( resources.ROUTER_GATEWAY, events.AFTER_CREATE, mock.ANY, payload=mock.ANY)] publish.assert_has_calls(expected) def test_router_delete_ipv6_slaac_subnet_inuse_returns_409(self): self.skipTest('No DHCP v6 Support yet') def 
test_router_delete_dhcpv6_stateless_subnet_inuse_returns_409(self): self.skipTest('No DHCP v6 Support yet') def test_router_add_iface_ipv6_ext_ra_subnet_returns_400(self): self.skipTest('No DHCP v6 Support yet') def test_router_remove_ipv6_subnet_from_interface(self): self.skipTest('No DHCP v6 Support yet') def test_router_update_gateway_add_multiple_prefixes_ipv6(self): self.skipTest('No DHCP v6 Support yet') def test_router_concurrent_delete_upon_subnet_create(self): self.skipTest('No DHCP v6 Support yet') def test_router_update_gateway_upon_subnet_create_ipv6(self): self.skipTest('No DHCP v6 Support yet') def test_router_update_gateway_upon_subnet_create_max_ips_ipv6(self): self.skipTest('No DHCP v6 Support yet') def test_floatingip_via_router_interface_returns_201(self): self.skipTest('not supported') def test_floatingip_via_router_interface_returns_404(self): self.skipTest('not supported') def test_floatingip_update_subnet_gateway_disabled(self): self.skipTest('not supported') class IPv6ExpectedFailuresTestMixin(object): def test_router_add_interface_ipv6_subnet(self): self.skipTest('Not supported') def test_router_add_iface_ipv6_ext_ra_subnet_returns_400(self): # This returns a 400 too, but as an exception is raised the response # code need to be asserted differently with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: super(IPv6ExpectedFailuresTestMixin, self).\ test_router_add_iface_ipv6_ext_ra_subnet_returns_400() self.assertEqual(ctx_manager.exception.code, 400) def test_router_add_gateway_multiple_subnets_ipv6(self): self.skipTest('not supported') class TestExclusiveRouterTestCase(L3NatTest, L3NatTestCaseBase, test_l3_plugin.L3NatDBIntTestCase, IPv6ExpectedFailuresTestMixin, NsxVPluginV2TestCase, test_address_scope.AddressScopeTestCase): def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None, service_plugins=None): super(TestExclusiveRouterTestCase, self).setUp( plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins) 
self.plugin_instance.nsx_v.is_subnet_in_use = mock.Mock() self.plugin_instance.nsx_v.is_subnet_in_use.return_value = False self._default_tenant_id = self._tenant_id self._router_tenant_id = 'test-router-tenant' def _create_router(self, fmt, tenant_id, name=None, admin_state_up=None, set_context=False, arg_list=None, **kwargs): tenant_id = tenant_id or _uuid() data = {'router': {'tenant_id': tenant_id}} if name: data['router']['name'] = name if admin_state_up: data['router']['admin_state_up'] = admin_state_up for arg in (('admin_state_up', 'tenant_id') + (arg_list or ())): # Arg must be present and not empty if arg in kwargs and kwargs[arg]: data['router'][arg] = kwargs[arg] data['router']['router_type'] = kwargs.get('router_type', 'exclusive') router_req = self.new_create_request('routers', data, fmt) if set_context and tenant_id: # create a specific auth context for this request router_req.environ['neutron.context'] = context.Context( '', tenant_id) return router_req.get_response(self.ext_api) def _test_create_l3_ext_network(self, vlan_id=0): name = 'l3_ext_net' expected = [('subnets', []), ('name', name), ('admin_state_up', True), ('status', 'ACTIVE'), ('shared', False), (extnet_apidef.EXTERNAL, True)] with self._create_l3_ext_network(vlan_id) as net: for k, v in expected: self.assertEqual(net['network'][k], v) def test_create_router_fail_at_the_backend(self): p = directory.get_plugin() edge_manager = p.edge_manager with mock.patch.object(edge_manager, 'create_lrouter', side_effect=[n_exc.NeutronException]): router = {'router': {'admin_state_up': True, 'name': 'e161be1d-0d0d-4046-9823-5a593d94f72c', 'tenant_id': 'fake_tenant', 'router_type': 'exclusive'}} self.assertRaises(n_exc.NeutronException, p.create_router, context.get_admin_context(), router) self._test_list_resources('router', ()) def test_create_l3_ext_network_with_dhcp(self): with self._create_l3_ext_network() as net: with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: with 
self.subnet(network=net, enable_dhcp=True): self.assertEqual(ctx_manager.exception.code, 400) def test_create_l3_ext_network_without_vlan(self): self._test_create_l3_ext_network() def _test_router_create_with_gwinfo_and_l3_ext_net(self, vlan_id=None, validate_ext_gw=False, router_ctx=None): tenant_id = self._router_tenant_id if router_ctx else self._tenant_id with self._create_l3_ext_network(vlan_id) as net: with self.subnet(network=net, enable_dhcp=False) as s: data = {'router': {'tenant_id': tenant_id}} data['router']['name'] = 'router1' data['router']['external_gateway_info'] = { 'network_id': s['subnet']['network_id']} router_req = self.new_create_request( 'routers', data, self.fmt, context=router_ctx) res = router_req.get_response(self.ext_api) router = self.deserialize(self.fmt, res) self.assertEqual( s['subnet']['network_id'], (router['router']['external_gateway_info'] ['network_id'])) if validate_ext_gw: pass def test_router_create_with_gwinfo_and_l3_ext_net(self): self._test_router_create_with_gwinfo_and_l3_ext_net() def test_router_create_with_gwinfo_and_l3_ext_net_with_vlan(self): self._test_router_create_with_gwinfo_and_l3_ext_net(444) def test_router_create_with_gwinfo_and_l3_ext_net_with_non_admin(self): ctx = context.Context(user_id=None, tenant_id=self._router_tenant_id, is_admin=False) self._test_router_create_with_gwinfo_and_l3_ext_net(router_ctx=ctx) def test_router_create_with_different_sizes(self): data = {'router': { 'tenant_id': 'whatever', 'name': 'test_router', 'router_type': 'exclusive'}} for size in ['compact', 'large', 'xlarge', 'quadlarge']: data['router']['router_size'] = size router_req = self.new_create_request('routers', data, self.fmt) res = router_req.get_response(self.ext_api) router = self.deserialize(self.fmt, res) self.assertEqual(size, router['router']['router_size']) def test_router_create_overriding_default_edge_size(self): data = {'router': { 'tenant_id': 'whatever', 'name': 'test_router', 'router_type': 'exclusive'}} 
cfg.CONF.set_override('exclusive_router_appliance_size', 'xlarge', group='nsxv') router_req = self.new_create_request('routers', data, self.fmt) res = router_req.get_response(self.ext_api) router = self.deserialize(self.fmt, res) self.assertEqual('xlarge', router['router']['router_size']) def test_router_add_gateway_invalid_network_returns_404(self): # NOTE(salv-orlando): This unit test has been overridden # as the nsx plugin support the ext_gw_mode extension # which mandates an uuid for the external network identifier with self.router() as r: self._add_external_gateway_to_router( r['router']['id'], uuidutils.generate_uuid(), expected_code=webob.exc.HTTPNotFound.code) def test_router_rename(self): with self.router(name='old_name') as r: with mock.patch.object(edge_appliance_driver.EdgeApplianceDriver, 'rename_edge') as edge_rename: new_name = 'new_name' router_id = r['router']['id'] # get the edge of this router plugin = directory.get_plugin() router_obj = ex_router_driver.RouterExclusiveDriver(plugin) ctx = context.get_admin_context() edge_id = router_obj._get_edge_id_or_raise(ctx, router_id) # update the name body = self._update('routers', router_id, {'router': {'name': new_name}}) self.assertEqual(new_name, body['router']['name']) edge_rename.assert_called_once_with( edge_id, new_name + '-' + router_id) def test_router_resize(self): with self.router() as r: with mock.patch.object(edge_appliance_driver.EdgeApplianceDriver, 'resize_edge') as edge_resize: new_size = 'large' router_id = r['router']['id'] # get the edge of this router plugin = directory.get_plugin() router_obj = ex_router_driver.RouterExclusiveDriver(plugin) ctx = context.get_admin_context() edge_id = router_obj._get_edge_id_or_raise(ctx, router_id) # update the router size body = self._update('routers', router_id, {'router': {'router_size': new_size}}) self.assertEqual(new_size, body['router']['router_size']) edge_resize.assert_called_once_with(edge_id, new_size) def 
_test_router_update_gateway_on_l3_ext_net(self, vlan_id=None, validate_ext_gw=False, distributed=False, router_ctx=None): if router_ctx: self._tenant_id = self._router_tenant_id with self.router( arg_list=('distributed',), distributed=distributed, set_context=True, tenant_id=self._tenant_id) as r: self._tenant_id = self._default_tenant_id with self.subnet() as s1: with self._create_l3_ext_network(vlan_id) as net: with self.subnet(network=net, enable_dhcp=False) as s2: self._set_net_external(s1['subnet']['network_id']) try: self._add_external_gateway_to_router( r['router']['id'], s1['subnet']['network_id'], neutron_context=router_ctx) body = self._show('routers', r['router']['id']) net_id = (body['router'] ['external_gateway_info']['network_id']) self.assertEqual(net_id, s1['subnet']['network_id']) # Plug network with external mapping self._set_net_external(s2['subnet']['network_id']) self._add_external_gateway_to_router( r['router']['id'], s2['subnet']['network_id'], neutron_context=router_ctx) body = self._show('routers', r['router']['id']) net_id = (body['router'] ['external_gateway_info']['network_id']) self.assertEqual(net_id, s2['subnet']['network_id']) if validate_ext_gw: pass finally: # Cleanup self._remove_external_gateway_from_router( r['router']['id'], s2['subnet']['network_id']) def test_router_update_gateway_on_l3_ext_net(self): self._test_router_update_gateway_on_l3_ext_net() def test_router_update_gateway_on_l3_ext_net_with_non_admin(self): ctx = context.Context(user_id=None, tenant_id=self._router_tenant_id, is_admin=False) self._test_router_update_gateway_on_l3_ext_net(router_ctx=ctx) def test_router_update_gateway_on_l3_ext_net_with_vlan(self): self._test_router_update_gateway_on_l3_ext_net(444) def test_router_update_gateway_with_existing_floatingip(self): with self._create_l3_ext_network() as net: with self.subnet(network=net, enable_dhcp=False) as subnet: with self.floatingip_with_assoc() as fip: self._add_external_gateway_to_router( 
fip['floatingip']['router_id'], subnet['subnet']['network_id'], expected_code=webob.exc.HTTPConflict.code) def test_router_list_by_tenant_id(self): with self.router(), self.router(): with self.router(tenant_id='custom') as router: self._test_list_resources('router', [router], query_params="tenant_id=custom") def test_create_l3_ext_network_with_vlan(self): self._test_create_l3_ext_network(666) def test_floatingip_with_assoc_fails(self): self._test_floatingip_with_assoc_fails( self._plugin_name + '._check_and_get_fip_assoc') def test_floatingip_with_invalid_create_port(self): self._test_floatingip_with_invalid_create_port(self._plugin_name) def test_floatingip_update(self): super(TestExclusiveRouterTestCase, self).test_floatingip_update( constants.FLOATINGIP_STATUS_DOWN) def test_floating_ip_no_snat(self): """Cannot add floating ips to a router with disabled snat""" with self.router() as r1,\ self.subnet() as ext_subnet,\ self.subnet(cidr='11.0.0.0/24') as s1,\ self.port(subnet=s1) as private_port: # Add interfaces to the router self._router_interface_action( 'add', r1['router']['id'], s1['subnet']['id'], None) self._set_net_external(ext_subnet['subnet']['network_id']) self._add_external_gateway_to_router( r1['router']['id'], ext_subnet['subnet']['network_id']) # disable snat self._update_router_enable_snat( r1['router']['id'], ext_subnet['subnet']['network_id'], False) # create a floating ip and associate it to the router should fail self.assertRaises( object, self._make_floatingip, self.fmt, ext_subnet['subnet']['network_id'], private_port['port']['id']) # now enable snat and try again self._update_router_enable_snat( r1['router']['id'], ext_subnet['subnet']['network_id'], True) self._make_floatingip( self.fmt, ext_subnet['subnet']['network_id'], private_port['port']['id']) # now shouldn't be able to disable snat self.assertRaises( object, self._update_router_enable_snat, r1['router']['id'], ext_subnet['subnet']['network_id'], False) def 
test_floatingip_disassociate(self): with self.port() as p: private_sub = {'subnet': {'id': p['port']['fixed_ips'][0]['subnet_id']}} with self.floatingip_no_assoc(private_sub) as fip: self.assertEqual(fip['floatingip']['status'], constants.FLOATINGIP_STATUS_DOWN) port_id = p['port']['id'] body = self._update('floatingips', fip['floatingip']['id'], {'floatingip': {'port_id': port_id}}) self.assertEqual(body['floatingip']['port_id'], port_id) self.assertEqual(body['floatingip']['status'], constants.FLOATINGIP_STATUS_ACTIVE) # Disassociate body = self._update('floatingips', fip['floatingip']['id'], {'floatingip': {'port_id': None}}) body = self._show('floatingips', fip['floatingip']['id']) self.assertIsNone(body['floatingip']['port_id']) self.assertIsNone(body['floatingip']['fixed_ip_address']) self.assertEqual(body['floatingip']['status'], constants.FLOATINGIP_STATUS_DOWN) def test_update_floatingip_with_edge_router_update_failure(self): p = directory.get_plugin() with self.subnet() as subnet,\ self.port(subnet=subnet) as p1,\ self.port(subnet=subnet) as p2: p1_id = p1['port']['id'] p2_id = p2['port']['id'] with self.floatingip_with_assoc(port_id=p1_id) as fip: with self._mock_edge_router_update_with_exception(): self.assertRaises(object, p.update_floatingip, context.get_admin_context(), fip['floatingip']['id'], floatingip={'floatingip': {'port_id': p2_id}}) res = self._list( 'floatingips', query_params="port_id=%s" % p1_id) self.assertEqual(len(res['floatingips']), 1) res = self._list( 'floatingips', query_params="port_id=%s" % p2_id) self.assertEqual(len(res['floatingips']), 0) def test_create_floatingip_with_edge_router_update_failure(self): p = directory.get_plugin() with self.subnet(cidr='200.0.0.0/24') as public_sub: public_network_id = public_sub['subnet']['network_id'] self._set_net_external(public_network_id) with self.port() as private_port: port_id = private_port['port']['id'] tenant_id = private_port['port']['tenant_id'] subnet_id = 
private_port['port']['fixed_ips'][0]['subnet_id'] with self.router() as r: self._add_external_gateway_to_router( r['router']['id'], public_sub['subnet']['network_id']) self._router_interface_action('add', r['router']['id'], subnet_id, None) floatingip = {'floatingip': { 'tenant_id': tenant_id, 'floating_network_id': public_network_id, 'port_id': port_id}} with self._mock_edge_router_update_with_exception(): self.assertRaises(object, p.create_floatingip, context.get_admin_context(), floatingip=floatingip) res = self._list( 'floatingips', query_params="port_id=%s" % port_id) self.assertEqual(len(res['floatingips']), 0) # Cleanup self._router_interface_action('remove', r['router']['id'], subnet_id, None) self._remove_external_gateway_from_router( r['router']['id'], public_network_id) @contextlib.contextmanager def _mock_edge_router_update_with_exception(self): nsx_router_update = PLUGIN_NAME + '._update_edge_router' with mock.patch(nsx_router_update) as update_edge: update_edge.side_effect = object() yield update_edge @mock.patch.object(edge_utils, "update_firewall") def test_router_interfaces_with_update_firewall(self, mock): s1_cidr = '10.0.0.0/24' s2_cidr = '11.0.0.0/24' with self.router() as r,\ self.subnet(cidr=s1_cidr) as s1,\ self.subnet(cidr=s2_cidr) as s2: self._router_interface_action('add', r['router']['id'], s1['subnet']['id'], None) self._router_interface_action('add', r['router']['id'], s2['subnet']['id'], None) expected_cidrs = [s1_cidr, s2_cidr] expected_fw = [{'action': 'allow', 'enabled': True, 'name': 'Subnet Rule', 'source_ip_address': expected_cidrs, 'destination_ip_address': expected_cidrs} ] + self._get_md_proxy_fw_rules() fw_rules = mock.call_args[0][3]['firewall_rule_list'] self.assertEqual(self._recursive_sort_list(expected_fw), self._recursive_sort_list(fw_rules)) self._router_interface_action('remove', r['router']['id'], s1['subnet']['id'], None) self._router_interface_action('remove', r['router']['id'], s2['subnet']['id'], None) 
@mock.patch.object(edge_utils, "update_firewall") def test_router_interfaces_with_update_firewall_metadata(self, mock): self.plugin_instance.metadata_proxy_handler = mock.Mock() s1_cidr = '10.0.0.0/24' s2_cidr = '11.0.0.0/24' with self.router() as r,\ self.subnet(cidr=s1_cidr) as s1,\ self.subnet(cidr=s2_cidr) as s2: self._router_interface_action('add', r['router']['id'], s1['subnet']['id'], None) self._router_interface_action('add', r['router']['id'], s2['subnet']['id'], None) # build the list of expected fw rules expected_cidrs = [s1_cidr, s2_cidr] fw_rule = {'action': 'allow', 'enabled': True, 'name': 'Subnet Rule', 'source_ip_address': expected_cidrs, 'destination_ip_address': expected_cidrs} vse_rule = {'action': 'allow', 'enabled': True, 'name': 'VSERule', 'source_vnic_groups': ['vse'], 'destination_vnic_groups': ['external']} dest_intern = [md_proxy.INTERNAL_SUBNET] md_inter = {'action': 'deny', 'destination_ip_address': dest_intern, 'enabled': True, 'name': 'MDInterEdgeNet'} dest_srvip = [md_proxy.METADATA_IP_ADDR] vsmdienet = {'action': 'allow', 'destination_ip_address': [md_proxy.INTERNAL_SUBNET], 'enabled': True, 'name': 'VSEMDInterEdgeNet', 'source_vnic_groups': ['vse']} md_srvip = {'action': 'allow', 'destination_ip_address': dest_srvip, 'destination_port': '80,443,8775', 'enabled': True, 'name': 'MDServiceIP', 'protocol': 'tcp'} expected_fw = [fw_rule, vsmdienet, vse_rule, md_inter, md_srvip] fw_rules = mock.call_args[0][3]['firewall_rule_list'] self.assertEqual(self._recursive_sort_list(expected_fw), self._recursive_sort_list(fw_rules)) # Also test the md_srvip conversion: drv = edge_firewall_driver.EdgeFirewallDriver() rule = drv._convert_firewall_rule(md_srvip) exp_service = {'service': [{'port': [80, 443, 8775], 'protocol': 'tcp'}]} exp_rule = {'action': 'accept', 'application': exp_service, 'destination': {'ipAddress': dest_srvip}, 'enabled': True, 'name': 'MDServiceIP'} self.assertEqual(exp_rule, rule) self._router_interface_action('remove', 
r['router']['id'], s1['subnet']['id'], None) self._router_interface_action('remove', r['router']['id'], s2['subnet']['id'], None) @mock.patch.object(edge_utils, "update_firewall") def test_router_interfaces_with_update_firewall_metadata_conf(self, mock): """Test the metadata proxy firewall rule with configured ports """ cfg.CONF.set_override('metadata_service_allowed_ports', ['55', ' 66 ', '55', '77'], group='nsxv') self.plugin_instance.metadata_proxy_handler = mock.Mock() s1_cidr = '10.0.0.0/24' with self.router() as r,\ self.subnet(cidr=s1_cidr) as s1: self._router_interface_action('add', r['router']['id'], s1['subnet']['id'], None) # build the expected fw rule # at this stage the string of ports is not sorted/unique/validated dest_srvip = [md_proxy.METADATA_IP_ADDR] rule_name = 'MDServiceIP' md_srvip = {'action': 'allow', 'destination_ip_address': dest_srvip, 'destination_port': '80,443,8775,55,66,55,77', 'enabled': True, 'name': rule_name, 'protocol': 'tcp'} # compare it to the rule with the same name fw_rules = mock.call_args[0][3]['firewall_rule_list'] rule_found = False for fw_rule in fw_rules: if (validators.is_attr_set(fw_rule.get("name")) and fw_rule['name'] == rule_name): self.assertEqual(md_srvip, fw_rule) rule_found = True break self.assertTrue(rule_found) # Also test the rule conversion # Ports should be sorted & unique, and ignore non numeric values drv = edge_firewall_driver.EdgeFirewallDriver() rule = drv._convert_firewall_rule(md_srvip) exp_service = {'service': [{'port': [55, 66, 77, 80, 443, 8775], 'protocol': 'tcp'}]} exp_rule = {'action': 'accept', 'application': exp_service, 'destination': {'ipAddress': dest_srvip}, 'enabled': True, 'name': 'MDServiceIP'} self.assertEqual(exp_rule, rule) @mock.patch.object(edge_utils, "update_firewall") def test_router_interfaces_different_tenants_update_firewall(self, mock): tenant_id = _uuid() other_tenant_id = _uuid() s1_cidr = '10.0.0.0/24' s2_cidr = '11.0.0.0/24' with self.router(tenant_id=tenant_id) as 
r,\ self.network(tenant_id=tenant_id) as n1,\ self.network(tenant_id=other_tenant_id) as n2,\ self.subnet(network=n1, cidr=s1_cidr) as s1,\ self.subnet(network=n2, cidr=s2_cidr) as s2: self._router_interface_action('add', r['router']['id'], s2['subnet']['id'], None) self._router_interface_action('add', r['router']['id'], s1['subnet']['id'], None, tenant_id=tenant_id) expected_cidrs = [s1_cidr, s2_cidr] expected_fw = [{'action': 'allow', 'enabled': True, 'name': 'Subnet Rule', 'source_ip_address': expected_cidrs, 'destination_ip_address': expected_cidrs} ] + self._get_md_proxy_fw_rules() fw_rules = mock.call_args[0][3]['firewall_rule_list'] self.assertEqual(self._recursive_sort_list(expected_fw), self._recursive_sort_list(fw_rules)) self._router_interface_action('remove', r['router']['id'], s1['subnet']['id'], None, tenant_id=tenant_id) self._router_interface_action('remove', r['router']['id'], s2['subnet']['id'], None) expected_fw = self._get_md_proxy_fw_rules() fw_rules = mock.call_args[0][3]['firewall_rule_list'] self.assertEqual(expected_fw, fw_rules) def test_create_router_gateway_fails(self): self.skipTest('not supported') def test_migrate_exclusive_router_to_shared(self): with self._create_l3_ext_network() as net: with self.subnet(network=net, enable_dhcp=False) as s: data = {'router': {'tenant_id': 'whatever'}} data['router']['name'] = 'router1' data['router']['external_gateway_info'] = { 'network_id': s['subnet']['network_id']} data['router']['router_type'] = 'exclusive' router_req = self.new_create_request('routers', data, self.fmt) res = router_req.get_response(self.ext_api) router = self.deserialize(self.fmt, res) # update the router type: router_id = router['router']['id'] self._update('routers', router_id, {'router': {'router_type': 'shared'}}) # get the updated router and check it's type body = self._show('routers', router_id) self.assertEqual('shared', body['router']['router_type']) @mock.patch.object(edge_utils.EdgeManager, 'update_interface_addr') 
def test_router_update_gateway_with_different_external_subnet(self, mock): # This test calls the backend, so we need a mock for the edge_utils super( TestExclusiveRouterTestCase, self).test_router_update_gateway_with_different_external_subnet() @mock.patch.object(edge_utils.EdgeManager, 'update_interface_addr') def test_router_add_interface_multiple_ipv6_subnets_same_net(self, mock): # This test calls the backend, so we need a mock for the edge_utils super( TestExclusiveRouterTestCase, self).test_router_add_interface_multiple_ipv6_subnets_same_net() def _fake_rename_edge(self, edge_id, name): raise vcns_exc.VcnsApiException( status=400, header={'status': 200}, uri='fake_url', response='') def test_create_router_with_update_error(self): p = directory.get_plugin() # make sure there is an available edge so we will use backend update available_edge = {'edge_id': 'edge-11', 'router_id': 'fake_id'} nsxv_db.add_nsxv_router_binding( context.get_admin_context().session, available_edge['router_id'], available_edge['edge_id'], None, constants.ACTIVE) with mock.patch.object(p.edge_manager, '_get_available_router_binding', return_value=available_edge): # Mock for update_edge task failure with mock.patch.object( p.edge_manager.nsxv_manager, 'rename_edge', side_effect=self._fake_rename_edge): router = {'router': {'admin_state_up': True, 'name': 'e161be1d-0d0d-4046-9823-5a593d94f72c', 'tenant_id': 'fake_tenant', 'router_type': 'exclusive'}} # router creation should succeed returned_router = p.create_router(context.get_admin_context(), router) # router status should be 'error' self.assertEqual(constants.ERROR, returned_router['status']) # check the same after get_router new_router = p.get_router(context.get_admin_context(), returned_router['id']) self.assertEqual(constants.ERROR, new_router['status']) def test_create_router_with_bad_az_hint(self): p = directory.get_plugin() router = {'router': {'admin_state_up': True, 'name': 'e161be1d-0d0d-4046-9823-5a593d94f72c', 'tenant_id': 
'fake_tenant', 'router_type': 'exclusive', 'availability_zone_hints': ['bad_hint']}} self.assertRaises(n_exc.NeutronException, p.create_router, context.get_admin_context(), router) def test_create_router_with_az_hint(self): az_name = 'az7' set_az_in_config(az_name) p = directory.get_plugin() p._availability_zones_data = nsx_az.NsxVAvailabilityZones() p._get_edge_id_by_rtr_id = p.real_get_edge router = {'router': {'admin_state_up': True, 'name': 'e161be1d-0d0d-4046-9823-5a593d94f72c', 'tenant_id': 'fake_tenant', 'router_type': 'exclusive', 'availability_zone_hints': [az_name]}} # router creation should succeed returned_router = p.create_router(context.get_admin_context(), router) self.assertEqual([az_name], returned_router['availability_zone_hints']) self.assertEqual([az_name], returned_router['availability_zones']) def test_create_router_with_default_az(self): az_name = 'az7' set_az_in_config(az_name) cfg.CONF.set_override('default_availability_zones', [az_name]) p = directory.get_plugin() p._availability_zones_data = nsx_az.NsxVAvailabilityZones() p._get_edge_id_by_rtr_id = p.real_get_edge router = {'router': {'admin_state_up': True, 'name': 'e161be1d-0d0d-4046-9823-5a593d94f72c', 'tenant_id': 'fake_tenant', 'router_type': 'exclusive'}} # router creation should succeed returned_router = p.create_router(context.get_admin_context(), router) self.assertEqual([], returned_router['availability_zone_hints']) self.assertEqual([az_name], returned_router['availability_zones']) def test_floatingip_update_to_same_port_id_twice(self): self.skipTest('Plugin changes floating port status') def test_router_add_interface_ipv6_subnet(self): self.skipTest('Not supported') def test_router_add_gateway_multiple_subnets_ipv6(self): self.skipTest('not supported') def test_router_add_interface_by_port_other_tenant_address_out_of_pool( self): # multiple fixed ips per port are not supported self.skipTest('not supported') def 
test_router_add_interface_by_port_other_tenant_address_in_pool(self): # multiple fixed ips per port are not supported self.skipTest('not supported') def test_router_add_interface_by_port_admin_address_out_of_pool(self): # multiple fixed ips per port are not supported self.skipTest('not supported') def test_update_subnet_gateway_for_external_net(self): plugin = directory.get_plugin() router_obj = ex_router_driver.RouterExclusiveDriver(plugin) with mock.patch.object(plugin, '_find_router_driver', return_value=router_obj): with mock.patch.object(router_obj, '_update_nexthop') as update_nexthop: super(TestExclusiveRouterTestCase, self).test_update_subnet_gateway_for_external_net() self.assertTrue(update_nexthop.called) def _test_create_subnetpool(self, prefixes, expected=None, admin=False, **kwargs): keys = kwargs.copy() keys.setdefault('tenant_id', self._tenant_id) with self.subnetpool(prefixes, admin, **keys) as subnetpool: self._validate_resource(subnetpool, keys, 'subnetpool') if expected: self._compare_resource(subnetpool, expected, 'subnetpool') return subnetpool def test_router_no_snat_with_different_address_scope(self): """Test that if the router has no snat, you cannot add an interface from a different address scope than the gateway. 
""" # create an external network on one address scope with self.address_scope(name='as1') as addr_scope, \ self.network() as ext_net: self._set_net_external(ext_net['network']['id']) as_id = addr_scope['address_scope']['id'] subnet = netaddr.IPNetwork('10.10.10.0/24') subnetpool = self._test_create_subnetpool( [subnet.cidr], name='sp1', min_prefixlen='24', address_scope_id=as_id) subnetpool_id = subnetpool['subnetpool']['id'] data = {'subnet': { 'network_id': ext_net['network']['id'], 'subnetpool_id': subnetpool_id, 'ip_version': 4, 'enable_dhcp': False, 'tenant_id': ext_net['network']['tenant_id']}} req = self.new_create_request('subnets', data) ext_subnet = self.deserialize(self.fmt, req.get_response(self.api)) # create a regular network on another address scope with self.address_scope(name='as2') as addr_scope2, \ self.network() as net: as_id2 = addr_scope2['address_scope']['id'] subnet2 = netaddr.IPNetwork('20.10.10.0/24') subnetpool2 = self._test_create_subnetpool( [subnet2.cidr], name='sp2', min_prefixlen='24', address_scope_id=as_id2) subnetpool_id2 = subnetpool2['subnetpool']['id'] data = {'subnet': { 'network_id': net['network']['id'], 'subnetpool_id': subnetpool_id2, 'ip_version': 4, 'tenant_id': net['network']['tenant_id']}} req = self.new_create_request('subnets', data) int_subnet = self.deserialize( self.fmt, req.get_response(self.api)) # create a no snat router with this gateway with self.router() as r: self._add_external_gateway_to_router( r['router']['id'], ext_subnet['subnet']['network_id']) self._update_router_enable_snat( r['router']['id'], ext_subnet['subnet']['network_id'], False) # should fail adding the interface to the router err_code = webob.exc.HTTPBadRequest.code self._router_interface_action('add', r['router']['id'], int_subnet['subnet']['id'], None, err_code) def _create_subnet_and_add_to_router(self, subnetpool_id, router_id): # create a regular network on the given subnet pool with self.network() as net: data = {'subnet': { 
'network_id': net['network']['id'], 'subnetpool_id': subnetpool_id, 'ip_version': 4, 'tenant_id': net['network']['tenant_id']}} req = self.new_create_request('subnets', data) int_subnet = self.deserialize( self.fmt, req.get_response(self.api)) # Add the interface to the router self._router_interface_action( 'add', router_id, int_subnet['subnet']['id'], None) return int_subnet def test_router_no_snat_with_same_address_scope(self): """Test that if the router has no snat, you can add an interface from the same address scope as the gateway. """ # create an external network on one address scope with self.address_scope(name='as1') as addr_scope, \ self.network() as ext_net: self._set_net_external(ext_net['network']['id']) as_id = addr_scope['address_scope']['id'] subnet = netaddr.IPNetwork('10.10.10.0/21') subnetpool = self._test_create_subnetpool( [subnet.cidr], name='sp1', min_prefixlen='24', address_scope_id=as_id) subnetpool_id = subnetpool['subnetpool']['id'] data = {'subnet': { 'network_id': ext_net['network']['id'], 'subnetpool_id': subnetpool_id, 'ip_version': 4, 'enable_dhcp': False, 'tenant_id': ext_net['network']['tenant_id']}} req = self.new_create_request('subnets', data) ext_subnet = self.deserialize(self.fmt, req.get_response(self.api)) # create a regular network on the same address scope # and create a no snat router with this gateway with self.router() as r: self._add_external_gateway_to_router( r['router']['id'], ext_subnet['subnet']['network_id']) self._update_router_enable_snat( r['router']['id'], ext_subnet['subnet']['network_id'], False) # should succeed adding the interface to the router self._create_subnet_and_add_to_router( subnetpool_id, r['router']['id']) def test_router_address_scope_snat_rules(self): """Test that if the router interface had the same address scope as the gateway - snat rule is not added, but firewall rule is. 
""" # create an external network on one address scope with self.address_scope(name='as1') as addr_scope, \ self.network() as ext_net: self._set_net_external(ext_net['network']['id']) as_id = addr_scope['address_scope']['id'] subnet = netaddr.IPNetwork('10.10.10.0/21') subnetpool = self._test_create_subnetpool( [subnet.cidr], name='sp1', min_prefixlen='24', address_scope_id=as_id) subnetpool_id = subnetpool['subnetpool']['id'] data = {'subnet': { 'network_id': ext_net['network']['id'], 'subnetpool_id': subnetpool_id, 'ip_version': 4, 'enable_dhcp': False, 'tenant_id': ext_net['network']['tenant_id']}} req = self.new_create_request('subnets', data) ext_subnet = self.deserialize(self.fmt, req.get_response(self.api)) # create a regular network on the same address scope # and create a router with this gateway with self.router() as r: self._add_external_gateway_to_router( r['router']['id'], ext_subnet['subnet']['network_id']) # Add the interface to the router with mock.patch.object( edge_utils, 'update_nat_rules') as update_nat,\ mock.patch.object( edge_utils, 'update_firewall') as update_fw: int_subnet = self._create_subnet_and_add_to_router( subnetpool_id, r['router']['id']) # make sure snat rules are not added update_nat.assert_called_once_with( mock.ANY, mock.ANY, r['router']['id'], [], []) # check fw rules fw_rules = update_fw.call_args[0][3][ 'firewall_rule_list'] exp_fw_len = 6 if self.with_md_proxy else 2 pool_rule_ind = 5 if self.with_md_proxy else 1 pool_rule = fw_rules[pool_rule_ind] self.assertEqual(exp_fw_len, len(fw_rules)) self.assertEqual('Allocation Pool Rule', pool_rule['name']) self.assertEqual('allow', pool_rule['action']) self.assertEqual(int_subnet['subnet']['cidr'], pool_rule['destination_ip_address'][0]) self.assertEqual('external', pool_rule['source_vnic_groups'][0]) def test_router_address_scope_fw_rules(self): """Test that if the router interfaces has different address scope there are separate fw rules """ # create a router, networks, and 
address scopes
        # Two address scopes, two subnetpools (one per scope), and a router
        # that gets an interface from each pool.
        with self.address_scope(name='as1') as addr_scope1, \
            self.address_scope(name='as2') as addr_scope2, \
            self.router() as r:
            as1_id = addr_scope1['address_scope']['id']
            as2_id = addr_scope2['address_scope']['id']
            pool1 = netaddr.IPNetwork('10.10.10.0/21')
            subnetpool1 = self._test_create_subnetpool(
                [pool1.cidr], name='sp1',
                min_prefixlen='24', address_scope_id=as1_id)
            pool2 = netaddr.IPNetwork('20.20.20.0/21')
            subnetpool2 = self._test_create_subnetpool(
                [pool2.cidr], name='sp2',
                min_prefixlen='24', address_scope_id=as2_id)
            subnetpool_id1 = subnetpool1['subnetpool']['id']
            subnetpool_id2 = subnetpool2['subnetpool']['id']
            # Add the interfaces to the router
            with mock.patch.object(
                edge_utils, 'update_nat_rules'),\
                mock.patch.object(edge_utils, 'update_firewall') as update_fw:
                # create subnets on the 2 subnet pools, and attach to router
                subnet1 = self._create_subnet_and_add_to_router(
                    subnetpool_id1, r['router']['id'])
                subnet2 = self._create_subnet_and_add_to_router(
                    subnetpool_id2, r['router']['id'])
                subnet3 = self._create_subnet_and_add_to_router(
                    subnetpool_id2, r['router']['id'])
                # One allow rule per address scope: subnet1 alone (scope as1),
                # subnet2+subnet3 grouped together (both on scope as2).
                expected_rules = [
                    {'enabled': True,
                     'destination_ip_address': [subnet1['subnet']['cidr']],
                     'action': 'allow',
                     'name': 'Subnet Rule',
                     'source_ip_address': [subnet1['subnet']['cidr']]},
                    {'enabled': True,
                     'destination_ip_address': [subnet2['subnet']['cidr'],
                                                subnet3['subnet']['cidr']],
                     'action': 'allow',
                     'name': 'Subnet Rule',
                     'source_ip_address': [subnet2['subnet']['cidr'],
                                           subnet3['subnet']['cidr']]}
                ] + self._get_md_proxy_fw_rules()
                # check the final fw rules
                fw_rules = update_fw.call_args[0][3][
                    'firewall_rule_list']
                self.assertEqual(len(expected_rules), len(fw_rules))
                self.assertEqual(self._recursive_sort_list(expected_rules),
                                 self._recursive_sort_list(fw_rules))

    def _prepare_external_subnet_on_address_scope(self,
                                                  ext_net,
                                                  address_scope):
        """Make ext_net external and give it a subnet under address_scope.

        Creates a subnetpool bound to the given address scope and a
        (DHCP-disabled, IPv4) subnet on ext_net taken from that pool.
        Returns the created subnet dict.
        """
        self._set_net_external(ext_net['network']['id'])
        as_id = address_scope['address_scope']['id']
        subnet = netaddr.IPNetwork('10.10.10.0/21')
        subnetpool = self._test_create_subnetpool(
            [subnet.cidr], name='sp1',
            min_prefixlen='24', address_scope_id=as_id)
        subnetpool_id = subnetpool['subnetpool']['id']
        data = {'subnet': {
                'network_id': ext_net['network']['id'],
                'subnetpool_id': subnetpool_id,
                'ip_version': 4,
                'enable_dhcp': False,
                'tenant_id': ext_net['network']['tenant_id']}}
        req = self.new_create_request('subnets', data)
        ext_subnet = self.deserialize(self.fmt, req.get_response(self.api))
        return ext_subnet['subnet']

    def _test_router_address_scope_change(self, change_gw=False):
        """When subnetpool address scope changes, and router that was
        originally under same address scope, results having different
        address scopes, relevant snat rules are added.
        """
        # create an external network on one address scope
        with self.address_scope(name='as1') as addr_scope, \
            self.network() as ext_net:
            ext_subnet = self._prepare_external_subnet_on_address_scope(
                ext_net, addr_scope)

            # create a router with this gateway
            with self.router() as r:
                self._add_external_gateway_to_router(
                    r['router']['id'], ext_subnet['network_id'])

                # create a regular network on same address scope
                # and verify no snat change
                as_id = addr_scope['address_scope']['id']
                subnet2 = netaddr.IPNetwork('40.10.10.0/24')
                subnetpool2 = self._test_create_subnetpool(
                    [subnet2.cidr], name='sp2',
                    min_prefixlen='24', address_scope_id=as_id)
                subnetpool2_id = subnetpool2['subnetpool']['id']
                self._create_subnet_and_add_to_router(
                    subnetpool2_id, r['router']['id'])

                # change address scope of the first subnetpool
                with self.address_scope(name='as2') as addr_scope2,\
                    mock.patch.object(edge_utils,
                                      'update_nat_rules') as update_nat,\
                    mock.patch.object(edge_utils,
                                      'update_firewall') as update_fw:
                    as2_id = addr_scope2['address_scope']['id']
                    data = {'subnetpool': {
                            'address_scope_id': as2_id}}
                    # change_gw selects whether we move the gateway's pool
                    # or the internal subnet's pool to the new scope
                    if change_gw:
                        subnetpool_to_update = ext_subnet['subnetpool_id']
                    else:
                        subnetpool_to_update = subnetpool2_id
                    req = self.new_update_request('subnetpools', data,
                                                  subnetpool_to_update)
                    req.get_response(self.api)
                    # Verify that the snat & fw rule are being updated
                    update_nat.assert_called_once()
                    update_fw.assert_called_once()

    def test_router_address_scope_change(self):
        self._test_router_address_scope_change()

    def test_router_address_scope_gw_change(self):
        self._test_router_address_scope_change(change_gw=True)

    def test_router_add_interface_delete_port_after_failure(self):
        with self.router() as r, self.subnet(enable_dhcp=False) as s:
            plugin = directory.get_plugin()
            # inject a failure in the update port that happens at the end
            # to ensure the port gets deleted
            with mock.patch.object(
                plugin, 'update_port',
                side_effect=n_exc.InvalidInput(error_message='x')):
                self._router_interface_action('add',
                                              r['router']['id'],
                                              s['subnet']['id'],
                                              None,
                                              webob.exc.HTTPBadRequest.code)
                # with the metadata proxy one internal port remains;
                # otherwise the failed interface port must be cleaned up
                exp_num_of_ports = 1 if self.with_md_proxy else 0
                ports = plugin.get_ports(context.get_admin_context())
                self.assertEqual(exp_num_of_ports, len(ports))


class ExtGwModeTestCase(NsxVPluginV2TestCase,
                        test_ext_gw_mode.ExtGwModeIntTestCase):
    def test_router_gateway_set_fail_after_port_create(self):
        self.skipTest("TBD")


class NsxVSecurityGroupsTestCase(ext_sg.SecurityGroupDBTestCase):
    def setUp(self,
              plugin=PLUGIN_NAME,
              ext_mgr=None,
              service_plugins=None):
        # Replace the VCNS client with a fake backend and stub out the
        # DHCP-edge lifecycle so no backend calls are attempted.
        test_utils.override_nsx_ini_test()
        mock_vcns = mock.patch(vmware.VCNS_NAME, autospec=True)
        mock_vcns_instance = mock_vcns.start()
        self.fc2 = fake_vcns.FakeVcns()
        mock_vcns_instance.return_value = self.fc2
        # NOTE(review): module attribute is replaced without restore, so this
        # leaks across tests — presumably intentional here; verify.
        edge_utils.query_dhcp_service_config = mock.Mock(return_value=[])
        mock_create_dhcp_service = mock.patch("%s.%s" % (
            vmware.EDGE_MANAGE_NAME, 'create_dhcp_edge_service'))
        mock_create_dhcp_service.start()
        mock_update_dhcp_service = mock.patch("%s.%s" % (
            vmware.EDGE_MANAGE_NAME, 'update_dhcp_edge_service'))
        mock_update_dhcp_service.start()
        mock_delete_dhcp_service = mock.patch("%s.%s" % (
            vmware.EDGE_MANAGE_NAME, 'delete_dhcp_edge_service'))
        mock_delete_dhcp_service.start()
        mock_check_backup_edge_pools = mock.patch("%s.%s" % (
            vmware.EDGE_MANAGE_NAME, '_check_backup_edge_pools'))
        mock_check_backup_edge_pools.start()
        # run spawned callables synchronously for deterministic tests
        c_utils.spawn_n = mock.Mock(side_effect=lambda f: f())
        super(NsxVSecurityGroupsTestCase, self).setUp(plugin=plugin,
                                                      ext_mgr=ext_mgr)
        self.plugin = directory.get_plugin()
        self.addCleanup(self.fc2.reset_all)
        self.original_subnet = self.subnet

    def no_dhcp_subnet(self, *args, **kwargs):
        # Delegate to the original subnet helper, defaulting
        # enable_dhcp to False when the caller did not specify it.
        if 'enable_dhcp' in kwargs:
            return self.original_subnet(*args, **kwargs)
        return self.original_subnet(*args, enable_dhcp=False, **kwargs)


class NsxVTestSecurityGroup(ext_sg.TestSecurityGroups,
                            NsxVSecurityGroupsTestCase):

    # _deploy_edge is mocked so setUp does not trigger edge deployment
    @mock.patch.object(edge_utils.EdgeManager, '_deploy_edge')
    def setUp(self, mock_deploy,
              plugin=PLUGIN_NAME,
              ext_mgr=None,
              service_plugins=None):
        super(NsxVTestSecurityGroup, self).setUp(
            plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins)
        plugin_instance = directory.get_plugin()
        plugin_instance._get_edge_id_by_rtr_id = mock.Mock()
        plugin_instance._get_edge_id_by_rtr_id.return_value = False
        plugin_instance._get_edge_id_and_az_by_rtr_id = mock.Mock()
        plugin_instance._get_edge_id_and_az_by_rtr_id.return_value = (
            False, False)

    @with_no_dhcp_subnet
    def test_list_ports_security_group(self):
        return super(NsxVTestSecurityGroup,
                     self).test_list_ports_security_group()

    def test_vnic_security_group_membership(self):
        p = directory.get_plugin()
        self.fc2.add_member_to_security_group = (
            mock.Mock().add_member_to_security_group)
        self.fc2.remove_member_from_security_group = (
            mock.Mock().remove_member_from_security_group)
        nsx_sg_id = str(self.fc2._securitygroups['ids'])
        device_id = _uuid()
        port_index = 0
        # The expected vnic-id format by NsxV
        vnic_id = '%s.%03d' % (device_id, port_index)
        with self.port(device_id=device_id,
                       device_owner='compute:None') as port:
            (self.fc2.add_member_to_security_group
             .assert_called_once_with(p.sg_container_id, nsx_sg_id))
            self.fc2.add_member_to_security_group.reset_mock()
            data = {'port': {'vnic_index': port_index}}
            self.new_update_request('ports', data,
                                    port['port']['id']).get_response(self.api)
            # The vnic should be added as a member to the nsx-security-groups
            # which match the port security-groups
            (self.fc2.add_member_to_security_group
             .assert_called_once_with(nsx_sg_id, vnic_id))
        # The vnic should be removed from the nsx-security-groups which match
        # the deleted port security-groups
        #TODO(kobis): Port is not removed automatically
        # (self.fc2.remove_member_from_security_group
        #  .assert_called_once_with(nsx_sg_id, vnic_id))

    def test_create_secgroup_deleted_upon_fw_section_create_fail(self):
        _context = context.Context('', 'tenant_id')
        sg = {'security_group': {'name': 'default',
                                 'tenant_id': 'tenant_id',
                                 'description': ''}}
        # NOTE(review): presumably the id the fake backend will allocate
        # next — verify against fake_vcns
        expected_id = str(self.fc2._securitygroups['ids'])
        with mock.patch.object(self.fc2,
                               'create_section') as create_section:
            with mock.patch.object(self.fc2,
                                   'delete_security_group') as delete_sg:
                # failing section creation must roll back the backend sg
                create_section.side_effect = webob.exc.HTTPInternalServerError
                self.assertRaises(webob.exc.HTTPInternalServerError,
                                  self.plugin.create_security_group,
                                  _context.elevated(), sg, default_sg=True)
                delete_sg.assert_called_once_with(expected_id)

    def test_create_security_group_rule_duplicate_rules(self):
        name = 'webservers'
        description = 'my webservers'
        with mock.patch.object(self.plugin.nsx_v.vcns,
                               'remove_rule_from_section') as rm_rule_mock:
            with self.security_group(name, description) as sg:
                rule = self._build_security_group_rule(
                    sg['security_group']['id'], 'ingress',
                    constants.PROTO_NAME_TCP, '22', '22')
                self._create_security_group_rule(self.fmt, rule)
                # second identical rule must be rejected with a conflict
                res = self._create_security_group_rule(self.fmt, rule)
                self.deserialize(self.fmt, res)
                self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
        rm_rule_mock.assert_called_once_with(mock.ANY, mock.ANY)

    def test_create_security_group_rule_with_specific_id(self):
        # This test is aimed to test the security-group db mixin
        pass

    def _plugin_update_security_group(self, context, id, logging):
        # Update only the 'logging' flag directly through the plugin API
        data = {'security_group': {'logging': logging}}
        security_group = (
            self.plugin.update_security_group(context, id, data))
        return security_group

    def _plugin_create_security_group(self, context, logging=False):
        # Create a security group directly through the plugin API,
        # optionally with logging enabled
        data = {'security_group': {'name': 'SG',
                                   'tenant_id': 'tenant_id',
                                   'description': ''}}
        if logging:
            data['security_group']['logging'] = True
        security_group = (
            self.plugin.create_security_group(context, data, False))
        return security_group

    def test_create_security_group_default_logging(self):
        _context = context.get_admin_context()
        sg = self._plugin_create_security_group(_context)
        self.assertFalse(sg['logging'])

    def test_create_security_group_with_logging(self):
        _context = context.get_admin_context()
        sg = self._plugin_create_security_group(_context, logging=True)
        self.assertTrue(sg['logging'])

    def test_update_security_group_with_logging(self):
        _context = context.get_admin_context()
        sg = self._plugin_create_security_group(_context)
        sg = self._plugin_update_security_group(_context, sg['id'], True)
        self.assertTrue(sg['logging'])

    def _create_default_sg(self, ctx):
        self.plugin._ensure_default_security_group(ctx, 'tenant_id')

    def test_create_security_group_default_nsx_name(self):
        _context = context.get_admin_context()
        self._create_default_sg(_context)
        with mock.patch.object(self.plugin.nsx_v.vcns,
                               'create_security_group',
                               return_value=({}, '3')) as nsxv_create:
            self._plugin_create_security_group(_context)
            created_sg = nsxv_create.call_args[0]
            created_name = created_sg[0]['securitygroup']['name']
            # default backend name format: "<name> (<id>)"
            self.assertTrue(re.match(r'SG \(.*\)', created_name))

    def test_create_security_group_non_default_nsx_name(self):
        # Use non default nsx name format
        cfg.CONF.set_override('nsx_sg_name_format',
                              '%(name)s [%(id)s]',
                              group="nsxv")
        _context = context.get_admin_context()
        self._create_default_sg(_context)
        with mock.patch.object(self.plugin.nsx_v.vcns,
                               'create_security_group',
                               return_value=({}, '3')) as nsxv_create:
            self._plugin_create_security_group(_context)
            created_sg = nsxv_create.call_args[0]
            created_name = created_sg[0]['securitygroup']['name']
self.assertTrue(re.match(r'SG \[.*\]', created_name)) def test_create_security_group_rule_bulk(self): """Verify that bulk rule create updates the backend section once""" fake_update_sect = self.fc2.update_section def mock_update_section(section_uri, request, h): return fake_update_sect(section_uri, request, h) plugin = directory.get_plugin() with self.security_group() as sg,\ mock.patch.object(plugin.nsx_v.vcns, 'update_section', side_effect=mock_update_section) as update_sect: rule1 = self._build_security_group_rule(sg['security_group']['id'], 'ingress', 'tcp', '22', '22', '10.0.0.1/24') rule2 = self._build_security_group_rule(sg['security_group']['id'], 'ingress', 'tcp', '23', '23', '10.0.0.1/24') rules = {'security_group_rules': [rule1['security_group_rule'], rule2['security_group_rule']]} res = self._create_security_group_rule(self.fmt, rules) ret = self.deserialize(self.fmt, res) self.assertEqual(webob.exc.HTTPCreated.code, res.status_int) self.assertEqual(2, len(ret['security_group_rules'])) update_sect.assert_called_once() def test_create_security_group_rule_protocol_as_number_range(self): self.skipTest('not supported') def test_create_security_group_rule_protocol_as_number_with_port(self): self.skipTest('not supported') def test_create_security_group_rule_with_remote_group(self): with self.security_group() as sg1, self.security_group() as sg2: security_group_id = sg1['security_group']['id'] direction = "ingress" remote_group_id = sg2['security_group']['id'] protocol = "tcp" keys = [('remote_group_id', remote_group_id), ('security_group_id', security_group_id), ('direction', direction), ('protocol', protocol)] with self.security_group_rule( security_group_id, direction=direction, protocol=protocol, remote_group_id=remote_group_id) as rule: for k, v, in keys: self.assertEqual(rule['security_group_rule'][k], v) def test_delete_security_group_rule_with_remote_group(self): com_plugin.subscribe() with self.security_group() as sg1, self.security_group() as sg2: 
            security_group_id = sg1['security_group']['id']
            direction = "ingress"
            remote_group_id = sg2['security_group']['id']
            protocol = "tcp"
            with self.security_group_rule(
                    security_group_id, direction=direction,
                    protocol=protocol,
                    remote_group_id=remote_group_id) as rule,\
                    mock.patch.object(
                        self.plugin,
                        "delete_security_group_rule") as del_rule:
                # delete sg2
                self._delete('security-groups', remote_group_id,
                             webob.exc.HTTPNoContent.code)
                # verify the rule was deleted
                del_rule.assert_called_once_with(
                    mock.ANY, rule["security_group_rule"]["id"])


class TestVdrTestCase(L3NatTest, L3NatTestCaseBase,
                      test_l3_plugin.L3NatDBIntTestCase,
                      IPv6ExpectedFailuresTestMixin,
                      NsxVPluginV2TestCase):

    def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None, service_plugins=None):
        # init the availability zones in the configuration of the plugin
        self.az_name = 'az7'
        set_az_in_config(self.az_name)
        super(TestVdrTestCase, self).setUp(
            plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins)
        self.plugin_instance.nsx_v.is_subnet_in_use = mock.Mock()
        self.plugin_instance.nsx_v.is_subnet_in_use.return_value = False
        self._default_tenant_id = self._tenant_id
        self._router_tenant_id = 'test-router-tenant'

    def _get_md_proxy_fw_rules(self):
        # no metadata-proxy firewall rules for distributed routers
        return []

    # NOTE(review): the injected parameter name 'mock' shadows the mock
    # module inside this method body
    @mock.patch.object(edge_utils.EdgeManager, 'update_interface_addr')
    def test_router_update_gateway_with_different_external_subnet(self, mock):
        # This test calls the backend, so we need a mock for the edge_utils
        super(
            TestVdrTestCase,
            self).test_router_update_gateway_with_different_external_subnet()

    def test_floatingip_multi_external_one_internal(self):
        self.skipTest('skipped')

    def test_router_add_gateway_multiple_subnets_ipv6(self):
        self.skipTest('not supported')

    def test_router_add_interface_ipv6_subnet(self):
        self.skipTest('Not supported')

    def test_router_add_interface_dup_subnet2_returns_400(self):
        self.skipTest('skipped')

    def test_floatingip_same_external_and_internal(self):
        self.skipTest('skipped')

    def test_router_add_interface_by_port_other_tenant_address_out_of_pool(
        self):
        # multiple fixed ips per port are not supported
        self.skipTest('not supported')

    def test_router_add_interface_by_port_other_tenant_address_in_pool(self):
        # multiple fixed ips per port are not supported
        self.skipTest('not supported')

    def test_router_add_interface_by_subnet_other_tenant_subnet_returns_400(
        self):
        # distributes router creation by another tenant is blocked by policy
        self.skipTest('not supported')

    def test_router_add_interface_by_port_admin_address_out_of_pool(self):
        # multiple fixed ips per port are not supported
        self.skipTest('not supported')

    def test_create_router_fail_at_the_backend(self):
        p = directory.get_plugin()
        edge_manager = p.edge_manager
        # backend failure during edge creation must propagate and leave
        # no router behind
        with mock.patch.object(edge_manager, 'create_lrouter',
                               side_effect=[n_exc.NeutronException]):
            router = {'router': {'admin_state_up': True,
                      'name': 'e161be1d-0d0d-4046-9823-5a593d94f72c',
                      'tenant_id': 'fake_tenant',
                      'distributed': True}}
            self.assertRaises(n_exc.NeutronException,
                              p.create_router,
                              context.get_admin_context(),
                              router)
        self._test_list_resources('router', ())

    def test_update_port_device_id_to_different_tenants_router(self):
        self.skipTest('TBD')

    def test_router_add_and_remove_gateway_tenant_ctx(self):
        self.skipTest('TBD')

    def _create_router(self, fmt, tenant_id, name=None,
                       admin_state_up=None, set_context=False,
                       arg_list=None, **kwargs):
        # Router-create helper that defaults 'distributed' to True
        tenant_id = tenant_id or _uuid()
        data = {'router': {'tenant_id': tenant_id}}
        if name:
            data['router']['name'] = name
        if admin_state_up:
            data['router']['admin_state_up'] = admin_state_up
        for arg in (('admin_state_up', 'tenant_id') + (arg_list or ())):
            # Arg must be present and not empty
            if arg in kwargs and kwargs[arg]:
                data['router'][arg] = kwargs[arg]

        if 'distributed' in kwargs:
            data['router']['distributed'] = kwargs['distributed']
        else:
            data['router']['distributed'] = True

        if ('availability_zone_hints' in kwargs and
                kwargs['availability_zone_hints'] is not None):
            data['router']['availability_zone_hints'] = kwargs[
                'availability_zone_hints']

        if kwargs.get('router_type'):
            data['router']['router_type'] = kwargs.get('router_type')

        router_req = self.new_create_request('routers', data, fmt)
        if set_context and tenant_id:
            # create a specific auth context for this request
            router_req.environ['neutron.context'] = context.Context(
                '', tenant_id)

        return router_req.get_response(self.ext_api)

    def _test_router_plr_binding(self, expected_size='compact',
                                 availability_zone=None):
        """Test PLR router bindings

        Create a distributed router with an external network and check
        that the router was created as it should from the binding entry
        """
        # create a distributed router
        tenant_id = _uuid()
        router_ctx = context.Context('', tenant_id)
        az_hints = [availability_zone] if availability_zone else None
        res = self._create_router(self.fmt, tenant_id, distributed=True,
                                  availability_zone_hints=az_hints)
        r = self.deserialize(self.fmt, res)
        self.assertIn('router', r)

        with self._create_l3_ext_network() as net:
            with self.subnet(network=net, enable_dhcp=False) as s2:
                # Plug network with external mapping
                self._set_net_external(s2['subnet']['network_id'])
                self._add_external_gateway_to_router(
                    r['router']['id'],
                    s2['subnet']['network_id'],
                    neutron_context=router_ctx)
                body = self._show('routers', r['router']['id'])
                net_id = (body['router']
                          ['external_gateway_info']['network_id'])
                self.assertEqual(net_id, s2['subnet']['network_id'])

                # make sure the plr router was created, with the expected data
                plr_id = self.plugin_instance.edge_manager.get_plr_by_tlr_id(
                    router_ctx, r['router']['id'])
                binding = nsxv_db.get_nsxv_router_binding(
                    router_ctx.session, plr_id)
                # the PLR is a 'service' edge whose router id is 'plr'-prefixed
                self.assertEqual(expected_size, binding['appliance_size'])
                self.assertEqual('ACTIVE', binding['status'])
                self.assertIsNotNone(binding['edge_id'])
                self.assertEqual('service', binding['edge_type'])
                self.assertTrue(binding['router_id'].startswith('plr'))
                if availability_zone:
                    self.assertEqual(
                        availability_zone,
                        binding['availability_zone'])
                else:
                    self.assertEqual(
                        'default',
                        binding['availability_zone'])

                # Cleanup
                self._remove_external_gateway_from_router(
                    r['router']['id'],
                    s2['subnet']['network_id'])

    def test_router_plr_binding_default_size(self):
        self._test_router_plr_binding()

    def test_router_plr_binding_configured_size(self):
        cfg.CONF.set_override('exclusive_router_appliance_size',
                              'large', group="nsxv")
        self._test_router_plr_binding(expected_size='large')

    def test_router_plr_binding_default_az(self):
        self._test_router_plr_binding(availability_zone='default')

    def test_router_plr_binding_with_az(self):
        self._test_router_plr_binding(availability_zone=self.az_name)

    def test_router_binding_with_az(self):
        """Check distributed router creation with an availability zone
        """
        # create a distributed router
        tenant_id = _uuid()
        router_ctx = context.Context('', tenant_id)
        res = self._create_router(self.fmt, tenant_id, distributed=True,
                                  availability_zone_hints=[self.az_name])
        r = self.deserialize(self.fmt, res)
        self.assertIn('router', r)

        # check that we have an edge for this router, with the correct
        # availability zone
        binding = nsxv_db.get_nsxv_router_binding(
            router_ctx.session, r['router']['id'])

        self.assertEqual('compact', binding['appliance_size'])
        self.assertEqual('ACTIVE', binding['status'])
        self.assertIsNotNone(binding['edge_id'])
        self.assertEqual('vdr', binding['edge_type'])
        self.assertEqual(binding['router_id'], r['router']['id'])
        self.assertEqual(self.az_name, binding['availability_zone'])

    def _test_router_create_with_distributed(self, dist_input, dist_expected,
                                             return_code=201, **kwargs):
        # Create a router with the given 'distributed' input and verify the
        # response status and the resulting 'distributed' value
        data = {'tenant_id': 'whatever'}
        data['name'] = 'router1'
        data['distributed'] = dist_input
        for k, v in six.iteritems(kwargs):
            data[k] = v
        router_req = self.new_create_request(
            'routers', {'router': data}, self.fmt)
        res = router_req.get_response(self.ext_api)
        self.assertEqual(return_code, res.status_int)
        if res.status_int == 201:
            router = self.deserialize(self.fmt, res)
            self.assertIn('distributed', router['router'])
            if dist_input:
                # distributed routers do not expose a router_type
                self.assertNotIn('router_type', router['router'])
            self.assertEqual(dist_expected,
                             router['router']['distributed'])

    def test_create_router_fails_with_router_type(self):
        self._test_router_create_with_distributed(True, True,
                                                  return_code=400,
                                                  router_type="shared")

    def test_router_create_distributed(self):
        self._test_router_create_with_distributed(True, True)

    def test_router_create_not_distributed(self):
        self._test_router_create_with_distributed(False, False)

    def test_router_create_distributed_unspecified(self):
        self._test_router_create_with_distributed(None, False)

    def _test_create_router_with_az_hint(self, with_hint):
        """Create a distributed router with/without an AZ hint and verify
        both the returned hints and the AZ of the backing edge.
        """
        # init the availability zones in the plugin
        az_name = 'az7'
        set_az_in_config(az_name)
        p = directory.get_plugin()
        p._availability_zones_data = nsx_az.NsxVAvailabilityZones()

        # create a router with/without hints
        router = {'router': {'admin_state_up': True,
                  'name': 'e161be1d-0d0d-4046-9823-5a593d94f72c',
                  'tenant_id': 'FAKE_TENANT',
                  'distributed': True}}
        if with_hint:
            router['router']['availability_zone_hints'] = [az_name]
        returned_router = p.create_router(context.get_admin_context(),
                                          router)
        # availability zones is still empty because the router is not attached
        if with_hint:
            self.assertEqual([az_name],
                             returned_router['availability_zone_hints'])
        else:
            self.assertEqual([],
                             returned_router['availability_zone_hints'])

        edge_id = edge_utils.get_router_edge_id(
            context.get_admin_context(), returned_router['id'])
        res_az = nsxv_db.get_edge_availability_zone(
            context.get_admin_context().session, edge_id)
        expected_az = az_name if with_hint else 'default'
        self.assertEqual(expected_az, res_az)

    def test_create_router_with_az_hint(self):
        self._test_create_router_with_az_hint(True)

    def test_create_router_without_az_hint(self):
        self._test_create_router_with_az_hint(False)

    def test_floatingip_with_assoc_fails(self):
        self._test_floatingip_with_assoc_fails(
            self._plugin_name + '._check_and_get_fip_assoc')

    def test_floatingip_update(self):
        super(TestVdrTestCase, self).test_floatingip_update(
            constants.FLOATINGIP_STATUS_DOWN)

    def test_floatingip_with_invalid_create_port(self):
        self._test_floatingip_with_invalid_create_port(self._plugin_name)

    def test_router_add_gateway_invalid_network_returns_404(self):
        with self.router() as r:
            self._add_external_gateway_to_router(
                r['router']['id'],
                uuidutils.generate_uuid(),
                expected_code=webob.exc.HTTPNotFound.code)

    def test_router_add_interfaces_with_multiple_subnets_on_same_network(self):
        # only one interface per network is allowed on a distributed router
        with self.router() as r,\
                self.network() as n,\
                self.subnet(network=n) as s1,\
                self.subnet(network=n,
                            cidr='11.0.0.0/24') as s2:
            self._router_interface_action('add',
                                          r['router']['id'],
                                          s1['subnet']['id'],
                                          None)
            err_code = webob.exc.HTTPBadRequest.code
            self._router_interface_action('add',
                                          r['router']['id'],
                                          s2['subnet']['id'],
                                          None,
                                          err_code)
            self._router_interface_action('remove',
                                          r['router']['id'],
                                          s1['subnet']['id'],
                                          None)

    def test_router_add_interface_with_external_net_fail(self):
        with self.router() as r,\
                self.network() as n,\
                self.subnet(network=n) as s:
            # Set the network as an external net
            net_id = n['network']['id']
            self._set_net_external(net_id)
            # adding an interface on an external net must be rejected
            err_code = webob.exc.HTTPBadRequest.code
            self._router_interface_action('add',
                                          r['router']['id'],
                                          s['subnet']['id'],
                                          None,
                                          err_code)

    def test_different_type_routers_add_interfaces_on_same_network_pass(self):
        # distributed / shared / exclusive routers may each attach an
        # interface on the same network (different subnets)
        with self.router() as dist, \
            self.router(distributed=False, router_type='shared') as shared, \
            self.router(distributed=False, router_type='exclusive') as excl:
            with self.network() as n:
                with self.subnet(network=n) as s1, \
                    self.subnet(network=n, cidr='11.0.0.0/24') as s2, \
                    self.subnet(network=n, cidr='12.0.0.0/24') as s3:
                    self._router_interface_action('add',
                                                  shared['router']['id'],
                                                  s1['subnet']['id'], None)
                    self._router_interface_action('add',
                                                  excl['router']['id'],
                                                  s2['subnet']['id'], None)
                    self._router_interface_action('add',
                                                  dist['router']['id'],
                                                  s3['subnet']['id'], None)
                    self._router_interface_action('remove',
                                                  dist['router']['id'],
                                                  s3['subnet']['id'], None)
                    self._router_interface_action('remove',
                                                  excl['router']['id'],
                                                  s2['subnet']['id'], None)
                    self._router_interface_action('remove',
                                                  shared['router']['id'],
                                                  s1['subnet']['id'], None)

    def test_router_update_type_fails(self):
        """Check distributed router cannot change its type
        """
        # create a distributed router
        tenant_id = _uuid()
        res = self._create_router(self.fmt, tenant_id, distributed=True)
        r = self.deserialize(self.fmt, res)
        router_id = r['router']['id']
        # make sure changing the type fails
        self._update('routers', router_id,
                     {'router': {'router_type': 'shared'}},
                     expected_code=400)
        self._update('routers', router_id,
                     {'router': {'router_type': 'exclusive'}},
                     expected_code=400)
        self._update('routers', router_id,
                     {'router': {'distributed': False}},
                     expected_code=400)
        # make sure keeping the type is ok
        self._update('routers', router_id,
                     {'router': {'distributed': True}},
                     expected_code=200)

    def test_router_update_size_fails(self):
        """Check distributed router cannot change its size
        """
        # create a distributed router
        tenant_id = _uuid()
        res = self._create_router(self.fmt, tenant_id, distributed=True)
        r = self.deserialize(self.fmt, res)
        router_id = r['router']['id']
        # make sure changing the size fails
        self._update('routers', router_id,
                     {'router': {'router_size': 'small'}},
                     expected_code=400)

    def test_router_add_interface_multiple_ipv4_subnets(self):
        self.skipTest('TBD')

    def test_router_remove_ipv6_subnet_from_interface(self):
        self.skipTest('TBD')

    def test_router_add_interface_multiple_ipv6_subnets_same_net(self):
        self.skipTest('TBD')

    def test_router_add_interface_multiple_ipv6_subnets_different_net(self):
        self.skipTest('TBD')

    def test_create_router_gateway_fails(self):
        self.skipTest('not supported')

    def test_floatingip_update_to_same_port_id_twice(self):
        self.skipTest('Plugin changes floating port status')

    def test_update_subnet_gateway_for_external_net(self):
        plugin = directory.get_plugin()
        router_obj = dist_router_driver.RouterDistributedDriver(plugin)
        # force the distributed driver and verify it updates the nexthop
        with mock.patch.object(plugin, '_find_router_driver',
                               return_value=router_obj):
            with mock.patch.object(router_obj,
                                   '_update_nexthop') as update_nexthop:
                super(
                    TestVdrTestCase,
                    self).test_update_subnet_gateway_for_external_net()
                self.assertTrue(update_nexthop.called)

    def test_router_add_interface_ipv6_port_existing_network_returns_400(self):
        """Ensure unique IPv6 router ports per network id.
        Adding a router port containing one or more IPv6 subnets with the same
        network id as an existing router port should fail. This is so there
        is no ambiguity regarding on which port to add an IPv6 subnet when
        executing router-interface-add with a subnet and no port.
        """
        with self.network() as n, self.router() as r:
            with self.subnet(network=n, cidr='fd00::/64',
                             ip_version=6, enable_dhcp=False) as s1, (
                self.subnet(network=n, cidr='fd01::/64',
                            ip_version=6, enable_dhcp=False)) as s2:
                with self.port(subnet=s1) as p:
                    exp_code = webob.exc.HTTPBadRequest.code
                    self._router_interface_action('add',
                                                  r['router']['id'],
                                                  s2['subnet']['id'],
                                                  None,
                                                  expected_code=exp_code)
                    self._router_interface_action('add',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'],
                                                  expected_code=exp_code)

    def test_router_update_with_size_fail(self):
        """Distributed router currently does not support router size update
        """
        with self.router() as r:
            router_id = r['router']['id']
            body = self._show('routers', router_id)
            body['router']['router_size'] = 'small'
            self._update('routers', router_id, body,
                         expected_code=400,
                         neutron_context=context.get_admin_context())


class TestNSXvAllowedAddressPairs(NsxVPluginV2TestCase,
                                  test_addr_pair.TestAllowedAddressPairs):

    def setUp(self, plugin=PLUGIN_NAME):
        super(TestNSXvAllowedAddressPairs, self).setUp(plugin=plugin)

    # NOTE: the tests below are skipped due to the fact that they update the
    # mac address. The NSX|V does not support address pairs when a MAC address
    # is configured.
    def test_create_port_allowed_address_pairs(self):
        pass

    def test_update_add_address_pairs(self):
        pass

    def test_equal_to_max_allowed_address_pair(self):
        pass

    def test_update_port_security_off_address_pairs(self):
        pass

    def test_create_port_security_true_allowed_address_pairs(self):
        pass

    def test_create_port_security_false_allowed_address_pairs(self):
        pass

    def _test_create_port_remove_allowed_address_pairs(self, update_value):
        pass

    def test_create_overlap_with_fixed_ip(self):
        pass

    def test_create_port_with_cidr_address_pair(self):
        # a CIDR (not a single IP) in an address pair must be rejected
        with self.network() as net:
            address_pairs = [{'mac_address': '00:00:00:00:00:01',
                              'ip_address': '192.168.1.0/24'}]
            self._create_port(self.fmt, net['network']['id'],
                              expected_res_status=
                              webob.exc.HTTPBadRequest.code,
                              arg_list=(addrp_apidef.ADDRESS_PAIRS,),
                              allowed_address_pairs=address_pairs)

    def test_create_port_with_address_pair_existing_fixed_ip_fail(self):
        # an address pair colliding with another port's fixed ip must fail
        address_pairs1 = [{'ip_address': '10.0.0.2'}]
        with self.network() as network:
            with self.subnet(network=network, cidr='10.0.0.0/24',
                             enable_dhcp=False) as subnet:
                fixed_ips1 = [{'subnet_id': subnet['subnet']['id'],
                               'ip_address': '10.0.0.4'}]
                fixed_ips2 = [{'subnet_id': subnet['subnet']['id'],
                               'ip_address': '10.0.0.5'}]
                self._create_port(self.fmt, network['network']['id'],
                                  arg_list=(addrp_apidef.ADDRESS_PAIRS,
                                            'fixed_ips'),
                                  allowed_address_pairs=address_pairs1,
                                  fixed_ips=fixed_ips1)
                res = self._create_port(self.fmt, network['network']['id'],
                                        arg_list=(addrp_apidef.ADDRESS_PAIRS,
                                                  'fixed_ips'),
                                        allowed_address_pairs=address_pairs1,
                                        fixed_ips=fixed_ips2)
                self.assertEqual(res.status_int, 400)


class TestNSXPortSecurity(test_psec.TestPortSecurity,
                          NsxVPluginV2TestCase):
    def setUp(self, plugin=PLUGIN_NAME):
        super(TestNSXPortSecurity, self).setUp(plugin=plugin)

    def test_create_port_fails_with_secgroup_and_port_security_false(self):
        # Security Groups can be used even when port-security is disabled
        pass

    def test_update_port_security_off_with_security_group(self):
        # Security Groups can be used even when port-security is disabled
        pass

    def test_create_port_with_security_group_and_net_sec_false(self):
        pass

    def _create_compute_port(self, network_name, device_id, port_security):
        # create a network without port security
        res = self._create_network('json', network_name, True)
        net = self.deserialize('json', res)

        # create a compute port with this network and a device
        res = self._create_port('json', net['network']['id'],
                                arg_list=('port_security_enabled',
                                          'device_id',
                                          'device_owner',),
                                port_security_enabled=port_security,
                                device_id=device_id,
                                device_owner='compute:None')
        return self.deserialize('json', res)

    def _add_vnic_to_port(self, port_id, add_exclude, vnic_index):
        """Add vnic to a port and check if the device was added to the
        exclude list
        """
        plugin = self._get_core_plugin_with_dvs()
        vm_moref = 'dummy_moref'
        with mock.patch.object(plugin._vcm, 'get_vm_moref',
                               return_value=vm_moref):
            with mock.patch.object(
                plugin.nsx_v.vcns,
                'add_vm_to_exclude_list') as exclude_list_add:
                data = {'port': {'vnic_index': vnic_index}}
                self.new_update_request(
                    'ports', data, port_id).get_response(self.api)
                if add_exclude:
                    # make sure the vm was added to the exclude list
                    exclude_list_add.assert_called_once_with(vm_moref)
                else:
                    self.assertFalse(exclude_list_add.called)

    def _del_vnic_from_port(self, port_id, del_exclude):
        """Delete the vnic & device id from the port and check if the device
        was removed from the exclude list
        """
        plugin = self._get_core_plugin_with_dvs()
        vm_moref = 'dummy_moref'
        with mock.patch.object(plugin._vcm, 'get_vm_moref',
                               return_value=vm_moref):
            with mock.patch.object(
                plugin.nsx_v.vcns,
                'delete_vm_from_exclude_list') as exclude_list_del:
                data = {'port': {'vnic_index': None, 'device_id': ''}}
                self.new_update_request(
                    'ports', data, port_id).get_response(self.api)
                if del_exclude:
                    # make sure the vm was removed from the exclude list
                    exclude_list_del.assert_called_once_with(vm_moref)
                else:
                    self.assertFalse(exclude_list_del.called)

    def _del_port_with_vnic(self, port_id,
del_exclude): """Delete port with vnic, and check if the device was removed from the exclude list """ plugin = self._get_core_plugin_with_dvs() vm_moref = 'dummy_moref' with mock.patch.object(plugin._vcm, 'get_vm_moref', return_value=vm_moref): with mock.patch.object( plugin.nsx_v.vcns, 'delete_vm_from_exclude_list') as exclude_list_del: self.new_delete_request( 'ports', port_id).get_response(self.api) if del_exclude: # make sure the vm was added to the exclude list exclude_list_del.assert_called_once_with(vm_moref) else: self.assertFalse(exclude_list_del.called) def test_update_port_no_security_with_vnic(self): device_id = _uuid() # create a compute port without port security port = self._create_compute_port('net1', device_id, False) # add vnic to the port self._add_vnic_to_port(port['port']['id'], True, 3) # delete vnic from the port self._del_vnic_from_port(port['port']['id'], True) def test_update_multiple_port_no_security_with_vnic(self): device_id = _uuid() # create a compute port without port security port1 = self._create_compute_port('net1', device_id, False) # add vnic to the port self._add_vnic_to_port(port1['port']['id'], True, 3) # create another compute port without port security on the same device port2 = self._create_compute_port('net2', device_id, False) # add vnic to the port (no need to add to exclude list again) self._add_vnic_to_port(port2['port']['id'], False, 4) # delete vnics from the port self._del_vnic_from_port(port1['port']['id'], False) self._del_vnic_from_port(port2['port']['id'], True) def test_update_mixed_port_no_security_with_vnic(self): device_id = _uuid() # create a compute port without port security port1 = self._create_compute_port('net1', device_id, True) # add vnic to the port self._add_vnic_to_port(port1['port']['id'], False, 3) irrelevant_device_id = _uuid() # create a compute port without port security for a different device port2 = self._create_compute_port('net1', irrelevant_device_id, True) # add vnic to the port 
self._add_vnic_to_port(port2['port']['id'], False, 3) # create another compute port without port security on the same device port3 = self._create_compute_port('net2', device_id, False) # add vnic to the port (no need to add to exclude list again) self._add_vnic_to_port(port3['port']['id'], True, 4) # delete vnics from the port self._del_vnic_from_port(port1['port']['id'], False) self._del_vnic_from_port(port3['port']['id'], True) self._del_vnic_from_port(port2['port']['id'], False) def test_delete_port_no_security_with_vnic(self): device_id = _uuid() # create a compute port without port security port = self._create_compute_port('net1', device_id, False) # add vnic to the port self._add_vnic_to_port(port['port']['id'], True, 3) # delete port with the vnic self._del_port_with_vnic(port['port']['id'], True) def test_delete_multiple_port_no_security_with_vnic(self): device_id = _uuid() # create a compute port without port security port1 = self._create_compute_port('net1', device_id, False) # add vnic to the port self._add_vnic_to_port(port1['port']['id'], True, 3) # create another compute port without port security on the same device port2 = self._create_compute_port('net2', device_id, False) # add vnic to the port (no need to add to exclude list again) self._add_vnic_to_port(port2['port']['id'], False, 4) # delete ports with the vnics self._del_port_with_vnic(port2['port']['id'], False) self._del_port_with_vnic(port1['port']['id'], True) def test_detach_port_no_sec(self): device_id = _uuid() # create a compute port without port security port = self._create_compute_port('net1', device_id, False) # add vnic to the port self._add_vnic_to_port(port['port']['id'], True, 3) # detach the port with mock.patch.object( self.fc2, 'inactivate_vnic_assigned_addresses') as mock_inactivte: self._del_vnic_from_port(port['port']['id'], True) # inactivate spoofguard should not be called self.assertFalse(mock_inactivte.called) def test_detach_port_with_sec(self): device_id = _uuid() # 
create a compute port without port security port = self._create_compute_port('net1', device_id, True) # add vnic to the port self._add_vnic_to_port(port['port']['id'], False, 3) # detach the port with mock.patch.object( self.fc2, 'inactivate_vnic_assigned_addresses') as mock_inactivte: self._del_vnic_from_port(port['port']['id'], False) # inactivate spoofguard should be called self.assertTrue(mock_inactivte.called) def _toggle_port_security(self, port_id, enable_port_security, update_exclude): """Enable/disable port security on a port, and verify that the exclude list was updated as expected """ plugin = self._get_core_plugin_with_dvs() vm_moref = 'dummy_moref' data = {'port': {'port_security_enabled': enable_port_security}} with mock.patch.object(plugin._vcm, 'get_vm_moref', return_value=vm_moref): if enable_port_security: with mock.patch.object( plugin.nsx_v.vcns, 'delete_vm_from_exclude_list') as exclude_list_del: self.new_update_request( 'ports', data, port_id).get_response(self.api) if update_exclude: # make sure the vm was added to the exclude list exclude_list_del.assert_called_once_with(vm_moref) else: self.assertFalse(exclude_list_del.called) else: with mock.patch.object( plugin.nsx_v.vcns, 'add_vm_to_exclude_list') as exclude_list_add: self.new_update_request( 'ports', data, port_id).get_response(self.api) if update_exclude: # make sure the vm was added to the exclude list exclude_list_add.assert_called_once_with(vm_moref) else: self.assertFalse(exclude_list_add.called) def test_update_port_security_with_vnic(self): device_id = _uuid() # create a compute port without port security port = self._create_compute_port('net1', device_id, False) # add vnic to the port self._add_vnic_to_port(port['port']['id'], True, 3) # enable port security self._toggle_port_security(port['port']['id'], True, True) # disable port security self._toggle_port_security(port['port']['id'], False, True) # delete vnic from the port self._del_vnic_from_port(port['port']['id'], True) 
def test_update_multiple_port_security_with_vnic(self): device_id = _uuid() # create a compute port without port security port1 = self._create_compute_port('net1', device_id, False) # add vnic to the port self._add_vnic_to_port(port1['port']['id'], True, 3) # create another compute port without port security port2 = self._create_compute_port('net2', device_id, False) # add vnic to the port self._add_vnic_to_port(port2['port']['id'], False, 4) # enable port security on both ports self._toggle_port_security(port1['port']['id'], True, False) self._toggle_port_security(port2['port']['id'], True, True) # disable port security on both ports self._toggle_port_security(port1['port']['id'], False, True) self._toggle_port_security(port2['port']['id'], False, False) def test_service_insertion(self): # init the plugin mocks p = directory.get_plugin() self.fc2.add_member_to_security_group = ( mock.Mock().add_member_to_security_group) self.fc2.remove_member_from_security_group = ( mock.Mock().remove_member_from_security_group) # mock the service insertion handler p._si_handler = mock.Mock() p._si_handler.enabled = True p._si_handler.sg_id = '11' # create a compute port with port security device_id = _uuid() port = self._create_compute_port('net1', device_id, True) # add vnic to the port, and verify that the port was added to the # service insertion security group vnic_id = 3 vnic_index = '%s.%03d' % (device_id, vnic_id) self.fc2.add_member_to_security_group.reset_mock() self._add_vnic_to_port(port['port']['id'], False, vnic_id) self.fc2.add_member_to_security_group.assert_any_call( p._si_handler.sg_id, vnic_index) # disable the port security and make sure it is removed from the # security group self.fc2.remove_member_from_security_group.reset_mock() self._toggle_port_security(port['port']['id'], False, True) self.fc2.remove_member_from_security_group.assert_any_call( p._si_handler.sg_id, vnic_index) def test_service_insertion_notify(self): # create a compute ports with/without 
port security device_id = _uuid() # create 2 compute ports with port security port1 = self._create_compute_port('net1', device_id, True) self._add_vnic_to_port(port1['port']['id'], False, 1) port2 = self._create_compute_port('net2', device_id, True) self._add_vnic_to_port(port2['port']['id'], False, 2) # create 1 compute port without port security port3 = self._create_compute_port('net3', device_id, False) self._add_vnic_to_port(port3['port']['id'], True, 3) # init the plugin mocks p = directory.get_plugin() self.fc2.add_member_to_security_group = ( mock.Mock().add_member_to_security_group) # call the function (that should be called from the flow classifier # driver) and verify it adds all relevant ports to the group # Since it uses spawn_n, we should mock it. orig_spawn = c_utils.spawn_n c_utils.spawn_n = mock.Mock(side_effect=lambda f, x: f(x, None)) p.add_vms_to_service_insertion(sg_id='aaa') # back to normal c_utils.spawn_n = orig_spawn self.assertEqual(2, self.fc2.add_member_to_security_group.call_count) def test_toggle_non_compute_port_security(self): # create a network without port security res = self._create_network('json', 'net1', True) net = self.deserialize('json', res) # create a port with this network and a device res = self._create_port('json', net['network']['id'], arg_list=('port_security_enabled',), port_security_enabled=True) port = self.deserialize('json', res) port_id = port['port']['id'] # Disable port security data = {'port': {'port_security_enabled': False}} updated_port = self.deserialize( 'json', self.new_update_request('ports', data, port_id).get_response(self.api)) self.assertFalse(updated_port['port']['port_security_enabled']) shown_port = self.deserialize( 'json', self.new_show_request('ports', port_id).get_response(self.api)) self.assertFalse(shown_port['port']['port_security_enabled']) # Enable port security data = {'port': {'port_security_enabled': True}} updated_port = self.deserialize( 'json', self.new_update_request('ports', data, 
port_id).get_response(self.api)) self.assertTrue(updated_port['port']['port_security_enabled']) shown_port = self.deserialize( 'json', self.new_show_request('ports', port_id).get_response(self.api)) self.assertTrue(shown_port['port']['port_security_enabled']) class TestSharedRouterTestCase(L3NatTest, L3NatTestCaseBase, test_l3_plugin.L3NatTestCaseMixin, NsxVPluginV2TestCase): def _create_router(self, fmt, tenant_id, name=None, admin_state_up=None, set_context=False, arg_list=None, **kwargs): tenant_id = tenant_id or _uuid() data = {'router': {'tenant_id': tenant_id}} if name: data['router']['name'] = name if admin_state_up: data['router']['admin_state_up'] = admin_state_up for arg in (('admin_state_up', 'tenant_id') + (arg_list or ())): # Arg must be present and not empty if arg in kwargs and kwargs[arg]: data['router'][arg] = kwargs[arg] data['router']['router_type'] = kwargs.get('router_type', 'shared') router_req = self.new_create_request('routers', data, fmt) if set_context and tenant_id: # create a specific auth context for this request router_req.environ['neutron.context'] = context.Context( '', tenant_id) return router_req.get_response(self.ext_api) @mock.patch.object(edge_utils.EdgeManager, 'update_interface_addr') def test_router_add_interface_multiple_ipv6_subnets_same_net(self, mock): super(TestSharedRouterTestCase, self).test_router_add_interface_multiple_ipv6_subnets_same_net() def test_router_create_with_no_edge(self): name = 'router1' tenant_id = _uuid() expected_value = [('name', name), ('tenant_id', tenant_id), ('admin_state_up', True), ('status', 'ACTIVE'), ('external_gateway_info', None)] with self.router(name='router1', admin_state_up=True, tenant_id=tenant_id) as router: for k, v in expected_value: self.assertEqual(router['router'][k], v) self.assertEqual( [], self.plugin_instance.edge_manager.get_routers_on_same_edge( context.get_admin_context(), router['router']['id'])) def test_router_create_with_size_fail_at_backend(self): data = {'router': 
{ 'tenant_id': 'whatever', 'router_type': 'shared', 'router_size': 'large'}} router_req = self.new_create_request('routers', data, self.fmt) res = router_req.get_response(self.ext_api) router = self.deserialize(self.fmt, res) msg = ('Bad router request: ' 'Cannot specify router-size for shared router.') self.assertEqual("BadRequest", router['NeutronError']['type']) self.assertEqual(msg, router['NeutronError']['message']) def test_router_create_with_gwinfo_with_no_edge(self): with self._create_l3_ext_network() as net: with self.subnet(network=net, enable_dhcp=False) as s: data = {'router': {'tenant_id': 'whatever'}} data['router']['name'] = 'router1' data['router']['external_gateway_info'] = { 'network_id': s['subnet']['network_id']} router_req = self.new_create_request('routers', data, self.fmt) res = router_req.get_response(self.ext_api) router = self.deserialize(self.fmt, res) self.assertEqual( s['subnet']['network_id'], (router['router']['external_gateway_info'] ['network_id'])) self.assertEqual( [], self.plugin_instance.edge_manager. 
get_routers_on_same_edge( context.get_admin_context(), router['router']['id'])) def test_router_update_with_routes_fail(self): """Shared router currently does not support static routes """ with self.router() as r: router_id = r['router']['id'] body = self._show('routers', router_id) body['router']['routes'] = [{'destination': '5.5.5.5/32', 'nexthop': '6.6.6.6'}] self._update('routers', router_id, body, expected_code=400, neutron_context=context.get_admin_context()) def test_router_update_gateway_with_no_edge(self): with self.router() as r: with self.subnet() as s1: with self._create_l3_ext_network() as net: with self.subnet(network=net, enable_dhcp=False) as s2: self._set_net_external(s1['subnet']['network_id']) try: self._add_external_gateway_to_router( r['router']['id'], s1['subnet']['network_id']) body = self._show('routers', r['router']['id']) net_id = (body['router'] ['external_gateway_info']['network_id']) self.assertEqual(net_id, s1['subnet']['network_id']) self.assertEqual( [], self.plugin_instance.edge_manager. get_routers_on_same_edge( context.get_admin_context(), r['router']['id'])) # Plug network with external mapping self._set_net_external(s2['subnet']['network_id']) self._add_external_gateway_to_router( r['router']['id'], s2['subnet']['network_id']) body = self._show('routers', r['router']['id']) net_id = (body['router'] ['external_gateway_info']['network_id']) self.assertEqual(net_id, s2['subnet']['network_id']) self.assertEqual( [], self.plugin_instance.edge_manager. 
get_routers_on_same_edge( context.get_admin_context(), r['router']['id'])) finally: # Cleanup self._remove_external_gateway_from_router( r['router']['id'], s2['subnet']['network_id']) def test_router_update_gateway_with_existing_floatingip_with_edge(self): with self._create_l3_ext_network() as net: with self.subnet(network=net, enable_dhcp=False) as subnet: with self.floatingip_with_assoc() as fip: self._add_external_gateway_to_router( fip['floatingip']['router_id'], subnet['subnet']['network_id'], expected_code=webob.exc.HTTPConflict.code) self.assertNotEqual( [], self.plugin_instance.edge_manager. get_routers_on_same_edge( context.get_admin_context(), fip['floatingip']['router_id'])) def test_router_set_gateway_with_interfaces_with_edge(self): with self.router() as r, self.subnet() as s1: self._set_net_external(s1['subnet']['network_id']) try: self._add_external_gateway_to_router( r['router']['id'], s1['subnet']['network_id']) body = self._show('routers', r['router']['id']) net_id = (body['router'] ['external_gateway_info']['network_id']) self.assertEqual(net_id, s1['subnet']['network_id']) self.assertEqual( [], self.plugin_instance.edge_manager. get_routers_on_same_edge( context.get_admin_context(), r['router']['id'])) with self.subnet(cidr='11.0.0.0/24') as s11: with self.subnet(cidr='12.0.0.0/24') as s12: self._router_interface_action('add', r['router']['id'], s11['subnet']['id'], None) self._router_interface_action('add', r['router']['id'], s12['subnet']['id'], None) self.assertIsNotNone( self.plugin_instance.edge_manager. get_routers_on_same_edge( context.get_admin_context(), r['router']['id'])) self._router_interface_action('remove', r['router']['id'], s11['subnet']['id'], None) self.assertIsNotNone( self.plugin_instance.edge_manager. get_routers_on_same_edge( context.get_admin_context(), r['router']['id'])) self._router_interface_action('remove', r['router']['id'], s12['subnet']['id'], None) self.assertEqual( [], self.plugin_instance.edge_manager. 
get_routers_on_same_edge( context.get_admin_context(), r['router']['id'])) finally: # Cleanup self._remove_external_gateway_from_router( r['router']['id'], s1['subnet']['network_id']) @mock.patch.object(edge_utils, "update_firewall") def test_routers_set_gateway_with_nosnat(self, mock): expected_fw1 = [{'action': 'allow', 'enabled': True, 'name': 'Subnet Rule', 'source_ip_address': [], 'destination_ip_address': []}] expected_fw2 = [{'action': 'allow', 'enabled': True, 'name': 'Subnet Rule', 'source_ip_address': [], 'destination_ip_address': []}] nosnat_fw1 = [{'action': 'allow', 'enabled': True, 'name': 'No SNAT Rule', 'source_vnic_groups': ["external"], 'destination_ip_address': []}] nosnat_fw2 = [{'action': 'allow', 'enabled': True, 'name': 'No SNAT Rule', 'source_vnic_groups': ["external"], 'destination_ip_address': []}] with self.router() as r1, self.router() as r2,\ self.subnet() as ext_subnet,\ self.subnet(cidr='11.0.0.0/24') as s1,\ self.subnet(cidr='12.0.0.0/24') as s2: self._set_net_external(ext_subnet['subnet']['network_id']) self._router_interface_action( 'add', r1['router']['id'], s1['subnet']['id'], None) expected_fw1[0]['source_ip_address'] = ['11.0.0.0/24'] expected_fw1[0]['destination_ip_address'] = ['11.0.0.0/24'] fw_rules = mock.call_args[0][3]['firewall_rule_list'] self.assertEqual( self._recursive_sort_list( self._get_md_proxy_fw_rules() + expected_fw1), self._recursive_sort_list(fw_rules)) self._router_interface_action('add', r2['router']['id'], s2['subnet']['id'], None) self._add_external_gateway_to_router( r1['router']['id'], ext_subnet['subnet']['network_id']) self._add_external_gateway_to_router( r2['router']['id'], ext_subnet['subnet']['network_id']) expected_fw2[0]['source_ip_address'] = ['12.0.0.0/24'] expected_fw2[0]['destination_ip_address'] = ['12.0.0.0/24'] fw_rules = mock.call_args[0][3]['firewall_rule_list'] self.assertEqual( self._recursive_sort_list( self._get_md_proxy_fw_rules() + expected_fw1 + expected_fw2), 
self._recursive_sort_list(fw_rules)) self._update_router_enable_snat( r1['router']['id'], ext_subnet['subnet']['network_id'], False) nosnat_fw1[0]['destination_ip_address'] = ['11.0.0.0/24'] fw_rules = mock.call_args[0][3]['firewall_rule_list'] self.assertEqual( self._recursive_sort_list( self._get_md_proxy_fw_rules() + expected_fw1 + expected_fw2 + nosnat_fw1), self._recursive_sort_list(fw_rules)) self._update_router_enable_snat( r2['router']['id'], ext_subnet['subnet']['network_id'], False) nosnat_fw2[0]['destination_ip_address'] = ['12.0.0.0/24'] fw_rules = mock.call_args[0][3]['firewall_rule_list'] self.assertEqual( self._recursive_sort_list( self._get_md_proxy_fw_rules() + expected_fw1 + expected_fw2 + nosnat_fw1 + nosnat_fw2), self._recursive_sort_list(fw_rules)) self._update_router_enable_snat( r2['router']['id'], ext_subnet['subnet']['network_id'], True) fw_rules = mock.call_args[0][3]['firewall_rule_list'] self.assertEqual( self._recursive_sort_list( self._get_md_proxy_fw_rules() + expected_fw1 + expected_fw2 + nosnat_fw1), self._recursive_sort_list(fw_rules)) self._router_interface_action('remove', r2['router']['id'], s2['subnet']['id'], None) fw_rules = mock.call_args[0][3]['firewall_rule_list'] self.assertEqual( self._recursive_sort_list( self._get_md_proxy_fw_rules() + expected_fw1 + nosnat_fw1), self._recursive_sort_list(fw_rules)) self._remove_external_gateway_from_router( r1['router']['id'], ext_subnet['subnet']['network_id']) fw_rules = mock.call_args[0][3]['firewall_rule_list'] self.assertEqual( self._recursive_sort_list( self._get_md_proxy_fw_rules() + expected_fw1), self._recursive_sort_list(fw_rules)) self._router_interface_action('remove', r1['router']['id'], s1['subnet']['id'], None) self._remove_external_gateway_from_router( r2['router']['id'], ext_subnet['subnet']['network_id']) def test_routers_with_interface_on_same_edge(self): with self.router() as r1, self.router() as r2,\ self.subnet(cidr='11.0.0.0/24') as s11,\ 
self.subnet(cidr='12.0.0.0/24') as s12: self._router_interface_action('add', r1['router']['id'], s11['subnet']['id'], None) self._router_interface_action('add', r2['router']['id'], s12['subnet']['id'], None) routers_expected = [r1['router']['id'], r2['router']['id']] routers_1 = (self.plugin_instance.edge_manager. get_routers_on_same_edge( context.get_admin_context(), r1['router']['id'])) self.assertEqual(set(routers_expected), set(routers_1)) routers_2 = (self.plugin_instance.edge_manager. get_routers_on_same_edge( context.get_admin_context(), r2['router']['id'])) self.assertEqual(set(routers_expected), set(routers_2)) self._router_interface_action('remove', r1['router']['id'], s11['subnet']['id'], None) self._router_interface_action('remove', r2['router']['id'], s12['subnet']['id'], None) def test_routers_with_overlap_interfaces(self): with self.router() as r1, self.router() as r2,\ self.subnet(cidr='11.0.0.0/24') as s11,\ self.subnet(cidr='11.0.0.0/24') as s12: self._router_interface_action('add', r1['router']['id'], s11['subnet']['id'], None) self._router_interface_action('add', r2['router']['id'], s12['subnet']['id'], None) r1_expected = [r1['router']['id']] routers_1 = (self.plugin_instance.edge_manager. get_routers_on_same_edge( context.get_admin_context(), r1['router']['id'])) self.assertEqual(r1_expected, routers_1) r2_expected = [r2['router']['id']] routers_2 = (self.plugin_instance.edge_manager. 
get_routers_on_same_edge( context.get_admin_context(), r2['router']['id'])) self.assertEqual(r2_expected, routers_2) self._router_interface_action('remove', r1['router']['id'], s11['subnet']['id'], None) self._router_interface_action('remove', r2['router']['id'], s12['subnet']['id'], None) def test_routers_with_overlap_interfaces_with_migration(self): with self.router() as r1, self.router() as r2,\ self.subnet(cidr='11.0.0.0/24') as s11,\ self.subnet(cidr='12.0.0.0/24') as s12,\ self.subnet(cidr='11.0.0.0/24') as s13: self._router_interface_action('add', r1['router']['id'], s11['subnet']['id'], None) self._router_interface_action('add', r2['router']['id'], s12['subnet']['id'], None) r1_expected = [r1['router']['id'], r2['router']['id']] routers_1 = (self.plugin_instance.edge_manager. get_routers_on_same_edge( context.get_admin_context(), r1['router']['id'])) self.assertEqual(set(r1_expected), set(routers_1)) self._router_interface_action('add', r2['router']['id'], s13['subnet']['id'], None) r1_expected = [r1['router']['id']] routers_1 = (self.plugin_instance.edge_manager. get_routers_on_same_edge( context.get_admin_context(), r1['router']['id'])) self.assertEqual(r1_expected, routers_1) self._router_interface_action('remove', r1['router']['id'], s11['subnet']['id'], None) self._router_interface_action('remove', r2['router']['id'], s12['subnet']['id'], None) self._router_interface_action('remove', r2['router']['id'], s13['subnet']['id'], None) def test_routers_with_different_subnet_on_same_network(self): with self.router() as r1, self.router() as r2,\ self.network() as net,\ self.subnet(network=net, cidr='12.0.0.0/24') as s1,\ self.subnet(network=net, cidr='13.0.0.0/24') as s2: self._router_interface_action('add', r1['router']['id'], s1['subnet']['id'], None) self._router_interface_action('add', r2['router']['id'], s2['subnet']['id'], None) routers_2 = (self.plugin_instance.edge_manager. 
get_routers_on_same_edge( context.get_admin_context(), r2['router']['id'])) self.assertEqual(1, len(routers_2)) self._router_interface_action('remove', r1['router']['id'], s1['subnet']['id'], None) self._router_interface_action('remove', r2['router']['id'], s2['subnet']['id'], None) def test_routers_with_different_subnet_on_same_network_migration(self): with self.router() as r1, self.router() as r2, self.network() as net,\ self.subnet(cidr='11.0.0.0/24') as s1,\ self.subnet(network=net, cidr='12.0.0.0/24') as s2,\ self.subnet(network=net, cidr='13.0.0.0/24') as s3: self._router_interface_action('add', r1['router']['id'], s1['subnet']['id'], None) self._router_interface_action('add', r2['router']['id'], s2['subnet']['id'], None) routers_2 = (self.plugin_instance.edge_manager. get_routers_on_same_edge( context.get_admin_context(), r2['router']['id'])) self.assertEqual(2, len(routers_2)) self._router_interface_action('add', r2['router']['id'], s3['subnet']['id'], None) routers_2 = (self.plugin_instance.edge_manager. get_routers_on_same_edge( context.get_admin_context(), r2['router']['id'])) self.assertEqual(2, len(routers_2)) self._router_interface_action('remove', r2['router']['id'], s3['subnet']['id'], None) self._router_interface_action('add', r1['router']['id'], s3['subnet']['id'], None) routers_2 = (self.plugin_instance.edge_manager. 
get_routers_on_same_edge( context.get_admin_context(), r2['router']['id'])) self.assertEqual(1, len(routers_2)) self._router_interface_action('remove', r1['router']['id'], s3['subnet']['id'], None) self._router_interface_action('remove', r1['router']['id'], s1['subnet']['id'], None) self._router_interface_action('remove', r2['router']['id'], s2['subnet']['id'], None) def test_routers_set_same_gateway_on_same_edge(self): with self.router() as r1, self.router() as r2,\ self.network() as ext_net,\ self.subnet(cidr='11.0.0.0/24') as s1,\ self.subnet(cidr='12.0.0.0/24') as s2,\ self.subnet(network=ext_net, cidr='13.0.0.0/24'): self._set_net_external(ext_net['network']['id']) self._router_interface_action('add', r1['router']['id'], s1['subnet']['id'], None) self._router_interface_action('add', r2['router']['id'], s2['subnet']['id'], None) self._add_external_gateway_to_router( r1['router']['id'], ext_net['network']['id']) self._add_external_gateway_to_router( r2['router']['id'], ext_net['network']['id']) routers_2 = (self.plugin_instance.edge_manager. 
get_routers_on_same_edge( context.get_admin_context(), r2['router']['id'])) self.assertEqual(2, len(routers_2)) self._router_interface_action('remove', r1['router']['id'], s1['subnet']['id'], None) self._router_interface_action('remove', r2['router']['id'], s2['subnet']['id'], None) self._remove_external_gateway_from_router( r1['router']['id'], ext_net['network']['id']) self._remove_external_gateway_from_router( r2['router']['id'], ext_net['network']['id']) def test_routers_set_different_gateway_on_different_edge(self): with self.router() as r1, self.router() as r2,\ self.network() as ext1, self.network() as ext2,\ self.subnet(cidr='11.0.0.0/24') as s1,\ self.subnet(cidr='12.0.0.0/24') as s2,\ self.subnet(network=ext1, cidr='13.0.0.0/24'),\ self.subnet(network=ext2, cidr='14.0.0.0/24'): self._set_net_external(ext1['network']['id']) self._set_net_external(ext2['network']['id']) self._router_interface_action('add', r1['router']['id'], s1['subnet']['id'], None) self._router_interface_action('add', r2['router']['id'], s2['subnet']['id'], None) self._add_external_gateway_to_router( r1['router']['id'], ext1['network']['id']) self._add_external_gateway_to_router( r2['router']['id'], ext1['network']['id']) routers_2 = (self.plugin_instance.edge_manager. get_routers_on_same_edge( context.get_admin_context(), r2['router']['id'])) self.assertEqual(2, len(routers_2)) self._add_external_gateway_to_router( r2['router']['id'], ext2['network']['id']) routers_2 = (self.plugin_instance.edge_manager. 
get_routers_on_same_edge( context.get_admin_context(), r2['router']['id'])) self.assertEqual(1, len(routers_2)) self._router_interface_action('remove', r1['router']['id'], s1['subnet']['id'], None) self._router_interface_action('remove', r2['router']['id'], s2['subnet']['id'], None) self._remove_external_gateway_from_router( r1['router']['id'], ext1['network']['id']) self._remove_external_gateway_from_router( r2['router']['id'], ext2['network']['id']) def test_get_available_and_conflicting_ids_with_no_conflict(self): with self.router() as r1, self.router() as r2,\ self.subnet(cidr='11.0.0.0/24') as s1,\ self.subnet(cidr='12.0.0.0/24') as s2: self._router_interface_action('add', r1['router']['id'], s1['subnet']['id'], None) self._router_interface_action('add', r2['router']['id'], s2['subnet']['id'], None) router_driver = (self.plugin_instance._router_managers. get_tenant_router_driver(context, 'shared')) available_router_ids, conflict_router_ids = ( router_driver._get_available_and_conflicting_ids( context.get_admin_context(), r1['router']['id'])) self.assertIn(r2['router']['id'], available_router_ids) self.assertEqual(0, len(conflict_router_ids)) def test_get_available_and_conflicting_ids_with_conflict(self): with self.router() as r1, self.router() as r2,\ self.subnet(cidr='11.0.0.0/24') as s1,\ self.subnet(cidr='11.0.0.0/24') as s2: self._router_interface_action('add', r1['router']['id'], s1['subnet']['id'], None) self._router_interface_action('add', r2['router']['id'], s2['subnet']['id'], None) router_driver = (self.plugin_instance._router_managers. 
get_tenant_router_driver(context, 'shared')) available_router_ids, conflict_router_ids = ( router_driver._get_available_and_conflicting_ids( context.get_admin_context(), r1['router']['id'])) self.assertIn(r2['router']['id'], conflict_router_ids) self.assertEqual(0, len(available_router_ids)) def test_get_available_and_conflicting_ids_with_diff_gw(self): with self.router() as r1, self.router() as r2,\ self.network() as ext1, self.network() as ext2,\ self.subnet(cidr='11.0.0.0/24') as s1,\ self.subnet(cidr='12.0.0.0/24') as s2,\ self.subnet(network=ext1, cidr='13.0.0.0/24'),\ self.subnet(network=ext2, cidr='14.0.0.0/24'): self._set_net_external(ext1['network']['id']) self._set_net_external(ext2['network']['id']) self._router_interface_action('add', r1['router']['id'], s1['subnet']['id'], None) self._router_interface_action('add', r2['router']['id'], s2['subnet']['id'], None) self._add_external_gateway_to_router( r1['router']['id'], ext1['network']['id']) self._add_external_gateway_to_router( r2['router']['id'], ext2['network']['id']) router_driver = (self.plugin_instance._router_managers. get_tenant_router_driver(context, 'shared')) available_router_ids, conflict_router_ids = ( router_driver._get_available_and_conflicting_ids( context.get_admin_context(), r1['router']['id'])) self.assertIn(r2['router']['id'], conflict_router_ids) self.assertEqual(0, len(available_router_ids)) def test_get_available_and_conflicting_ids_with_tenants(self): cfg.CONF.set_override('share_edges_between_tenants', False, group="nsxv") with self.router(tenant_id='fake1') as r1,\ self.router(tenant_id='fake2') as r2,\ self.subnet(cidr='11.0.0.0/24') as s1,\ self.subnet(cidr='12.0.0.0/24') as s2: self._router_interface_action('add', r1['router']['id'], s1['subnet']['id'], None) self._router_interface_action('add', r2['router']['id'], s2['subnet']['id'], None) router_driver = (self.plugin_instance._router_managers. 
get_tenant_router_driver(context, 'shared')) available_router_ids, conflict_router_ids = ( router_driver._get_available_and_conflicting_ids( context.get_admin_context(), r1['router']['id'])) self.assertIn(r2['router']['id'], conflict_router_ids) self.assertEqual(0, len(available_router_ids)) def test_migrate_shared_router_to_exclusive(self): with self.router(name='r7') as r1, \ self.subnet(cidr='11.0.0.0/24') as s1: self._router_interface_action('add', r1['router']['id'], s1['subnet']['id'], None) # update the router type: router_id = r1['router']['id'] self._update('routers', router_id, {'router': {'router_type': 'exclusive'}}) # get the updated router and check it's type body = self._show('routers', router_id) self.assertEqual('exclusive', body['router']['router_type']) def _test_create_router_with_az_hint(self, with_hint): # init the availability zones in the plugin az_name = 'az7' set_az_in_config(az_name) p = directory.get_plugin() p._availability_zones_data = nsx_az.NsxVAvailabilityZones() # create a router with/without hints router = {'router': {'admin_state_up': True, 'name': 'e161be1d-0d0d-4046-9823-5a593d94f72c', 'tenant_id': 'FAKE_TENANT', 'router_type': 'shared'}} if with_hint: router['router']['availability_zone_hints'] = [az_name] returned_router = p.create_router(context.get_admin_context(), router) # availability zones is still empty because the router is not attached if with_hint: self.assertEqual([az_name], returned_router['availability_zone_hints']) else: self.assertEqual([], returned_router['availability_zone_hints']) self.assertEqual([], returned_router['availability_zones']) # Add interface so the router will be attached to an edge with self.subnet() as s1: router_id = returned_router['id'] self._router_interface_action('add', router_id, s1['subnet']['id'], None) edge_id = edge_utils.get_router_edge_id( context.get_admin_context(), router_id) res_az = nsxv_db.get_edge_availability_zone( context.get_admin_context().session, edge_id) expected_az 
= az_name if with_hint else 'default' self.assertEqual(expected_az, res_az) def test_create_router_with_az_hint(self): self._test_create_router_with_az_hint(True) def test_create_router_without_az_hint(self): self._test_create_router_with_az_hint(False) def test_router_update_with_size_fail(self): """Shared router currently does not support router size update """ with self.router() as r: router_id = r['router']['id'] body = self._show('routers', router_id) body['router']['router_size'] = 'small' self._update('routers', router_id, body, expected_code=400, neutron_context=context.get_admin_context()) class TestRouterFlavorTestCase(extension.ExtensionTestCase, test_l3_plugin.L3NatTestCaseMixin, L3NatTest ): FLAVOR_PLUGIN = 'neutron.services.flavors.flavors_plugin.FlavorsPlugin' def _mock_add_flavor_id(dummy, router_res, router_db): # this function is a registered callback so we can't mock it # in a regular way. # need to change behavior for this test suite only, since # there is no "unregister_dict_extend_funcs" if router_res['name'] == 'router_with_flavor': router_res['flavor_id'] = 'raspberry' def setUp(self, plugin=PLUGIN_NAME): # init the core plugin and flavors plugin service_plugins = {plugin_const.FLAVORS: self.FLAVOR_PLUGIN} super(TestRouterFlavorTestCase, self).setUp( plugin=plugin, service_plugins=service_plugins) self.plugin = directory.get_plugin() self.plugin._flv_plugin = directory.get_plugin(plugin_const.FLAVORS) self.plugin._process_router_flavor_create = mock.Mock() resource_extend.register_funcs( l3_apidef.ROUTERS, [self._mock_add_flavor_id]) # init the availability zones self.az_name = 'az7' set_az_in_config(self.az_name) self.plugin._availability_zones_data = ( nsx_az.NsxVAvailabilityZones()) self._iteration = 1 def assertSyslogConfig(self, expected): """Verify syslog was updated in fake driver Test assumes edge ids are created sequentially starting from edge-1 """ edge_id = ('edge-%s' % self._iteration) actual = 
self.plugin.nsx_v.vcns.get_edge_syslog(edge_id)[1] if not expected: # test expects no syslog to be configured self.assertNotIn('serverAddresses', actual) return self.assertEqual(expected['protocol'], actual['protocol']) self.assertEqual(expected['server_ip'], actual['serverAddresses']['ipAddress'][0]) if 'server2_ip' in expected: self.assertEqual(expected['server2_ip'], actual['serverAddresses']['ipAddress'][1]) def _test_router_create_with_flavor( self, metainfo, expected_data, create_type=None, create_size=None, create_az=None): router_data = {'flavor_id': 'dummy', 'tenant_id': 'whatever', 'name': 'router_with_flavor', 'admin_state_up': True} if create_type is not None: router_data['router_type'] = create_type if create_size is not None: router_data['router_size'] = create_size if create_az is not None: router_data['availability_zone_hints'] = [create_az] flavor_data = {'service_type': plugin_const.L3, 'enabled': True, 'service_profiles': ['profile_id']} # Mock the flavors plugin with mock.patch(self.FLAVOR_PLUGIN + '.get_flavor', return_value=flavor_data): with mock.patch(self.FLAVOR_PLUGIN + '.get_service_profile', return_value={'metainfo': metainfo}): router = self.plugin.create_router( context.get_admin_context(), {'router': router_data}) # syslog data is not part of router config # and needs to be validated separately if 'syslog' in expected_data.keys(): self.assertSyslogConfig(expected_data['syslog']) for key, expected_val in expected_data.items(): if key != 'syslog': self.assertEqual(expected_val, router[key]) def test_router_create_with_flavor_different_sizes(self): """Create exclusive router with size in flavor """ for size in ['compact', 'large', 'xlarge', 'quadlarge']: metainfo = "{'router_size':'%s'}" % size expected_router = {'router_type': 'exclusive', 'router_size': size} self._test_router_create_with_flavor( metainfo, expected_router, create_type='exclusive') def test_router_create_with_flavor_ex_different_sizes(self): """Create exclusive router 
with size and type in flavor """ for size in ['compact', 'large', 'xlarge', 'quadlarge']: metainfo = "{'router_size':'%s','router_type':'exclusive'}" % size expected_router = {'router_type': 'exclusive', 'router_size': size} self._test_router_create_with_flavor( metainfo, expected_router) def test_router_create_with_flavor_az(self): """Create exclusive router with availability zone in flavor """ metainfo = "{'availability_zone_hints':'%s'}" % self.az_name expected_router = {'router_type': 'exclusive', 'availability_zone_hints': [self.az_name], 'distributed': False} self._test_router_create_with_flavor( metainfo, expected_router, create_type='exclusive') def test_router_create_with_flavor_shared(self): """Create shared router with availability zone and type in flavor """ metainfo = ("{'availability_zone_hints':'%s'," "'router_type':'shared'}" % self.az_name) expected_router = {'router_type': 'shared', 'availability_zone_hints': [self.az_name], 'distributed': False} self._test_router_create_with_flavor( metainfo, expected_router) def test_router_create_with_flavor_distributed(self): """Create distributed router with availability zone and type in flavor """ metainfo = ("{'availability_zone_hints':'%s'," "'distributed':true}" % self.az_name) expected_router = {'distributed': True, 'availability_zone_hints': [self.az_name]} self._test_router_create_with_flavor( metainfo, expected_router) def test_router_flavor_error_parsing(self): """Use the wrong format for the flavor metainfo It should be ignored, and default values are used """ metainfo = "xxx" expected_router = {'distributed': False, 'router_type': 'shared'} self._test_router_create_with_flavor( metainfo, expected_router) def test_router_create_with_syslog_flavor(self): """Create exclusive router with syslog config in flavor""" # Basic config - server IP only ip = '1.1.1.10' expected_router = {'router_type': 'exclusive', 'syslog': {'protocol': 'tcp', 'server_ip': ip}} metainfo = ("{'router_type':'exclusive'," 
"'syslog':{'server_ip':'%s'}}" % ip) self._iteration = 1 self._test_router_create_with_flavor( metainfo, expected_router) # Advanced config - secondary server IP, protocol and loglevel ip2 = '1.1.1.11' for protocol in ['tcp', 'udp']: for loglevel in ['none', 'debug', 'info', 'warning', 'error']: expected_router = {'router_type': 'exclusive', 'syslog': {'protocol': protocol, 'server_ip': ip, 'server2_ip': ip2}} metainfo = ("{'router_type':'exclusive'," "'syslog':{'server_ip':'%s', 'server2_ip':'%s'," "'protocol':'%s', 'log_level':'%s'}}" % (ip, ip2, protocol, loglevel)) self._iteration += 1 self._test_router_create_with_flavor( metainfo, expected_router) def test_router_create_with_syslog_flavor_error(self): """Create router based on flavor with badly formed syslog metadata Syslog metadata should be ignored """ expected_router = {'router_type': 'exclusive', 'syslog': None} self._iteration = 0 bad_defs = ("'server_ip':'1.1.1.1', 'protocol':'http2'", "'server2_ip':'2.2.2.2'", "'protocol':'tcp'", "'server_ip':'1.1.1.1', 'protocol':'udp','log_level':'pro'", "'log_level':'error'") for meta in bad_defs: metainfo = "{'router_type':'exclusive', 'syslog': {%s}}" % meta self._iteration += 1 self._test_router_create_with_flavor( metainfo, expected_router) def _test_router_create_with_flavor_error( self, metainfo, error_code, create_type=None, create_size=None, create_az=None): router_data = {'flavor_id': 'dummy', 'tenant_id': 'whatever', 'name': 'test_router', 'admin_state_up': True} if create_type is not None: router_data['router_type'] = create_type if create_size is not None: router_data['router_size'] = create_size if create_az is not None: router_data['availability_zone_hints'] = [create_az] flavor_data = {'service_type': plugin_const.L3, 'enabled': True, 'service_profiles': ['profile_id']} # Mock the flavors plugin with mock.patch(self.FLAVOR_PLUGIN + '.get_flavor', return_value=flavor_data): with mock.patch(self.FLAVOR_PLUGIN + '.get_service_profile', 
return_value={'metainfo': metainfo}): self.assertRaises(error_code, self.plugin.create_router, context.get_admin_context(), {'router': router_data}) def test_router_flavor_size_conflict(self): metainfo = "{'router_size':'large','router_type':'exclusive'}" self._test_router_create_with_flavor_error( metainfo, n_exc.BadRequest, create_size='compact') def test_router_flavor_type_conflict(self): metainfo = "{'router_size':'large','router_type':'exclusive'}" self._test_router_create_with_flavor_error( metainfo, n_exc.BadRequest, create_type='shared') def test_router_flavor_az_conflict(self): metainfo = ("{'availability_zone_hints':'%s'," "'distributed':true}" % self.az_name) self._test_router_create_with_flavor_error( metainfo, n_exc.BadRequest, create_az=['az2']) class DHCPOptsTestCase(test_dhcpopts.TestExtraDhcpOpt, NsxVPluginV2TestCase): def setUp(self, plugin=None): super(test_dhcpopts.ExtraDhcpOptDBTestCase, self).setUp( plugin=PLUGIN_NAME) def test_create_port_with_extradhcpopts(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, {'opt_name': 'tftp-server-address', 'opt_value': '123.123.123.123'}] params = {edo_ext.EXTRADHCPOPTS: opt_list, 'arg_list': (edo_ext.EXTRADHCPOPTS,)} with self.port(**params) as port: self._check_opts(opt_list, port['port'][edo_ext.EXTRADHCPOPTS]) def test_create_port_with_extradhcpopts_ipv6_opt_version(self): self.skipTest('No DHCP v6 Support yet') def test_create_port_with_extradhcpopts_ipv4_opt_version(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0', 'ip_version': 4}, {'opt_name': 'tftp-server-address', 'opt_value': '123.123.123.123', 'ip_version': 4}] params = {edo_ext.EXTRADHCPOPTS: opt_list, 'arg_list': (edo_ext.EXTRADHCPOPTS,)} with self.port(**params) as port: self._check_opts(opt_list, port['port'][edo_ext.EXTRADHCPOPTS]) def test_update_port_with_extradhcpopts_with_same(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, {'opt_name': 
'tftp-server-address', 'opt_value': '123.123.123.123'}] upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': 'changeme.0'}] expected_opts = opt_list[:] for i in expected_opts: if i['opt_name'] == upd_opts[0]['opt_name']: i['opt_value'] = upd_opts[0]['opt_value'] break self._test_update_port_with_extradhcpopts(opt_list, upd_opts, expected_opts) def test_update_port_with_additional_extradhcpopt(self): opt_list = [{'opt_name': 'tftp-server-address', 'opt_value': '123.123.123.123'}] upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': 'changeme.0'}] expected_opts = copy.deepcopy(opt_list) expected_opts.append(upd_opts[0]) self._test_update_port_with_extradhcpopts(opt_list, upd_opts, expected_opts) def test_update_port_with_extradhcpopts(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, {'opt_name': 'tftp-server-address', 'opt_value': '123.123.123.123'}] upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': 'changeme.0'}] expected_opts = copy.deepcopy(opt_list) for i in expected_opts: if i['opt_name'] == upd_opts[0]['opt_name']: i['opt_value'] = upd_opts[0]['opt_value'] break self._test_update_port_with_extradhcpopts(opt_list, upd_opts, expected_opts) def test_update_port_with_extradhcpopt_delete(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, {'opt_name': 'tftp-server-address', 'opt_value': '123.123.123.123'}] upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': None}] expected_opts = [] expected_opts = [opt for opt in opt_list if opt['opt_name'] != 'bootfile-name'] self._test_update_port_with_extradhcpopts(opt_list, upd_opts, expected_opts) def test_update_port_adding_extradhcpopts(self): opt_list = [] upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, {'opt_name': 'tftp-server-address', 'opt_value': '123.123.123.123'}] expected_opts = copy.deepcopy(upd_opts) self._test_update_port_with_extradhcpopts(opt_list, upd_opts, expected_opts) def 
test_update_port_with_blank_name_extradhcpopt(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, {'opt_name': 'tftp-server-address', 'opt_value': '123.123.123.123'}] upd_opts = [{'opt_name': ' ', 'opt_value': 'pxelinux.0'}] params = {edo_ext.EXTRADHCPOPTS: opt_list, 'arg_list': (edo_ext.EXTRADHCPOPTS,)} with self.port(**params) as port: update_port = {'port': {edo_ext.EXTRADHCPOPTS: upd_opts}} req = self.new_update_request('ports', update_port, port['port']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_create_port_with_empty_router_extradhcpopts(self): self.skipTest('No DHCP support option for router') def test_update_port_with_blank_router_extradhcpopt(self): self.skipTest('No DHCP support option for router') def test_update_port_with_extradhcpopts_ipv6_change_value(self): self.skipTest('No DHCP v6 Support yet') def test_update_port_with_extradhcpopts_add_another_ver_opt(self): self.skipTest('No DHCP v6 Support yet') def test_update_port_with_blank_string_extradhcpopt(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': 'pxelinux.0'}, {'opt_name': 'tftp-server', 'opt_value': '123.123.123.123'}] upd_opts = [{'opt_name': 'bootfile-name', 'opt_value': ' '}] params = {edo_ext.EXTRADHCPOPTS: opt_list, 'arg_list': (edo_ext.EXTRADHCPOPTS,)} with self.port(**params) as port: update_port = {'port': {edo_ext.EXTRADHCPOPTS: upd_opts}} req = self.new_update_request('ports', update_port, port['port']['id']) res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPBadRequest.code, res.status_int) def test_create_port_with_none_extradhcpopts(self): opt_list = [{'opt_name': 'bootfile-name', 'opt_value': None}, {'opt_name': 'tftp-server-address', 'opt_value': '123.123.123.123'}] expected = [{'opt_name': 'tftp-server-address', 'opt_value': '123.123.123.123'}] params = {edo_ext.EXTRADHCPOPTS: opt_list, 'arg_list': (edo_ext.EXTRADHCPOPTS,)} with self.port(**params) as port: 
self._check_opts(expected, port['port'][edo_ext.EXTRADHCPOPTS]) def test_create_port_with_extradhcpopts_codes(self): opt_list = [{'opt_name': '85', 'opt_value': 'cafecafe'}] params = {edo_ext.EXTRADHCPOPTS: opt_list, 'arg_list': (edo_ext.EXTRADHCPOPTS,)} with self.port(**params) as port: self._check_opts(opt_list, port['port'][edo_ext.EXTRADHCPOPTS]) def test_update_port_with_extradhcpopts_codes(self): opt_list = [{'opt_name': '85', 'opt_value': 'cafecafe'}] upd_opts = [{'opt_name': '85', 'opt_value': '01010101'}] expected_opts = copy.deepcopy(opt_list) for i in expected_opts: if i['opt_name'] == upd_opts[0]['opt_name']: i['opt_value'] = upd_opts[0]['opt_value'] break self._test_update_port_with_extradhcpopts(opt_list, upd_opts, expected_opts) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2382548 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_v/vshield/0000755000175000017500000000000000000000000024304 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_v/vshield/__init__.py0000644000175000017500000000000000000000000026403 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_v/vshield/fake_vcns.py0000644000175000017500000016326100000000000026626 0ustar00coreycorey00000000000000# Copyright 2013 VMware, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import xml.etree.ElementTree as ET import netaddr from oslo_serialization import jsonutils from oslo_utils import uuidutils import six from vmware_nsx._i18n import _ from vmware_nsx.plugins.nsx_v.vshield.common import constants from vmware_nsx.plugins.nsx_v.vshield.common import exceptions SECTION_LOCATION_HEADER = '/api/4.0/firewall/globalroot-0/config/%s/%s' class FakeVcns(object): errors = { 303: exceptions.ResourceRedirect, 400: exceptions.RequestBad, 403: exceptions.Forbidden, 404: exceptions.ResourceNotFound, 415: exceptions.MediaTypeUnsupport, 503: exceptions.ServiceUnavailable } def __init__(self, unique_router_name=True): self._jobs = {} self._job_idx = 0 self._edges = {} self._edge_idx = 0 self._lswitches = {} self._unique_router_name = unique_router_name self._fake_nsx_api = None self.fake_firewall_dict = {} self.temp_firewall = { "firewallRules": { "firewallRules": [] } } self.fake_ipsecvpn_dict = {} self.temp_ipsecvpn = { 'featureType': "ipsec_4.0", 'enabled': True, 'sites': {'sites': []}} self._fake_virtualservers_dict = {} self._fake_pools_dict = {} self._fake_monitors_dict = {} self._fake_app_profiles_dict = {} self._fake_loadbalancer_config = {} self._fake_virtual_wires = {} self._virtual_wire_id = 0 self._fake_portgroups = {} self._portgroup_id = 0 self._securitygroups = {'ids': 0, 'names': set()} self._sections = {'section_ids': 0, 'rule_ids': 0, 'names': set()} self._dhcp_bindings = {} self._spoofguard_policies = [] self._ipam_pools = {} def do_request(self, method, uri, params=None, format='json', **kwargs): pass def 
set_fake_nsx_api(self, fake_nsx_api): self._fake_nsx_api = fake_nsx_api def _validate_edge_name(self, name): for edge_id, edge in six.iteritems(self._edges): if edge['name'] == name: return False return True def deploy_edge(self, request): if (self._unique_router_name and not self._validate_edge_name(request['name'])): header = { 'status': 400 } msg = ('Edge name should be unique for tenant. Edge %s ' 'already exists for default tenant.') % request['name'] response = { 'details': msg, 'errorCode': 10085, 'rootCauseString': None, 'moduleName': 'vShield Edge', 'errorData': None } return (header, jsonutils.dumps(response)) self._edge_idx = self._edge_idx + 1 edge_id = "edge-%d" % self._edge_idx self._edges[edge_id] = { 'name': request['name'], 'request': request, 'nat_rules': None, 'nat_rule_id': 0, 'interface_index': 1 } header = { 'status': 200, 'location': 'https://host/api/4.0/edges/%s' % edge_id } response = '' return (header, response) def update_edge(self, edge_id, request): if edge_id not in self._edges: raise Exception(_("Edge %s does not exist") % edge_id) edge = self._edges[edge_id] edge['name'] = request['name'] header = { 'status': 200 } response = '' return (header, response) def get_edge_id(self, job_id): if job_id not in self._jobs: raise Exception(_("Job %s does not nexist") % job_id) header = { 'status': 200 } response = { 'edgeId': self._jobs[job_id] } return (header, response) def delete_edge(self, edge_id): if edge_id not in self._edges: raise Exception(_("Edge %s does not exist") % edge_id) del self._edges[edge_id] header = { 'status': 200 } response = '' return (header, response) def add_vdr_internal_interface(self, edge_id, interface): interface = interface['interfaces'][0] if not self._edges[edge_id].get('interfaces'): self._edges[edge_id]['interfaces'] = [] index = len(self._edges[edge_id]['interfaces']) interface['index'] = str(index) self._edges[edge_id]['interfaces'].append(interface) header = { 'status': 200 } response = {"interfaces": 
[{"index": str(index)}]} return (header, response) def get_edge_interfaces(self, edge_id): if not self._edges[edge_id].get('interfaces'): self._edges[edge_id]['interfaces'] = [] header = { 'status': 200 } response = {"interfaces": self._edges[edge_id].get('interfaces', [])} return (header, response) def update_vdr_internal_interface( self, edge_id, interface_index, interface): header = { 'status': 200 } response = '' return (header, response) def get_vdr_internal_interface(self, edge_id, interface_index): response = {} header = { 'status': 200 } for interface in self._edges[edge_id].get('interfaces', []): if int(interface['index']) == int(interface_index): response = interface return (header, response) def delete_vdr_internal_interface(self, edge_id, interface_index): for interface in self._edges[edge_id].get('interfaces', []): if int(interface['index']) == int(interface_index): header = { 'status': 200 } break header = {'status': 404} response = '' return (header, response) def get_interfaces(self, edge_id): header = { 'status': 200 } response = '' return (header, response) def update_interface(self, edge_id, vnic): header = { 'status': 200 } response = '' return (header, response) def delete_interface(self, edge_id, vnic_index): header = { 'status': 200 } response = '' return (header, response) def query_interface(self, edge_id, vnic_index): header = { 'status': 200 } response = { 'label': 'vNic_1', 'name': 'internal1', 'addressGroups': { 'addressGroups': [{'primaryAddress': '1.1.1.1'}]}, 'portgroupId': '1', 'mtu': 1500, 'type': 'trunk', 'subInterfaces': {'subInterfaces': []}, 'isConnected': True } return (header, response) def reconfigure_dhcp_service(self, edge_id, request): header = { 'status': 201 } response = '' return (header, response) def query_dhcp_configuration(self, edge_id): header = { 'status': 200 } response = { "featureType": "dhcp_4.0", "version": 14, "enabled": True, "staticBindings": {"staticBindings": [{ "macAddress": "fa:16:3e:e6:ad:ce", 
"bindingId": "binding-1"}]}, "ipPools": {"ipPools": []} } return (header, response) def create_dhcp_binding(self, edge_id, request): if not self._dhcp_bindings.get(edge_id): self._dhcp_bindings[edge_id] = {} self._dhcp_bindings[edge_id]['idx'] = 0 binding_idx = self._dhcp_bindings[edge_id]['idx'] binding_idx_str = "binding-" + str(binding_idx) self._dhcp_bindings[edge_id][binding_idx_str] = request self._dhcp_bindings[edge_id]['idx'] = binding_idx + 1 header = { 'status': 200, 'location': '/dhcp/config/bindings/%s' % binding_idx_str } response = '' return (header, response) def delete_dhcp_binding(self, edge_id, binding_id): if binding_id not in self._dhcp_bindings[edge_id]: raise Exception(_("binding %s does not exist") % binding_id) del self._dhcp_bindings[edge_id][binding_id] header = { 'status': 200 } response = '' return (header, response) def get_dhcp_binding(self, edge_id, binding_id): if binding_id not in self._dhcp_bindings[edge_id]: raise Exception(_("binding %s does not exist") % binding_id) response = self._dhcp_bindings[edge_id][binding_id] header = { 'status': 200 } return (header, response) def create_bridge(self, edge_id, request): if edge_id not in self._edges: raise Exception(_("Edge %s does not exist") % edge_id) header = { 'status': 204 } response = '' return (header, response) def delete_bridge(self, edge_id): if edge_id not in self._edges: raise Exception(_("Edge %s does not exist") % edge_id) header = { 'status': 204 } response = '' return (header, response) def get_nat_config(self, edge_id): if edge_id not in self._edges: raise Exception(_("Edge %s does not exist") % edge_id) edge = self._edges[edge_id] rules = edge['nat_rules'] if rules is None: rules = { 'rules': { 'natRulesDtos': [] }, 'version': 1 } header = { 'status': 200 } rules['version'] = 1 return (header, rules) def update_nat_config(self, edge_id, nat): if edge_id not in self._edges: raise Exception(_("Edge %s does not exist") % edge_id) edge = self._edges[edge_id] max_rule_id = 
edge['nat_rule_id'] rules = copy.deepcopy(nat) for rule in rules['rules']['natRulesDtos']: rule_id = rule.get('ruleId', 0) if rule_id > max_rule_id: max_rule_id = rule_id for rule in rules['rules']['natRulesDtos']: if 'ruleId' not in rule: max_rule_id = max_rule_id + 1 rule['ruleId'] = max_rule_id edge['nat_rules'] = rules edge['nat_rule_id'] = max_rule_id header = { 'status': 200 } response = '' return (header, response) def delete_nat_rule(self, edge_id, rule_id): if edge_id not in self._edges: raise Exception(_("Edge %s does not exist") % edge_id) edge = self._edges[edge_id] rules = edge['nat_rules'] rule_to_delete = None for rule in rules['rules']['natRulesDtos']: if rule_id == rule['ruleId']: rule_to_delete = rule break if rule_to_delete is None: raise Exception(_("Rule id %d doest not exist") % rule_id) rules['rules']['natRulesDtos'].remove(rule_to_delete) header = { 'status': 200 } response = '' return (header, response) def get_edge_status(self, edge_id): if edge_id not in self._edges: raise Exception(_("Edge %s does not exist") % edge_id) header = { 'status': 200 } response = { 'edgeStatus': 'GREEN' } return (header, response) def get_edge(self, edge_id): if edge_id not in self._edges: raise exceptions.VcnsGeneralException( _("Edge %s does not exist!") % edge_id) header = { 'status': 200 } response = { 'name': 'fake-edge', 'id': edge_id, 'appliances': {'appliances': []} } return (header, response) def get_edges(self): edges = [] for edge_id in self._edges: edges.append({ 'id': edge_id, 'edgeStatus': 'GREEN', 'name': self._edges[edge_id]['name'] }) return edges def get_vdn_switch(self, dvs_id): header = { 'status': 200 } response = { 'name': 'fake-switch', 'id': dvs_id, 'teamingPolicy': 'ETHER_CHANNEL' } return (header, response) def update_vdn_switch(self, switch): header = { 'status': 200 } response = '' return (header, response) def update_routes(self, edge_id, routes): header = { 'status': 200 } response = '' return (header, response) def 
create_lswitch(self, lsconfig): # The lswitch is created via VCNS API so the fake nsx_api will not # see it. Added to fake nsx_api here. if self._fake_nsx_api: lswitch = self._fake_nsx_api._add_lswitch( jsonutils.dumps(lsconfig)) else: lswitch = lsconfig lswitch['uuid'] = uuidutils.generate_uuid() self._lswitches[lswitch['uuid']] = lswitch header = { 'status': 200 } lswitch['_href'] = '/api/ws.v1/lswitch/%s' % lswitch['uuid'] return (header, lswitch) def delete_lswitch(self, id): if id not in self._lswitches: raise Exception(_("Lswitch %s does not exist") % id) del self._lswitches[id] if self._fake_nsx_api: # TODO(fank): fix the hack del self._fake_nsx_api._fake_lswitch_dict[id] header = { 'status': 200 } response = '' return (header, response) def sync_firewall(self): header = {'status': 204} response = "" return self.return_helper(header, response) def update_firewall(self, edge_id, fw_req): self.fake_firewall_dict[edge_id] = fw_req rules = self.fake_firewall_dict[edge_id][ 'firewallRules']['firewallRules'] index = 10 for rule in rules: rule['ruleId'] = index index += 10 header = {'status': 204} response = "" return self.return_helper(header, response) def delete_firewall(self, edge_id): header = {'status': 404} if edge_id in self.fake_firewall_dict: header = {'status': 204} del self.fake_firewall_dict[edge_id] response = "" return self.return_helper(header, response) def update_firewall_rule(self, edge_id, vcns_rule_id, fwr_req): if edge_id not in self.fake_firewall_dict: raise Exception(_("Edge %s does not exist") % edge_id) header = {'status': 404} rules = self.fake_firewall_dict[edge_id][ 'firewallRules']['firewallRules'] for rule in rules: if rule['ruleId'] == int(vcns_rule_id): header['status'] = 204 rule.update(fwr_req) break response = "" return self.return_helper(header, response) def delete_firewall_rule(self, edge_id, vcns_rule_id): if edge_id not in self.fake_firewall_dict: raise Exception(_("Edge %s does not exist") % edge_id) header = {'status': 
404} rules = self.fake_firewall_dict[edge_id][ 'firewallRules']['firewallRules'] for index in range(len(rules)): if rules[index]['ruleId'] == int(vcns_rule_id): header['status'] = 204 del rules[index] break response = "" return self.return_helper(header, response) def add_firewall_rule_above(self, edge_id, ref_vcns_rule_id, fwr_req): if edge_id not in self.fake_firewall_dict: raise Exception(_("Edge %s does not exist") % edge_id) header = {'status': 404} rules = self.fake_firewall_dict[edge_id][ 'firewallRules']['firewallRules'] pre = 0 for index in range(len(rules)): if rules[index]['ruleId'] == int(ref_vcns_rule_id): rules.insert(index, fwr_req) rules[index]['ruleId'] = (int(ref_vcns_rule_id) + pre) / 2 header = { 'status': 204, 'location': "https://host/api/4.0/edges/edge_id/firewall" "/config/rules/%s" % rules[index]['ruleId']} break pre = int(rules[index]['ruleId']) response = "" return self.return_helper(header, response) def add_firewall_rule(self, edge_id, fwr_req): if edge_id not in self.fake_firewall_dict: self.fake_firewall_dict[edge_id] = self.temp_firewall rules = self.fake_firewall_dict[edge_id][ 'firewallRules']['firewallRules'] rules.append(fwr_req) index = len(rules) rules[index - 1]['ruleId'] = index * 10 header = { 'status': 204, 'location': "https://host/api/4.0/edges/edge_id/firewall" "/config/rules/%s" % rules[index - 1]['ruleId']} response = "" return self.return_helper(header, response) def get_firewall(self, edge_id): if edge_id not in self.fake_firewall_dict: self.fake_firewall_dict[edge_id] = self.temp_firewall header = {'status': 204} response = self.fake_firewall_dict[edge_id] return self.return_helper(header, response) def get_firewall_rule(self, edge_id, vcns_rule_id): if edge_id not in self.fake_firewall_dict: raise Exception(_("Edge %s does not exist") % edge_id) header = {'status': 404} response = "" rules = self.fake_firewall_dict[edge_id][ 'firewallRules']['firewallRules'] for rule in rules: if rule['ruleId'] == 
int(vcns_rule_id): header['status'] = 204 response = rule break return self.return_helper(header, response) def is_name_unique(self, objs_dict, name): return name not in [obj_dict['name'] for obj_dict in objs_dict.values()] def create_vip(self, edge_id, vip_new): header = {'status': 403} response = "" if not self._fake_virtualservers_dict.get(edge_id): self._fake_virtualservers_dict[edge_id] = {} if not self.is_name_unique(self._fake_virtualservers_dict[edge_id], vip_new['name']): return self.return_helper(header, response) vip_vseid = uuidutils.generate_uuid() self._fake_virtualservers_dict[edge_id][vip_vseid] = vip_new header = { 'status': 204, 'location': "https://host/api/4.0/edges/edge_id" "/loadbalancer/config/%s" % vip_vseid} return self.return_helper(header, response) def get_vip(self, edge_id, vip_vseid): header = {'status': 404} response = "" if not self._fake_virtualservers_dict.get(edge_id) or ( not self._fake_virtualservers_dict[edge_id].get(vip_vseid)): return self.return_helper(header, response) header = {'status': 204} response = self._fake_virtualservers_dict[edge_id][vip_vseid] return self.return_helper(header, response) def update_vip(self, edge_id, vip_vseid, vip_new): header = {'status': 404} response = "" if not self._fake_virtualservers_dict.get(edge_id) or ( not self._fake_virtualservers_dict[edge_id].get(vip_vseid)): return self.return_helper(header, response) header = {'status': 204} self._fake_virtualservers_dict[edge_id][vip_vseid].update( vip_new) return self.return_helper(header, response) def delete_vip(self, edge_id, vip_vseid): header = {'status': 404} response = "" if not self._fake_virtualservers_dict.get(edge_id) or ( not self._fake_virtualservers_dict[edge_id].get(vip_vseid)): return self.return_helper(header, response) header = {'status': 204} del self._fake_virtualservers_dict[edge_id][vip_vseid] return self.return_helper(header, response) def create_pool(self, edge_id, pool_new): header = {'status': 403} response = "" if not 
self._fake_pools_dict.get(edge_id): self._fake_pools_dict[edge_id] = {} if not self.is_name_unique(self._fake_pools_dict[edge_id], pool_new['name']): return self.return_helper(header, response) pool_vseid = uuidutils.generate_uuid() self._fake_pools_dict[edge_id][pool_vseid] = pool_new header = { 'status': 204, 'location': "https://host/api/4.0/edges/edge_id" "/loadbalancer/config/%s" % pool_vseid} return self.return_helper(header, response) def get_pool(self, edge_id, pool_vseid): header = {'status': 404} response = "" if not self._fake_pools_dict.get(edge_id) or ( not self._fake_pools_dict[edge_id].get(pool_vseid)): return self.return_helper(header, response) header = {'status': 204} response = self._fake_pools_dict[edge_id][pool_vseid] return self.return_helper(header, response) def update_pool(self, edge_id, pool_vseid, pool_new): header = {'status': 404} response = "" if not self._fake_pools_dict.get(edge_id) or ( not self._fake_pools_dict[edge_id].get(pool_vseid)): return self.return_helper(header, response) header = {'status': 204} self._fake_pools_dict[edge_id][pool_vseid].update( pool_new) return self.return_helper(header, response) def delete_pool(self, edge_id, pool_vseid): header = {'status': 404} response = "" if not self._fake_pools_dict.get(edge_id) or ( not self._fake_pools_dict[edge_id].get(pool_vseid)): return self.return_helper(header, response) header = {'status': 204} del self._fake_pools_dict[edge_id][pool_vseid] return self.return_helper(header, response) def create_health_monitor(self, edge_id, monitor_new): if not self._fake_monitors_dict.get(edge_id): self._fake_monitors_dict[edge_id] = {} monitor_vseid = uuidutils.generate_uuid() self._fake_monitors_dict[edge_id][monitor_vseid] = monitor_new header = { 'status': 204, 'location': "https://host/api/4.0/edges/edge_id" "/loadbalancer/config/%s" % monitor_vseid} response = "" return self.return_helper(header, response) def get_health_monitor(self, edge_id, monitor_vseid): header = {'status': 
404} response = "" if not self._fake_monitors_dict.get(edge_id) or ( not self._fake_monitors_dict[edge_id].get(monitor_vseid)): return self.return_helper(header, response) header = {'status': 204} response = self._fake_monitors_dict[edge_id][monitor_vseid] return self.return_helper(header, response) def update_health_monitor(self, edge_id, monitor_vseid, monitor_new): header = {'status': 404} response = "" if not self._fake_monitors_dict.get(edge_id) or ( not self._fake_monitors_dict[edge_id].get(monitor_vseid)): return self.return_helper(header, response) header = {'status': 204} self._fake_monitors_dict[edge_id][monitor_vseid].update( monitor_new) return self.return_helper(header, response) def delete_health_monitor(self, edge_id, monitor_vseid): header = {'status': 404} response = "" if not self._fake_monitors_dict.get(edge_id) or ( not self._fake_monitors_dict[edge_id].get(monitor_vseid)): return self.return_helper(header, response) header = {'status': 204} del self._fake_monitors_dict[edge_id][monitor_vseid] return self.return_helper(header, response) def create_app_profile(self, edge_id, app_profile): if not self._fake_app_profiles_dict.get(edge_id): self._fake_app_profiles_dict[edge_id] = {} app_profileid = uuidutils.generate_uuid() self._fake_app_profiles_dict[edge_id][app_profileid] = app_profile header = { 'status': 204, 'location': "https://host/api/4.0/edges/edge_id" "/loadbalancer/config/%s" % app_profileid} response = "" return self.return_helper(header, response) def update_app_profile(self, edge_id, app_profileid, app_profile): header = {'status': 404} response = "" if not self._fake_app_profiles_dict.get(edge_id) or ( not self._fake_app_profiles_dict[edge_id].get(app_profileid)): return self.return_helper(header, response) header = {'status': 204} self._fake_app_profiles_dict[edge_id][app_profileid].update( app_profile) return self.return_helper(header, response) def delete_app_profile(self, edge_id, app_profileid): header = {'status': 404} 
response = "" if not self._fake_app_profiles_dict.get(edge_id) or ( not self._fake_app_profiles_dict[edge_id].get(app_profileid)): return self.return_helper(header, response) header = {'status': 204} del self._fake_app_profiles_dict[edge_id][app_profileid] return self.return_helper(header, response) def create_app_rule(self, edge_id, app_rule): app_ruleid = uuidutils.generate_uuid() header = { 'status': 204, 'location': "https://host/api/4.0/edges/edge_id" "/loadbalancer/config/%s" % app_ruleid} response = "" return self.return_helper(header, response) def update_app_rule(self, edge_id, app_ruleid, app_rule): pass def delete_app_rule(self, edge_id, app_ruleid): pass def get_loadbalancer_config(self, edge_id): header = {'status': 204} response = {'config': False} if self._fake_loadbalancer_config[edge_id]: response['config'] = self._fake_loadbalancer_config[edge_id] return self.return_helper(header, response) def get_loadbalancer_statistics(self, edge_id): return [{}, {}] def update_ipsec_config(self, edge_id, ipsec_config): self.fake_ipsecvpn_dict[edge_id] = ipsec_config header = {'status': 204} response = "" return self.return_helper(header, response) def delete_ipsec_config(self, edge_id): header = {'status': 404} if edge_id in self.fake_ipsecvpn_dict: header = {'status': 204} del self.fake_ipsecvpn_dict[edge_id] response = "" return self.return_helper(header, response) def get_ipsec_config(self, edge_id): if edge_id not in self.fake_ipsecvpn_dict: self.fake_ipsecvpn_dict[edge_id] = self.temp_ipsecvpn header = {'status': 204} response = self.fake_ipsecvpn_dict[edge_id] return self.return_helper(header, response) def enable_service_loadbalancer(self, edge_id, config): header = {'status': 204} response = "" self._fake_loadbalancer_config[edge_id] = True return self.return_helper(header, response) def create_virtual_wire(self, vdn_scope_id, request): self._virtual_wire_id += 1 header = {'status': 200} virtual_wire = 'virtualwire-%s' % self._virtual_wire_id data = 
{'name': request['virtualWireCreateSpec']['name'], 'objectId': virtual_wire} self._fake_virtual_wires.update({virtual_wire: data}) return (header, virtual_wire) def delete_virtual_wire(self, virtualwire_id): del self._fake_virtual_wires[virtualwire_id] header = { 'status': 200 } response = '' return (header, response) def create_port_group(self, dvs_id, request): self._portgroup_id += 1 header = {'status': 200} portgroup = 'dvportgroup-%s' % self._portgroup_id data = {'name': request['networkSpec']['networkName'], 'objectId': portgroup} self._fake_portgroups.update({portgroup: data}) return (header, portgroup) def delete_port_group(self, dvs_id, portgroup_id): del self._fake_portgroups[portgroup_id] header = { 'status': 200 } response = '' return (header, response) def return_helper(self, header, response): status = int(header['status']) if 200 <= status <= 300: return (header, response) if status in self.errors: cls = self.errors[status] else: cls = exceptions.VcnsApiException raise cls( status=status, header=header, uri='fake_url', response=response) def _get_bad_req_response(self, details, error_code, module_name): bad_req_response_format = """
%(details)s
%(error_code)s %(module_name)s
""" return bad_req_response_format % { 'details': details, 'error_code': error_code, 'module_name': module_name, } def _get_section_location(self, type, section_id): return SECTION_LOCATION_HEADER % (type, section_id) def _get_section_id_from_uri(self, section_uri): return section_uri.split('/')[-1] def _section_not_found(self, section_id): msg = "Invalid section id found : %s" % section_id response = self._get_bad_req_response(msg, 100089, 'vShield App') headers = {'status': 400} return (headers, response) def _unknown_error(self): msg = "Unknown Error Occurred.Please look into tech support logs." response = self._get_bad_req_response(msg, 100046, 'vShield App') headers = {'status': 400} return (headers, response) def create_security_group(self, request): sg = request['securitygroup'] if sg['name'] in self._securitygroups['names']: status = 400 msg = ("Another object with same name : %s already exists in " "the current scope : globalroot-0." % sg['name']) response = self._get_bad_req_response(msg, 210, 'core-services') else: sg_id = str(self._securitygroups['ids']) self._securitygroups['ids'] += 1 sg['members'] = set() self._securitygroups[sg_id] = sg self._securitygroups['names'].add(sg['name']) status, response = 201, sg_id return ({'status': status}, response) def update_security_group(self, sg_id, sg_name, description): sg = self._securitygroups[sg_id] self._securitygroups['names'].remove(sg['name']) sg['name'] = sg_name sg['description'] = description self._securitygroups['names'].add(sg_name) return {'status': 200}, '' def delete_security_group(self, securitygroup_id): try: del self._securitygroups[securitygroup_id] except KeyError: status = 404 msg = ("The requested object : %s could " "not be found. Object identifiers are case sensitive." 
% securitygroup_id) response = self._get_bad_req_response(msg, 210, 'core-services') else: status, response = 200, '' return ({'status': status}, response) def get_security_group_id(self, sg_name): for k, v in self._securitygroups.items(): if k not in ('ids', 'names') and v['name'] == sg_name: return k def get_security_group(self, sg_id): sg = self._securitygroups.get(sg_id) if sg: return ('%s"%s"' '' % (sg_id, sg.get("name"))) def list_security_groups(self): response = "" header = {'status': 200} for k in self._securitygroups.keys(): if k not in ('ids', 'names'): response += self.get_security_group(k) response = "%s" % response return header, response def create_redirect_section(self, request): return self.create_section('layer3redirect', request) def create_section(self, type, request, insert_top=False, insert_before=None): section = ET.fromstring(request) section_name = section.attrib.get('name') if section_name in self._sections['names']: msg = "Section with name %s already exists." % section_name response = self._get_bad_req_response(msg, 100092, 'vShield App') headers = {'status': 400} else: section_id = str(self._sections['section_ids']) section.attrib['id'] = 'section-%s' % section_id _section = self._sections[section_id] = {'name': section_name, 'etag': 'Etag-0', 'rules': {}} self._sections['names'].add(section_name) for rule in section.findall('rule'): rule_id = str(self._sections['rule_ids']) rule.attrib['id'] = rule_id _section['rules'][rule_id] = ET.tostring(rule) self._sections['rule_ids'] += 1 response = ET.tostring(section) headers = { 'status': 201, 'location': self._get_section_location(type, section_id), 'etag': _section['etag'] } self._sections['section_ids'] += 1 return (headers, response) def update_section(self, section_uri, request, h): section = ET.fromstring(request) section_id = section.attrib.get('id') section_name = section.attrib.get('name') if section_id not in self._sections: return self._section_not_found(section_id) _section = 
self._sections[section_id] if (_section['name'] != section_name and section_name in self._sections['names']): # There's a section with this name already headers, response = self._unknown_error() else: # Different Etag every successful update _section['etag'] = ('Etag-1' if _section['etag'] == 'Etag-0' else 'Etag-0') self._sections['names'].remove(_section['name']) _section['name'] = section_name self._sections['names'].add(section_name) for rule in section.findall('rule'): if not rule.attrib.get('id'): rule.attrib['id'] = str(self._sections['rule_ids']) self._sections['rule_ids'] += 1 rule_id = rule.attrib.get('id') _section['rules'][rule_id] = ET.tostring(rule) _, response = self._get_section(section_id) headers = { 'status': 200, 'location': self._get_section_location(type, section_id), 'etag': _section['etag'] } return (headers, response) def delete_section(self, section_uri): section_id = self._get_section_id_from_uri(section_uri) if section_id not in self._sections: headers, response = self._unknown_error() else: section_name = self._sections[section_id]['name'] del self._sections[section_id] self._sections['names'].remove(section_name) response = '' headers = {'status': 204} return (headers, response) def get_section(self, section_uri): section_id = self._get_section_id_from_uri(section_uri) if section_id not in self._sections: headers, response = self._section_not_found(section_id) else: return self._get_section(section_id) def get_section_rules(self, section_uri): return [] def _get_section(self, section_id): section_rules = ( b''.join(self._sections[section_id]['rules'].values())) response = ('
%s
' % (section_id, self._sections[section_id]['name'], section_rules)) headers = {'status': 200, 'etag': self._sections[section_id]['etag']} return (headers, response) def get_section_id(self, section_name): for k, v in self._sections.items(): if (k not in ('section_ids', 'rule_ids', 'names') and v['name'] == section_name): return k def update_section_by_id(self, id, type, request): pass def get_default_l3_id(self): return 1234 def get_dfw_config(self): response = "" for sec_id in self._sections.keys(): if sec_id.isdigit(): h, r = self._get_section(str(sec_id)) response += r response = "%s" % response headers = {'status': 200} return (headers, response) def remove_rule_from_section(self, section_uri, rule_id): section_id = self._get_section_id_from_uri(section_uri) if section_id not in self._sections: headers, response = self._section_not_found(section_id) else: section = self._sections[section_id] if rule_id in section['rules']: del section['rules'][rule_id] response = '' headers = {'status': 204} else: headers, response = self._unknown_error() return (headers, response) def add_member_to_security_group(self, security_group_id, member_id): if security_group_id not in self._securitygroups: msg = ("The requested object : %s could not be found." "Object identifiers are case " "sensitive.") % security_group_id response = self._get_bad_req_response(msg, 202, 'core-services') headers = {'status': 404} else: self._securitygroups[security_group_id]['members'].add(member_id) response = '' headers = {'status': 200} return (headers, response) def remove_member_from_security_group(self, security_group_id, member_id): if security_group_id not in self._securitygroups: msg = ("The requested object : %s could not be found." 
"Object identifiers are " "case sensitive.") % security_group_id response = self._get_bad_req_response(msg, 202, 'core-services') headers = {'status': 404} else: self._securitygroups[security_group_id]['members'].remove( member_id) response = '' headers = {'status': 200} return (headers, response) def create_spoofguard_policy(self, enforcement_points, name, enable): policy = {'name': name, 'enforcementPoints': [{'id': enforcement_points[0]}], 'operationMode': 'MANUAL' if enable else 'DISABLE'} policy_id = len(self._spoofguard_policies) self._spoofguard_policies.append(policy) return None, 'spoofguardpolicy-%s' % policy_id def _get_index(self, policy_id): return int(policy_id.split('-')[-1]) def update_spoofguard_policy(self, policy_id, enforcement_points, name, enable): policy = {'name': name, 'enforcementPoints': [{'id': enforcement_points[0]}], 'operationMode': 'MANUAL' if enable else 'DISABLE'} self._spoofguard_policies[self._get_index(policy_id)] = policy return None, '' def delete_spoofguard_policy(self, policy_id): self._spoofguard_policies[self._get_index(policy_id)] = {} def get_spoofguard_policy(self, policy_id): try: return None, self._spoofguard_policies[self._get_index(policy_id)] except IndexError: raise exceptions.VcnsGeneralException( _("Spoofguard policy not found")) def get_spoofguard_policy_data(self, policy_id, list_type='INACTIVE'): return None, {'spoofguardList': []} def get_spoofguard_policies(self): return None, {'policies': self._spoofguard_policies} def approve_assigned_addresses(self, policy_id, vnic_id, mac_addr, addresses): pass def publish_assigned_addresses(self, policy_id, vnic_id): pass def configure_reservations(self): pass def inactivate_vnic_assigned_addresses(self, policy_id, vnic_id): pass def add_vm_to_exclude_list(self, vm_id): pass def delete_vm_from_exclude_list(self, vm_id): pass def get_scoping_objects(self): response = ('' 'Network' 'aaa' 'bbb' '') return response def reset_all(self): self._jobs.clear() 
# NOTE(review): tar-dump chunk reformatted; begins mid reset_all(), which
# clears all fake backend state so tests start from a clean slate.
        self._edges.clear()
        self._lswitches.clear()
        self.fake_firewall_dict = {}
        self._fake_virtualservers_dict = {}
        self._fake_pools_dict = {}
        self._fake_monitors_dict = {}
        self._fake_app_profiles_dict = {}
        self._fake_loadbalancer_config = {}
        self._fake_virtual_wires = {}
        self._virtual_wire_id = 0
        self._fake_portgroups = {}
        self._portgroup_id = 0
        # 'ids' is the next numeric id to hand out; 'names' enforces
        # name uniqueness for security groups.
        self._securitygroups = {'ids': 0, 'names': set()}
        self._sections = {'section_ids': 0, 'rule_ids': 0, 'names': set()}
        self._dhcp_bindings = {}
        self._ipam_pools = {}

    def validate_datacenter_moid(self, object_id, during_init=False):
        # Validation stubs: the fake backend accepts any inventory object.
        return True

    def validate_network(self, object_id, during_init=False):
        return True

    def validate_network_name(self, object_id, name, during_init=False):
        return True

    def validate_vdn_scope(self, object_id):
        return True

    def get_dvs_list(self):
        # No distributed virtual switches in the fake inventory.
        return []

    def validate_dvs(self, object_id, dvs_list=None):
        return True

    def edges_lock_operation(self):
        pass

    def validate_inventory(self, moref):
        return True

    def get_version(self):
        # The fake backend reports itself as NSX-v 6.4.6.
        return '6.4.6'

    def get_tuning_configuration(self):
        """Return a canned NSX tuning configuration document."""
        return {
            'lockUpdatesOnEdge': True,
            'edgeVMHealthCheckIntervalInMin': 0,
            'aggregatePublishing': False,
            'publishingTimeoutInMs': 1200000,
            'healthCheckCommandTimeoutInMs': 120000,
            'maxParallelVixCallsForHealthCheck': 25}

    def configure_aggregate_publishing(self):
        pass

    def enable_ha(self, edge_id, request_config):
        """Pretend HA was enabled; always 201 with an empty body."""
        header = {
            'status': 201
        }
        response = ''
        return (header, response)

    def get_edge_syslog(self, edge_id):
        """Return the edge's stored syslog config, or 400 if none is set.

        NOTE(review): assumes edge_id exists in self._edges — a missing
        edge would raise TypeError on the 'in' check; confirm callers
        only pass known edges.
        """
        if ('syslog' not in self._edges.get(edge_id)):
            header = {
                'status': 400
            }
            response = {}
        else:
            header = {
                'status': 200
            }
            response = self._edges.get(edge_id)['syslog']
        return (header, response)

    def update_edge_syslog(self, edge_id, config):
        """Store syslog config on the edge; raise if the edge is unknown."""
        if edge_id not in self._edges:
            raise exceptions.VcnsGeneralException(
                _("edge not found"))
        self._edges[edge_id]['syslog'] = config
        header = {
            'status': 204
        }
        response = ''
        return (header, response)

    def delete_edge_syslog(self, edge_id):
        # Always succeeds; the stored config is not actually removed.
        header = {
            'status': 204
        }
        response = ''
        # NOTE(review): chunk boundary — the closing of this return
        # continues on the next (unedited) line.
        return (header,
response) def update_edge_config_with_modifier(self, edge_id, module, modifier): header = { 'status': 204 } response = '' return (header, response) def change_edge_appliance_size(self, edge_id, size): header = { 'status': 204 } response = {} return (header, response) def change_edge_appliance(self, edge_id, request): header = { 'status': 204 } response = {} return (header, response) def get_edge_appliances(self, edge_id): header = { 'status': 204 } response = {} return (header, response) def get_routes(self, edge_id): header = { 'status': 204 } response = {'staticRoutes': {'staticRoutes': []}} return (header, response) def get_service_insertion_profile(self, profile_id): headers = {'status': 200} response = """ %s ServiceProfile ServiceProfile Service_Vendor securitygroup-30 """ response_format = response % profile_id return (headers, response_format) def update_service_insertion_profile_binding(self, profile_id, request): response = '' headers = {'status': 200} return (headers, response) def create_ipam_ip_pool(self, request): pool_id = uuidutils.generate_uuid() # format the request before saving it: fixed_request = request['ipamAddressPool'] ranges = fixed_request['ipRanges'] for i in range(len(ranges)): ranges[i] = ranges[i]['ipRangeDto'] self._ipam_pools[pool_id] = {'request': fixed_request, 'allocated': []} header = {'status': 200} response = pool_id return (header, response) def delete_ipam_ip_pool(self, pool_id): response = '' if pool_id in self._ipam_pools: pool = self._ipam_pools.pop(pool_id) if len(pool['allocated']) > 0: header = {'status': 400} msg = ("Unable to delete IP pool %s. IP addresses from this " "pool are being used." % pool_id) response = self._get_bad_req_response( msg, 120053, 'core-services') else: header = {'status': 200} return (header, response) else: header = {'status': 400} msg = ("Unable to delete IP pool %s. Pool does not exist." 
% pool_id) response = self._get_bad_req_response( msg, 120054, 'core-services') return self.return_helper(header, response) def get_ipam_ip_pool(self, pool_id): if pool_id in self._ipam_pools: header = {'status': 200} response = self._ipam_pools[pool_id]['request'] else: header = {'status': 400} msg = ("Unable to retrieve IP pool %s. Pool does not exist." % pool_id) response = self._get_bad_req_response( msg, 120054, 'core-services') return self.return_helper(header, response) def _allocate_ipam_add_ip_and_return(self, pool, ip_addr): # build the response response_text = ( "" "%(id)s" "%(ip)s" "%(gateway)s" "%(prefix)s" "subnet-44") response_args = {'id': len(pool['allocated']), 'gateway': pool['request']['gateway'], 'prefix': pool['request']['prefixLength']} response_args['ip'] = ip_addr response = response_text % response_args # add the ip to the list of allocated ips pool['allocated'].append(ip_addr) header = {'status': 200} return (header, response) def allocate_ipam_ip_from_pool(self, pool_id, ip_addr=None): if pool_id in self._ipam_pools: pool = self._ipam_pools[pool_id] if ip_addr: # verify that this ip was not yet allocated if ip_addr in pool['allocated']: header = {'status': 400} msg = ("Unable to allocate IP from pool %(pool)s. " "IP %(ip)s already in use." % {'pool': pool_id, 'ip': ip_addr}) response = self._get_bad_req_response( msg, constants.NSX_ERROR_IPAM_ALLOCATE_IP_USED, 'core-services') else: return self._allocate_ipam_add_ip_and_return( pool, ip_addr) else: # get an unused ip from the pool for ip_range in pool['request']['ipRanges']: r = netaddr.IPRange(ip_range['startAddress'], ip_range['endAddress']) for ip_addr in r: if str(ip_addr) not in pool['allocated']: return self._allocate_ipam_add_ip_and_return( pool, str(ip_addr)) # if we got here - no ip was found header = {'status': 400} msg = ("Unable to allocate IP from pool %(pool)s. " "All IPs have been used." 
% {'pool': pool_id}) response = self._get_bad_req_response( msg, constants.NSX_ERROR_IPAM_ALLOCATE_ALL_USED, 'core-services') else: header = {'status': 400} msg = ("Unable to allocate IP from pool %s. Pool does not " "exist." % pool_id) response = self._get_bad_req_response( msg, 120054, 'core-services') return self.return_helper(header, response) def release_ipam_ip_to_pool(self, pool_id, ip_addr): if pool_id in self._ipam_pools: pool = self._ipam_pools[pool_id] if ip_addr not in pool['allocated']: header = {'status': 400} msg = ("IP %(ip)s was not allocated from pool %(pool)s." % {'ip': ip_addr, 'pool': pool_id}) response = self._get_bad_req_response( msg, 120056, 'core-services') else: pool['allocated'].remove(ip_addr) response = '' header = {'status': 200} else: header = {'status': 400} msg = ("Unable to release IP to pool %s. Pool does not exist." % pool_id) response = self._get_bad_req_response( msg, 120054, 'core-services') return self.return_helper(header, response) def get_security_policy(self, policy_id, return_xml=True): name = 'pol1' description = 'dummy' if return_xml: response_text = ( "" "%(id)s" "%(name)s" "%(desc)s" "") % {'id': policy_id, 'name': name, 'desc': description} return response_text else: return {'objectId': policy_id, 'name': name, 'description': description} def update_security_policy(self, policy_id, request): pass def get_security_policies(self): policies = [] for id in ['policy-1', 'policy-2', 'policy-3']: policies.append(self.get_security_policy(id, return_xml=False)) return {'policies': policies} def list_applications(self): applications = [{'name': 'ICMP Echo', 'objectID': 'application-333'}, {'name': 'IPv6-ICMP Echo', 'objectID': 'application-1001'}] return applications def update_dynamic_routing_service(self, edge_id, request_config): header = {'status': 201} response = { 'routerId': '172.24.4.12', 'ipPrefixes': { 'ipPrefixes': [ {'ipAddress': '10.0.0.0/24', 'name': 'prefix-name'} ] } } return self.return_helper(header, 
response) def get_edge_routing_config(self, edge_id): header = {'status': 200} response = { 'featureType': '', 'ospf': {}, 'routingGlobalConfig': { 'routerId': '172.24.4.12', 'ipPrefixes': { 'ipPrefixes': [ {'ipAddress': '10.0.0.0/24', 'name': 'prefix-name'} ] }, 'logging': { 'logLevel': 'info', 'enable': False }, 'ecmp': False } } return self.return_helper(header, response) def update_edge_routing_config(self, edge_id, request): header = {'status': 200} return self.return_helper(header, {}) def update_bgp_dynamic_routing(self, edge_id, bgp_request): header = {"status": 201} response = { "localAS": 65000, "enabled": True, "bgpNeighbours": { "bgpNeighbours": [ { "bgpFilters": { "bgpFilters": [ { "action": "deny", "direction": "in" } ] }, "password": None, "ipAddress": "172.24.4.253", "remoteAS": 65000 } ] }, "redistribution": { "rules": { "rules": [ { "action": "deny", "from": { "bgp": False, "connected": False, "static": False, "ospf": False }, "id": 0 }, { "action": "permit", "from": { "bgp": False, "connected": True, "static": True, "ospf": False }, "id": 1, "prefixName": "eee4eb79-359e-4416" } ] }, "enabled": True } } return self.return_helper(header, response) def get_bgp_routing_config(self, edge_id): header = {'status': 200} response = { "localAS": 65000, "enabled": True, "redistribution": { "rules": { "rules": [ { "action": "deny", "from": { "bgp": False, "connected": False, "static": False, "ospf": False }, "id": 0 }, { "action": "permit", "from": { "bgp": False, "connected": True, "static": True, "ospf": False }, "id": 1, "prefixName": "eee4eb79-359e-4416" } ] }, "enabled": True } } return self.return_helper(header, response) def delete_bgp_routing_config(self, edge_id): header = {'status': 200} response = '' return header, response def get_application_id(self, name): return 'application-123' def get_tz_connectivity_info(self, vdn_scope_id): return {'clustersInfo': [{ 'clusterId': 'fake_cluster_moid', 'standardNetworks': [{'id': 'fake_net'}], 
'distributedVirtualPortGroups': [{'id': 'net-1'}], 'distributedVirtualSwitches': [{'id': 'fake_dvs_id'}], }]} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_v/vshield/test_edge_utils.py0000644000175000017500000011462300000000000030050 0ustar00coreycorey00000000000000# Copyright 2014 VMware, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import mock from neutron_lib import constants from neutron_lib import context from oslo_config import cfg from oslo_utils import uuidutils from six import moves from neutron.tests.unit import testlib_api from neutron_lib import exceptions as n_exc from vmware_nsx.common import config as conf from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import nsxv_constants from vmware_nsx.db import nsxv_db from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az from vmware_nsx.plugins.nsx_v.vshield.common import ( constants as vcns_const) from vmware_nsx.plugins.nsx_v.vshield import edge_utils from vmware_nsx.tests import unit as vmware _uuid = uuidutils.generate_uuid #Four types of backup edge with different status EDGE_AVAIL = 'available-' EDGE_CREATING = 'creating-' EDGE_ERROR1 = 'error1-' EDGE_ERROR2 = 'error2-' EDGE_DELETING = 'deleting-' DEFAULT_AZ = 'default' class EdgeUtilsTestCaseMixin(testlib_api.SqlTestCase): def setUp(self): super(EdgeUtilsTestCaseMixin, self).setUp() nsxv_manager_p = 
mock.patch(vmware.VCNS_DRIVER_NAME, autospec=True) self.nsxv_manager = nsxv_manager_p.start() task = mock.Mock() nsxv_manager_p.return_value = task self.nsxv_manager.callbacks = mock.Mock() self.nsxv_manager.vcns = mock.Mock() get_ver = mock.patch.object(self.nsxv_manager.vcns, 'get_version').start() get_ver.return_value = '6.1.4' self.ctx = context.get_admin_context() self.addCleanup(nsxv_manager_p.stop) self.az = (nsx_az.NsxVAvailabilityZones(). get_default_availability_zone()) def _create_router(self, name='router1'): return {'name': name, 'id': _uuid()} def _create_network(self, name='network'): return {'name': name, 'id': _uuid()} def _create_subnet(self, name='subnet'): return {'name': name, 'id': _uuid()} def _populate_vcns_router_binding(self, bindings): for binding in bindings: nsxv_db.init_edge_vnic_binding(self.ctx.session, binding['edge_id']) nsxv_db.add_nsxv_router_binding( self.ctx.session, binding['router_id'], binding['edge_id'], None, binding['status'], appliance_size=binding['appliance_size'], edge_type=binding['edge_type'], availability_zone=binding['availability_zone']) class DummyPlugin(object): def get_network_az_by_net_id(self, context, network_id): return (nsx_az.NsxVAvailabilityZones(). 
get_default_availability_zone()) class EdgeDHCPManagerTestCase(EdgeUtilsTestCaseMixin): def setUp(self): super(EdgeDHCPManagerTestCase, self).setUp() self.edge_manager = edge_utils.EdgeManager(self.nsxv_manager, None) self.check = mock.patch.object(self.edge_manager, 'check_edge_active_at_backend').start() self.check.return_value = True def test_create_dhcp_edge_service(self): fake_edge_pool = [{'status': constants.ACTIVE, 'edge_id': 'edge-1', 'router_id': 'backup-11111111-1111', 'appliance_size': 'compact', 'edge_type': 'service', 'availability_zone': DEFAULT_AZ}, {'status': constants.PENDING_DELETE, 'edge_id': 'edge-2', 'router_id': 'dhcp-22222222-2222', 'appliance_size': 'compact', 'edge_type': 'service', 'availability_zone': DEFAULT_AZ}, {'status': constants.PENDING_DELETE, 'edge_id': 'edge-3', 'router_id': 'backup-33333333-3333', 'appliance_size': 'compact', 'edge_type': 'service', 'availability_zone': DEFAULT_AZ}] self._populate_vcns_router_binding(fake_edge_pool) fake_network = self._create_network() fake_subnet = self._create_subnet(fake_network['id']) self.edge_manager.plugin = DummyPlugin() with mock.patch.object(self.edge_manager, '_get_used_edges', return_value=([], [])): self.edge_manager.create_dhcp_edge_service(self.ctx, fake_network['id'], fake_subnet) self.nsxv_manager.rename_edge.assert_called_once_with('edge-1', mock.ANY) def test_get_random_available_edge(self): available_edge_ids = ['edge-1', 'edge-2'] selected_edge_id = self.edge_manager._get_random_available_edge( available_edge_ids) self.assertIn(selected_edge_id, available_edge_ids) def test_get_random_available_edge_missing_edges_returns_none(self): available_edge_ids = ['edge-1', 'edge-2'] # Always return inactive(False) while checking whether the edge # exists on the backend. 
with mock.patch.object(self.edge_manager, 'check_edge_active_at_backend', return_value=False): selected_edge_id = self.edge_manager._get_random_available_edge( available_edge_ids) # If no active edges are found on the backend, return None so that # a new DHCP edge is created. self.assertIsNone(selected_edge_id) class EdgeUtilsTestCase(EdgeUtilsTestCaseMixin): def setUp(self): super(EdgeUtilsTestCase, self).setUp() self.edge_manager = edge_utils.EdgeManager(self.nsxv_manager, None) # Args for vcns interface configuration self.internal_ip = '10.0.0.1' self.uplink_ip = '192.168.111.30' self.subnet_mask = '255.255.255.0' self.pref_len = '24' self.edge_id = 'dummy' self.orig_vnics = ({}, {'vnics': [ {'addressGroups': {'addressGroups': [ {'subnetMask': self.subnet_mask, 'subnetPrefixLength': self.pref_len, 'primaryAddress': self.uplink_ip}]}, 'type': 'uplink', 'index': 1}, {'addressGroups': {'addressGroups': [ {'subnetMask': self.subnet_mask, 'subnetPrefixLength': self.pref_len, 'primaryAddress': self.internal_ip}]}, 'type': 'internal', 'index': 2}]} ) # Args for vcns vdr interface configuration self.vdr_ip = '10.0.0.1' self.vnic = 1 self.orig_vdr = ({}, {'index': 2, 'addressGroups': {'addressGroups': [{'subnetMask': self.subnet_mask, 'subnetPrefixLength': self.pref_len, 'primaryAddress': self.vdr_ip}]}, 'type': 'internal'}) def test_create_lrouter(self): lrouter = self._create_router() self.nsxv_manager.deploy_edge.reset_mock() edge_utils.create_lrouter(self.nsxv_manager, self.ctx, lrouter, lswitch=None, dist=False, availability_zone=self.az) self.nsxv_manager.deploy_edge.assert_called_once_with(self.ctx, lrouter['id'], (lrouter['name'] + '-' + lrouter['id']), internal_network=None, dist=False, availability_zone=self.az, appliance_size=vcns_const.SERVICE_SIZE_MAPPING['router']) def _test_update_intereface_primary_addr(self, old_ip, new_ip, isUplink): fixed_vnic = {'addressGroups': {'addressGroups': [ {'subnetMask': self.subnet_mask, 'subnetPrefixLength': self.pref_len, 
'primaryAddress': new_ip}] if new_ip else []}, 'type': 'uplink' if isUplink else 'internal', 'index': 1 if isUplink else 2} with mock.patch.object(self.nsxv_manager.vcns, 'get_interfaces', return_value=self.orig_vnics): self.edge_manager.update_interface_addr( self.ctx, self.edge_id, old_ip, new_ip, self.subnet_mask, is_uplink=isUplink) self.nsxv_manager.vcns.update_interface.assert_called_once_with( self.edge_id, fixed_vnic) def test_update_interface_addr_intrernal(self): self._test_update_intereface_primary_addr( self.internal_ip, '10.0.0.2', False) def test_remove_interface_primary_addr_intrernal(self): self._test_update_intereface_primary_addr( self.internal_ip, None, False) def test_update_interface_addr_uplink(self): self._test_update_intereface_primary_addr( self.uplink_ip, '192.168.111.31', True) def test_remove_interface_primary_addr_uplink(self): self._test_update_intereface_primary_addr( self.uplink_ip, None, True) def _test_update_intereface_secondary_addr(self, old_ip, new_ip): addr_group = {'subnetMask': self.subnet_mask, 'subnetPrefixLength': self.pref_len, 'primaryAddress': self.uplink_ip, 'secondaryAddresses': {'type': 'secondary_addresses', 'ipAddress': [new_ip]}} fixed_vnic = {'addressGroups': {'addressGroups': [addr_group]}, 'type': 'uplink', 'index': 1} with mock.patch.object(self.nsxv_manager.vcns, 'get_interfaces', return_value=self.orig_vnics): self.edge_manager.update_interface_addr( self.ctx, self.edge_id, old_ip, new_ip, self.subnet_mask, is_uplink=True) self.nsxv_manager.vcns.update_interface.assert_called_once_with( self.edge_id, fixed_vnic) def test_add_secondary_interface_addr(self): self._test_update_intereface_secondary_addr( None, '192.168.111.31') def test_update_interface_addr_fail(self): # Old ip is not configured on the interface, so we should fail old_ip = '192.168.111.32' new_ip = '192.168.111.31' with mock.patch.object(self.nsxv_manager.vcns, 'get_interfaces', return_value=self.orig_vnics): self.assertRaises( 
nsx_exc.NsxPluginException, self.edge_manager.update_interface_addr, self.ctx, self.edge_id, old_ip, new_ip, self.subnet_mask, is_uplink=True) def _test_update_vdr_intereface_primary_addr(self, old_ip, new_ip): fixed_vnic = {'addressGroups': {'addressGroups': [ {'subnetMask': self.subnet_mask, 'subnetPrefixLength': self.pref_len, 'primaryAddress': new_ip}] if new_ip else []}, 'type': 'internal', 'index': 2} with mock.patch.object(self.nsxv_manager.vcns, 'get_vdr_internal_interface', return_value=self.orig_vdr): with mock.patch.object(self.nsxv_manager.vcns, 'update_vdr_internal_interface') as vcns_update: self.edge_manager.update_vdr_interface_addr( self.ctx, self.edge_id, self.vnic, old_ip, new_ip, self.subnet_mask) vcns_update.assert_called_once_with(self.edge_id, self.vnic, {'interface': fixed_vnic}) def test_update_vdr_interface_addr_intrernal(self): self._test_update_vdr_intereface_primary_addr( self.vdr_ip, '20.0.0.2') def test_remove_vdr_interface_primary_addr_intrernal(self): self._test_update_vdr_intereface_primary_addr( self.vdr_ip, None) def test_update_vdr_interface_addr_fail(self): # Old ip is not configured on the vdr interface, so we should fail old_ip = '192.168.111.32' new_ip = '192.168.111.31' with mock.patch.object(self.nsxv_manager.vcns, 'get_vdr_internal_interface', return_value=self.orig_vdr): self.assertRaises( nsx_exc.NsxPluginException, self.edge_manager.update_vdr_interface_addr, self.ctx, self.edge_id, self.vnic, old_ip, new_ip, self.subnet_mask) class EdgeManagerTestCase(EdgeUtilsTestCaseMixin): def setUp(self): super(EdgeManagerTestCase, self).setUp() cfg.CONF.set_override('backup_edge_pool', [], 'nsxv') self.edge_manager = edge_utils.EdgeManager(self.nsxv_manager, None) self.check = mock.patch.object(self.edge_manager, 'check_edge_active_at_backend').start() self.check.side_effect = self.check_edge_active_at_backend self.default_edge_pool_dicts = {'default': { nsxv_constants.SERVICE_EDGE: { nsxv_constants.LARGE: 
{'minimum_pooled_edges': 1, 'maximum_pooled_edges': 3}, nsxv_constants.COMPACT: {'minimum_pooled_edges': 1, 'maximum_pooled_edges': 3}}, nsxv_constants.VDR_EDGE: {}}} self.vdr_edge_pool_dicts = {'default': { nsxv_constants.SERVICE_EDGE: {}, nsxv_constants.VDR_EDGE: { nsxv_constants.LARGE: {'minimum_pooled_edges': 1, 'maximum_pooled_edges': 3}}}} def check_edge_active_at_backend(self, edge_id): # workaround to let edge_id None pass since we wrapped router binding # db update op. if edge_id is None: edge_id = "" return not (edge_id.startswith(EDGE_ERROR1) or edge_id.startswith(EDGE_ERROR2)) def test_backup_edge_pool_with_default(self): cfg.CONF.set_override('backup_edge_pool', ['service:large:1:3', 'service:compact:1:3'], 'nsxv') az = nsx_az.NsxVAvailabilityZone(None) edge_pool_dicts = edge_utils.parse_backup_edge_pool_opt_per_az(az) self.assertEqual(self.default_edge_pool_dicts['default'], edge_pool_dicts) def test_backup_edge_pool_with_empty_conf(self): cfg.CONF.set_override('backup_edge_pool', [], 'nsxv') az = nsx_az.NsxVAvailabilityZone(None) edge_pool_dicts = edge_utils.parse_backup_edge_pool_opt_per_az(az) expect_edge_pool_dicts = { nsxv_constants.SERVICE_EDGE: {}, nsxv_constants.VDR_EDGE: {}} self.assertEqual(expect_edge_pool_dicts, edge_pool_dicts) def test_backup_edge_pool_with_vdr_conf(self): cfg.CONF.set_override('backup_edge_pool', ['vdr:large:1:3'], 'nsxv') az = nsx_az.NsxVAvailabilityZone(None) edge_pool_dicts = edge_utils.parse_backup_edge_pool_opt_per_az(az) expect_edge_pool_dicts = self.vdr_edge_pool_dicts['default'] self.assertEqual(expect_edge_pool_dicts, edge_pool_dicts) def test_backup_edge_pool_with_duplicate_conf(self): cfg.CONF.set_override('backup_edge_pool', ['service:compact:1:3', 'service::3:4'], 'nsxv') az = nsx_az.NsxVAvailabilityZone(None) self.assertRaises(n_exc.Invalid, edge_utils.parse_backup_edge_pool_opt_per_az, az) def _create_router_bindings(self, num, status, id_prefix, size, edge_type, availability_zone): if not 
availability_zone: availability_zone = self.az return [{'status': status, 'edge_id': id_prefix + '-edge-' + str(i), 'router_id': (vcns_const.BACKUP_ROUTER_PREFIX + id_prefix + str(i)), 'appliance_size': size, 'edge_type': edge_type, 'availability_zone': availability_zone.name} for i in moves.range(num)] def _create_available_router_bindings( self, num, size=nsxv_constants.LARGE, edge_type=nsxv_constants.SERVICE_EDGE, availability_zone=None): status = constants.ACTIVE id_prefix = EDGE_AVAIL + size + '-' + edge_type return self._create_router_bindings( num, status, id_prefix, size, edge_type, availability_zone) def _create_creating_router_bindings( self, num, size=nsxv_constants.LARGE, edge_type=nsxv_constants.SERVICE_EDGE, availability_zone=None): status = constants.PENDING_CREATE id_prefix = EDGE_CREATING + size + '-' + edge_type return self._create_router_bindings( num, status, id_prefix, size, edge_type, availability_zone) def _create_error_router_bindings( self, num, status=constants.ERROR, size=nsxv_constants.LARGE, edge_type=nsxv_constants.SERVICE_EDGE, availability_zone=None): id_prefix = EDGE_ERROR1 + size + '-' + edge_type return self._create_router_bindings( num, status, id_prefix, size, edge_type, availability_zone) def _create_error_router_bindings_at_backend( self, num, status=constants.ACTIVE, size=nsxv_constants.LARGE, edge_type=nsxv_constants.SERVICE_EDGE, availability_zone=None): id_prefix = EDGE_ERROR2 + size + '-' + edge_type return self._create_router_bindings( num, status, id_prefix, size, edge_type, availability_zone) def _create_deleting_router_bindings( self, num, size=nsxv_constants.LARGE, edge_type=nsxv_constants.SERVICE_EDGE, availability_zone=None): status = constants.PENDING_DELETE id_prefix = EDGE_DELETING + size + '-' + edge_type return self._create_router_bindings( num, status, id_prefix, size, edge_type, availability_zone) def _create_edge_pools(self, avail, creating, error, error_at_backend, deleting, size=nsxv_constants.LARGE, 
edge_type=nsxv_constants.SERVICE_EDGE): """Create a backup edge pool with different status of edges. Backup edges would be edges with avail, creating and error_at_backend, while available edges would only be edges with avail status. """ availability_zone = self.az return ( self._create_error_router_bindings( error, size=size, edge_type=edge_type, availability_zone=availability_zone) + self._create_deleting_router_bindings( deleting, size=size, edge_type=edge_type, availability_zone=availability_zone) + self._create_error_router_bindings_at_backend( error_at_backend, size=size, edge_type=edge_type, availability_zone=availability_zone) + self._create_creating_router_bindings( creating, size=size, edge_type=edge_type, availability_zone=availability_zone) + self._create_available_router_bindings( avail, size=size, edge_type=edge_type, availability_zone=availability_zone)) def _create_backup_router_bindings( self, avail, creating, error, error_at_backend, deleting, error_status=constants.PENDING_DELETE, error_at_backend_status=constants.PENDING_DELETE, size=nsxv_constants.LARGE, edge_type=nsxv_constants.SERVICE_EDGE, availability_zone=None): if not availability_zone: availability_zone = self.az return ( self._create_error_router_bindings( error, status=error_status, size=size, edge_type=edge_type, availability_zone=availability_zone) + self._create_error_router_bindings_at_backend( error_at_backend, status=error_at_backend_status, size=size, edge_type=edge_type, availability_zone=availability_zone) + self._create_creating_router_bindings( creating, size=size, edge_type=edge_type, availability_zone=availability_zone) + self._create_available_router_bindings( avail, size=size, edge_type=edge_type, availability_zone=availability_zone) + self._create_deleting_router_bindings( deleting, size=size, edge_type=edge_type, availability_zone=availability_zone)) def _verify_router_bindings(self, exp_bindings, act_db_bindings): exp_dict = dict(zip([binding['router_id'] for binding 
in exp_bindings], exp_bindings)) act_bindings = [{'router_id': binding['router_id'], 'edge_id': binding['edge_id'], 'status': binding['status'], 'appliance_size': binding['appliance_size'], 'edge_type': binding['edge_type'], 'availability_zone': binding['availability_zone']} for binding in act_db_bindings] act_dict = dict(zip([binding['router_id'] for binding in act_bindings], act_bindings)) self.assertEqual(exp_dict, act_dict) def test_get_backup_edge_bindings(self): """Test get backup edges filtering out deleting and error edges.""" pool_edges = (self._create_edge_pools(1, 2, 3, 4, 5) + self._create_edge_pools( 1, 2, 3, 4, 5, size=nsxv_constants.COMPACT)) self._populate_vcns_router_binding(pool_edges) expect_backup_bindings = self._create_backup_router_bindings( 1, 2, 0, 4, 0, error_at_backend_status=constants.ACTIVE, size=nsxv_constants.LARGE) backup_bindings = self.edge_manager._get_backup_edge_bindings(self.ctx, appliance_size=nsxv_constants.LARGE, availability_zone=self.az) self._verify_router_bindings(expect_backup_bindings, backup_bindings) def test_get_available_router_bindings(self): appliance_size = nsxv_constants.LARGE edge_type = nsxv_constants.SERVICE_EDGE pool_edges = (self._create_edge_pools(1, 2, 3, 0, 5) + self._create_edge_pools( 1, 2, 3, 0, 5, size=nsxv_constants.COMPACT)) self._populate_vcns_router_binding(pool_edges) expect_backup_bindings = self._create_backup_router_bindings( 1, 2, 3, 0, 5, error_status=constants.ERROR) binding = self.edge_manager._get_available_router_binding( self.ctx, appliance_size=appliance_size, edge_type=edge_type, availability_zone=self.az) router_bindings = [ binding_db for binding_db in nsxv_db.get_nsxv_router_bindings( self.ctx.session) if (binding_db['appliance_size'] == appliance_size and binding_db['edge_type'] == edge_type and binding_db['availability_zone'] == 'default')] self._verify_router_bindings(expect_backup_bindings, router_bindings) edge_id = (EDGE_AVAIL + appliance_size + '-' + edge_type + '-edge-' + 
str(0)) self.assertEqual(edge_id, binding['edge_id']) def test_check_backup_edge_pool_with_max(self): appliance_size = nsxv_constants.LARGE edge_type = nsxv_constants.SERVICE_EDGE pool_edges = (self._create_edge_pools(1, 2, 3, 4, 5) + self._create_edge_pools( 1, 2, 3, 4, 5, size=nsxv_constants.COMPACT)) self._populate_vcns_router_binding(pool_edges) expect_pool_bindings = self._create_backup_router_bindings( 1, 2, 3, 4, 5, error_status=constants.ERROR, error_at_backend_status=constants.PENDING_DELETE) self.edge_manager._check_backup_edge_pool( 0, 3, appliance_size=appliance_size, edge_type=edge_type, availability_zone=self.az) router_bindings = [ binding for binding in nsxv_db.get_nsxv_router_bindings(self.ctx.session) if (binding['appliance_size'] == appliance_size and binding['edge_type'] == edge_type)] self._verify_router_bindings(expect_pool_bindings, router_bindings) def test_check_backup_edge_pool_with_min(self): appliance_size = nsxv_constants.LARGE edge_type = nsxv_constants.SERVICE_EDGE pool_edges = (self._create_edge_pools(1, 2, 3, 0, 5) + self._create_edge_pools( 1, 2, 3, 4, 5, size=nsxv_constants.COMPACT)) self._populate_vcns_router_binding(pool_edges) edge_utils.eventlet = mock.Mock() edge_utils.eventlet.spawn_n.return_value = None self.edge_manager._check_backup_edge_pool( 5, 10, appliance_size=appliance_size, edge_type=edge_type, availability_zone=self.az) router_bindings = [ binding for binding in nsxv_db.get_nsxv_router_bindings(self.ctx.session) if binding['edge_id'] is None and binding['status'] == constants.PENDING_CREATE] binding_ids = [bind.router_id for bind in router_bindings] self.assertEqual(2, len(router_bindings)) edge_utils.eventlet.spawn_n.assert_called_with( mock.ANY, binding_ids, appliance_size, edge_type, self.az) def test_check_backup_edge_pools_with_empty_conf(self): pool_edges = (self._create_edge_pools(1, 2, 3, 4, 5) + self._create_edge_pools( 1, 2, 3, 4, 5, size=nsxv_constants.COMPACT) + self._create_edge_pools( 1, 2, 3, 4, 5, 
edge_type=nsxv_constants.VDR_EDGE)) self._populate_vcns_router_binding(pool_edges) self.edge_manager._check_backup_edge_pools() router_bindings = nsxv_db.get_nsxv_router_bindings(self.ctx.session) for binding in router_bindings: self.assertEqual(constants.PENDING_DELETE, binding['status']) def test_check_backup_edge_pools_with_default(self): self.edge_manager.edge_pool_dicts = self.default_edge_pool_dicts pool_edges = (self._create_edge_pools(1, 2, 3, 4, 5) + self._create_edge_pools( 1, 2, 3, 4, 5, size=nsxv_constants.COMPACT) + self._create_edge_pools( 1, 2, 3, 4, 5, edge_type=nsxv_constants.VDR_EDGE)) self._populate_vcns_router_binding(pool_edges) self.edge_manager._check_backup_edge_pools() router_bindings = nsxv_db.get_nsxv_router_bindings(self.ctx.session) expect_large_bindings = self._create_backup_router_bindings( 1, 2, 3, 4, 5, error_status=constants.PENDING_DELETE, error_at_backend_status=constants.PENDING_DELETE) large_bindings = [ binding for binding in router_bindings if (binding['appliance_size'] == nsxv_constants.LARGE and binding['edge_type'] == nsxv_constants.SERVICE_EDGE)] self._verify_router_bindings(expect_large_bindings, large_bindings) expect_compact_bindings = self._create_backup_router_bindings( 1, 2, 3, 4, 5, error_status=constants.PENDING_DELETE, error_at_backend_status=constants.PENDING_DELETE, size=nsxv_constants.COMPACT) compact_bindings = [ binding for binding in router_bindings if (binding['appliance_size'] == nsxv_constants.COMPACT and binding['edge_type'] == nsxv_constants.SERVICE_EDGE)] self._verify_router_bindings(expect_compact_bindings, compact_bindings) vdr_bindings = [ binding for binding in router_bindings if (binding['appliance_size'] == nsxv_constants.LARGE and binding['edge_type'] == nsxv_constants.VDR_EDGE)] for binding in vdr_bindings: self.assertEqual(constants.PENDING_DELETE, binding['status']) def test_check_backup_edge_pools_with_vdr(self): self.edge_manager.edge_pool_dicts = self.vdr_edge_pool_dicts pool_edges = 
(self._create_edge_pools(1, 2, 3, 4, 5) + self._create_edge_pools( 1, 2, 3, 4, 5, size=nsxv_constants.COMPACT) + self._create_edge_pools( 1, 2, 3, 4, 5, edge_type=nsxv_constants.VDR_EDGE)) self._populate_vcns_router_binding(pool_edges) self.edge_manager._check_backup_edge_pools() router_bindings = nsxv_db.get_nsxv_router_bindings(self.ctx.session) expect_vdr_bindings = self._create_backup_router_bindings( 1, 2, 3, 4, 5, error_status=constants.PENDING_DELETE, error_at_backend_status=constants.PENDING_DELETE, edge_type=nsxv_constants.VDR_EDGE) vdr_bindings = [ binding for binding in router_bindings if (binding['appliance_size'] == nsxv_constants.LARGE and binding['edge_type'] == nsxv_constants.VDR_EDGE)] self._verify_router_bindings(expect_vdr_bindings, vdr_bindings) service_bindings = [ binding for binding in router_bindings if binding['edge_type'] == nsxv_constants.SERVICE_EDGE] for binding in service_bindings: self.assertEqual(constants.PENDING_DELETE, binding['status']) def test_allocate_edge_appliance_with_empty(self): self.edge_manager._clean_all_error_edge_bindings = mock.Mock() self.edge_manager._allocate_edge_appliance( self.ctx, 'fake_id', 'fake_name', availability_zone=self.az) assert not self.edge_manager._clean_all_error_edge_bindings.called def test_allocate_large_edge_appliance_with_default(self): self.edge_manager.edge_pool_dicts = self.default_edge_pool_dicts pool_edges = (self._create_edge_pools(1, 2, 3, 4, 5) + self._create_edge_pools( 1, 2, 3, 4, 5, size=nsxv_constants.COMPACT) + self._create_edge_pools( 1, 2, 3, 4, 5, edge_type=nsxv_constants.VDR_EDGE)) self._populate_vcns_router_binding(pool_edges) self.edge_manager._allocate_edge_appliance( self.ctx, 'fake_id', 'fake_name', appliance_size=nsxv_constants.LARGE, availability_zone=self.az) edge_id = (EDGE_AVAIL + nsxv_constants.LARGE + '-' + nsxv_constants.SERVICE_EDGE + '-edge-' + str(0)) self.nsxv_manager.rename_edge.assert_has_calls( [mock.call(edge_id, 'fake_name')]) def 
test_allocate_compact_edge_appliance_with_default(self): self.edge_manager.edge_pool_dicts = self.default_edge_pool_dicts pool_edges = (self._create_edge_pools(1, 2, 3, 4, 5) + self._create_edge_pools( 1, 2, 3, 4, 5, size=nsxv_constants.COMPACT) + self._create_edge_pools( 1, 2, 3, 4, 5, edge_type=nsxv_constants.VDR_EDGE)) self._populate_vcns_router_binding(pool_edges) self.edge_manager._allocate_edge_appliance( self.ctx, 'fake_id', 'fake_name', appliance_size=nsxv_constants.COMPACT, availability_zone=self.az) edge_id = (EDGE_AVAIL + nsxv_constants.COMPACT + '-' + nsxv_constants.SERVICE_EDGE + '-edge-' + str(0)) self.nsxv_manager.rename_edge.assert_has_calls( [mock.call(edge_id, 'fake_name')]) def test_allocate_large_edge_appliance_with_vdr(self): self.edge_manager.edge_pool_dicts = self.vdr_edge_pool_dicts pool_edges = (self._create_edge_pools(1, 2, 3, 4, 5) + self._create_edge_pools( 1, 2, 3, 4, 5, size=nsxv_constants.COMPACT) + self._create_edge_pools( 1, 2, 3, 4, 5, edge_type=nsxv_constants.VDR_EDGE)) self._populate_vcns_router_binding(pool_edges) self.edge_manager._allocate_edge_appliance( self.ctx, 'fake_id', 'fake_name', dist=True, appliance_size=nsxv_constants.LARGE, availability_zone=self.az) edge_id = (EDGE_AVAIL + nsxv_constants.LARGE + '-' + nsxv_constants.VDR_EDGE + '-edge-' + str(0)) self.nsxv_manager.rename_edge.assert_has_calls( [mock.call(edge_id, 'fake_name')]) def test_free_edge_appliance_with_empty(self): self.edge_manager._clean_all_error_edge_bindings = mock.Mock() self.edge_manager._allocate_edge_appliance( self.ctx, 'fake_id', 'fake_name', availability_zone=self.az) self.edge_manager._free_edge_appliance( self.ctx, 'fake_id') assert not self.edge_manager._clean_all_error_edge_bindings.called def test_free_edge_appliance_with_default(self): self.edge_manager.edge_pool_dicts = self.default_edge_pool_dicts self.edge_manager._allocate_edge_appliance( self.ctx, 'fake_id', 'fake_name', availability_zone=self.az) 
self.edge_manager._free_edge_appliance( self.ctx, 'fake_id') assert not self.nsxv_manager.delete_edge.called self.nsxv_manager.update_edge.assert_has_calls( [mock.call(mock.ANY, mock.ANY, mock.ANY, mock.ANY, None, appliance_size=nsxv_constants.COMPACT, dist=False, availability_zone=mock.ANY)]) def test_free_edge_appliance_with_default_with_full(self): self.edge_pool_dicts = { nsxv_constants.SERVICE_EDGE: { nsxv_constants.LARGE: {'minimum_pooled_edges': 1, 'maximum_pooled_edges': 1}, nsxv_constants.COMPACT: {'minimum_pooled_edges': 1, 'maximum_pooled_edges': 3}}, nsxv_constants.VDR_EDGE: {}} # Avoid use of eventlet greenpool as this breaks the UT with mock.patch.object(self.edge_manager, '_get_worker_pool'): self.edge_manager._allocate_edge_appliance( self.ctx, 'fake_id', 'fake_name', availability_zone=self.az) self.edge_manager._free_edge_appliance( self.ctx, 'fake_id') class VdrTransitNetUtilDefaultTestCase(EdgeUtilsTestCaseMixin): EXPECTED_NETMASK = '255.255.255.240' EXPECTED_TLR_IP = '169.254.2.1' EXPECTED_PLR_IP = conf.DEFAULT_PLR_ADDRESS def setUp(self): super(VdrTransitNetUtilDefaultTestCase, self).setUp() def test_get_vdr_transit_network_netmask(self): self.assertEqual(edge_utils.get_vdr_transit_network_netmask(), self.EXPECTED_NETMASK) def test_get_vdr_transit_network_tlr_address(self): self.assertEqual(edge_utils.get_vdr_transit_network_tlr_address(), self.EXPECTED_TLR_IP) def test_get_vdr_transit_network_plr_address(self): self.assertEqual(edge_utils.get_vdr_transit_network_plr_address(), self.EXPECTED_PLR_IP) def test_is_overlapping_reserved_subnets(self): self.assertTrue( edge_utils.is_overlapping_reserved_subnets('169.254.1.0/24', ['169.254.0.0/16'])) self.assertTrue( edge_utils.is_overlapping_reserved_subnets('169.254.1.0/24', ['192.168.2.0/24', '169.254.0.0/16'])) self.assertFalse( edge_utils.is_overlapping_reserved_subnets('169.254.1.0/24', ['169.253.0.0/16'])) self.assertFalse( edge_utils.is_overlapping_reserved_subnets('169.254.1.0/24', 
['192.168.2.0/24', '169.253.0.0/16'])) class VdrTransitNetUtilTestCase(EdgeUtilsTestCaseMixin): EXPECTED_NETMASK = '255.255.255.0' EXPECTED_TLR_IP = '192.168.1.1' EXPECTED_PLR_IP = '192.168.1.2' def setUp(self): super(VdrTransitNetUtilTestCase, self).setUp() class VdrTransitNetValidatorTestCase(EdgeUtilsTestCaseMixin): def setUp(self): super(VdrTransitNetValidatorTestCase, self).setUp() def _test_validator(self, cidr): cfg.CONF.set_override('vdr_transit_network', cidr, 'nsxv') return edge_utils.validate_vdr_transit_network() def test_vdr_transit_net_validator_success(self): self.assertIsNone(self._test_validator('192.168.253.0/24')) def test_vdr_transit_net_validator_junk_cidr(self): self.assertRaises(n_exc.Invalid, self._test_validator, 'not_a_subnet') def test_vdr_transit_net_validator_too_small_cidr(self): self.assertRaises( n_exc.Invalid, self._test_validator, '169.254.2.0/31') def test_vdr_transit_net_validator_overlap_cidr(self): self.assertRaises( n_exc.Invalid, self._test_validator, '169.254.0.0/16') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_v/vshield/test_vcns_driver.py0000644000175000017500000004767600000000000030265 0ustar00coreycorey00000000000000# Copyright 2013 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from eventlet import greenthread import mock from neutron.tests import base from neutron_lib import context as neutron_context from oslo_config import cfg import six from vmware_nsx.common import exceptions as nsxv_exc from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az from vmware_nsx.plugins.nsx_v.vshield.common import ( constants as vcns_const) from vmware_nsx.plugins.nsx_v.vshield import edge_appliance_driver as e_drv from vmware_nsx.plugins.nsx_v.vshield.tasks import ( constants as ts_const) from vmware_nsx.plugins.nsx_v.vshield.tasks import tasks as ts from vmware_nsx.plugins.nsx_v.vshield import vcns_driver from vmware_nsx.tests import unit as vmware from vmware_nsx.tests.unit.nsx_v.vshield import fake_vcns VCNS_CONFIG_FILE = vmware.get_fake_conf("vcns.ini.test") ts.TaskManager.set_default_interval(100) class VcnsDriverTaskManagerTestCase(base.BaseTestCase): def setUp(self): super(VcnsDriverTaskManagerTestCase, self).setUp() self.manager = ts.TaskManager() self.manager.start(100) def tearDown(self): self.manager.stop() # Task manager should not leave running threads around # if _thread is None it means it was killed in stop() self.assertIsNone(self.manager._thread) super(VcnsDriverTaskManagerTestCase, self).tearDown() def _test_task_manager_task_process_state(self, sync_exec=False): def _task_failed(task, reason): task.userdata['result'] = False task.userdata['error'] = reason def _check_state(task, exp_state): if not task.userdata.get('result', True): return False state = task.userdata['state'] if state != exp_state: msg = "state %d expect %d" % ( state, exp_state) _task_failed(task, msg) return False task.userdata['state'] = state + 1 return True def _exec(task): if not _check_state(task, 1): return ts_const.TaskStatus.ERROR if task.userdata['sync_exec']: return ts_const.TaskStatus.COMPLETED else: return ts_const.TaskStatus.PENDING def _status(task): if task.userdata['sync_exec']: _task_failed(task, "_status callback triggered") state = 
task.userdata['state'] if state == 3: _check_state(task, 3) return ts_const.TaskStatus.PENDING else: _check_state(task, 4) return ts_const.TaskStatus.COMPLETED def _result(task): if task.userdata['sync_exec']: exp_state = 3 else: exp_state = 5 _check_state(task, exp_state) def _start_monitor(task): _check_state(task, 0) def _executed_monitor(task): _check_state(task, 2) def _result_monitor(task): if task.userdata['sync_exec']: exp_state = 4 else: exp_state = 6 if _check_state(task, exp_state): task.userdata['result'] = True else: task.userdata['result'] = False userdata = { 'state': 0, 'sync_exec': sync_exec } task = ts.Task('name', 'res', _exec, _status, _result, userdata) task.add_start_monitor(_start_monitor) task.add_executed_monitor(_executed_monitor) task.add_result_monitor(_result_monitor) self.manager.add(task) task.wait(ts_const.TaskState.RESULT) self.assertTrue(userdata['result']) def test_task_manager_task_sync_exec_process_state(self): self._test_task_manager_task_process_state(sync_exec=True) def test_task_manager_task_async_exec_process_state(self): self._test_task_manager_task_process_state(sync_exec=False) def test_task_manager_task_ordered_process(self): def _task_failed(task, reason): task.userdata['result'] = False task.userdata['error'] = reason def _exec(task): task.userdata['executed'] = True return ts_const.TaskStatus.PENDING def _status(task): return ts_const.TaskStatus.COMPLETED def _result(task): next_task = task.userdata.get('next') if next_task: if next_task.userdata.get('executed'): _task_failed(next_task, "executed premature") if task.userdata.get('result', True): task.userdata['result'] = True tasks = [] prev = None last_task = None for i in range(5): name = "name-%d" % i task = ts.Task(name, 'res', _exec, _status, _result, {}) tasks.append(task) if prev: prev.userdata['next'] = task prev = task last_task = task for task in tasks: self.manager.add(task) last_task.wait(ts_const.TaskState.RESULT) for task in tasks: 
self.assertTrue(task.userdata['result']) def test_task_manager_task_parallel_process(self): tasks = [] def _exec(task): task.userdata['executed'] = True return ts_const.TaskStatus.PENDING def _status(task): for t in tasks: if not t.userdata.get('executed'): t.userdata['resut'] = False return ts_const.TaskStatus.COMPLETED def _result(task): if (task.userdata.get('result') is None and task.status == ts_const.TaskStatus.COMPLETED): task.userdata['result'] = True else: task.userdata['result'] = False for i in range(5): name = "name-%d" % i res = 'resource-%d' % i task = ts.Task(name, res, _exec, _status, _result, {}) tasks.append(task) self.manager.add(task) for task in tasks: task.wait(ts_const.TaskState.RESULT) self.assertTrue(task.userdata['result']) def _test_task_manager_stop(self, exec_wait=False, result_wait=False, stop_wait=0): def _exec(task): if exec_wait: greenthread.sleep(0.01) return ts_const.TaskStatus.PENDING def _status(task): greenthread.sleep(0.01) return ts_const.TaskStatus.PENDING def _result(task): if result_wait: greenthread.sleep(0) manager = ts.TaskManager().start(100) manager.stop() # Task manager should not leave running threads around # if _thread is None it means it was killed in stop() self.assertIsNone(manager._thread) manager.start(100) alltasks = {} for i in range(100): res = 'res-%d' % i tasks = [] for i in range(100): task = ts.Task('name', res, _exec, _status, _result) manager.add(task) tasks.append(task) alltasks[res] = tasks greenthread.sleep(stop_wait) manager.stop() # Task manager should not leave running threads around # if _thread is None it means it was killed in stop() self.assertIsNone(manager._thread) for res, tasks in six.iteritems(alltasks): for task in tasks: self.assertEqual(ts_const.TaskStatus.ABORT, task.status) def test_task_manager_stop_1(self): self._test_task_manager_stop(True, True, 0) def test_task_manager_stop_2(self): self._test_task_manager_stop(True, True, 1) def test_task_manager_stop_3(self): 
self._test_task_manager_stop(False, False, 0) def test_task_manager_stop_4(self): self._test_task_manager_stop(False, False, 1) def test_task_pending_task(self): def _exec(task): task.userdata['executing'] = True while not task.userdata['tested']: greenthread.sleep(0) task.userdata['executing'] = False return ts_const.TaskStatus.COMPLETED userdata = { 'executing': False, 'tested': False } manager = ts.TaskManager().start(100) task = ts.Task('name', 'res', _exec, userdata=userdata) manager.add(task) while not userdata['executing']: greenthread.sleep(0) self.assertTrue(manager.has_pending_task()) userdata['tested'] = True while userdata['executing']: greenthread.sleep(0) self.assertFalse(manager.has_pending_task()) class VcnsDriverTestCase(base.BaseTestCase): def vcns_patch(self): instance = self.mock_vcns.start() instance.return_value.deploy_edge.side_effect = self.fc.deploy_edge instance.return_value.get_edge_id.side_effect = self.fc.get_edge_id instance.return_value.delete_edge.side_effect = self.fc.delete_edge instance.return_value.update_interface.side_effect = ( self.fc.update_interface) instance.return_value.get_nat_config.side_effect = ( self.fc.get_nat_config) instance.return_value.update_nat_config.side_effect = ( self.fc.update_nat_config) instance.return_value.delete_nat_rule.side_effect = ( self.fc.delete_nat_rule) instance.return_value.get_edge_status.side_effect = ( self.fc.get_edge_status) instance.return_value.get_edges.side_effect = self.fc.get_edges instance.return_value.update_routes.side_effect = ( self.fc.update_routes) instance.return_value.create_lswitch.side_effect = ( self.fc.create_lswitch) instance.return_value.delete_lswitch.side_effect = ( self.fc.delete_lswitch) def setUp(self): super(VcnsDriverTestCase, self).setUp() self.ctx = neutron_context.get_admin_context() self.temp_e_drv_nsxv_db = e_drv.nsxv_db e_drv.nsxv_db = mock.MagicMock() self.config_parse(args=['--config-file', VCNS_CONFIG_FILE]) self.fc = fake_vcns.FakeVcns() 
self.mock_vcns = mock.patch(vmware.VCNS_NAME, autospec=True) self.vcns_patch() self.addCleanup(self.fc.reset_all) self.vcns_driver = vcns_driver.VcnsDriver(self) self.az = (nsx_az.NsxVAvailabilityZones(). get_default_availability_zone()) self.edge_id = None self.result = None def tearDown(self): e_drv.nsxv_db = self.temp_e_drv_nsxv_db self.vcns_driver.task_manager.stop() # Task manager should not leave running threads around # if _thread is None it means it was killed in stop() self.assertIsNone(self.vcns_driver.task_manager._thread) super(VcnsDriverTestCase, self).tearDown() def complete_edge_creation( self, context, edge_id, name, router_id, dist, deploy_successful, availability_zone=None, deploy_metadata=False): pass def _deploy_edge(self): self.edge_id = self.vcns_driver.deploy_edge( self.ctx, 'router-id', 'myedge', 'internal-network', availability_zone=self.az) self.assertEqual('edge-1', self.edge_id) def test_deploy_edge_with(self): self.vcns_driver.deploy_edge( self.ctx, 'router-id', 'myedge', 'internal-network', availability_zone=self.az) status = self.vcns_driver.get_edge_status('edge-1') self.assertEqual(vcns_const.RouterStatus.ROUTER_STATUS_ACTIVE, status) def test_deploy_edge_fail(self): self.vcns_driver.deploy_edge( self.ctx, 'router-1', 'myedge', 'internal-network', availability_zone=self.az) # self.vcns_driver.deploy_edge( # self.ctx, 'router-2', 'myedge', 'internal-network', # availability_zone=self.az) self.assertRaises( nsxv_exc.NsxPluginException, self.vcns_driver.deploy_edge, self.ctx, 'router-2', 'myedge', 'internal-network', availability_zone=self.az) def test_get_edge_status(self): self._deploy_edge() status = self.vcns_driver.get_edge_status(self.edge_id) self.assertEqual(vcns_const.RouterStatus.ROUTER_STATUS_ACTIVE, status) def test_update_nat_rules(self): self._deploy_edge() snats = [{ 'src': '192.168.1.0/24', 'translated': '10.0.0.1' }, { 'src': '192.168.2.0/24', 'translated': '10.0.0.2' }, { 'src': '192.168.3.0/24', 'translated': 
'10.0.0.3' } ] dnats = [{ 'dst': '100.0.0.4', 'translated': '192.168.1.1' }, { 'dst': '100.0.0.5', 'translated': '192.168.2.1' } ] result = self.vcns_driver.update_nat_rules(self.edge_id, snats, dnats) self.assertTrue(result) natcfg = self.vcns_driver.get_nat_config(self.edge_id) rules = natcfg['rules']['natRulesDtos'] self.assertEqual(2 * len(dnats) + len(snats), len(rules)) self.natEquals(rules[0], dnats[0]) self.natEquals(rules[1], self.snat_for_dnat(dnats[0])) self.natEquals(rules[2], dnats[1]) self.natEquals(rules[3], self.snat_for_dnat(dnats[1])) self.natEquals(rules[4], snats[0]) self.natEquals(rules[5], snats[1]) self.natEquals(rules[6], snats[2]) def test_update_nat_rules_for_all_vnics(self): self._deploy_edge() snats = [{ 'src': '192.168.1.0/24', 'translated': '10.0.0.1' }, { 'src': '192.168.2.0/24', 'translated': '10.0.0.2' }, { 'src': '192.168.3.0/24', 'translated': '10.0.0.3' } ] dnats = [{ 'dst': '100.0.0.4', 'translated': '192.168.1.1' }, { 'dst': '100.0.0.5', 'translated': '192.168.2.1' } ] indices = [0, 1, 2, 3] result = self.vcns_driver.update_nat_rules(self.edge_id, snats, dnats, indices) self.assertTrue(result) natcfg = self.vcns_driver.get_nat_config(self.edge_id) rules = natcfg['rules']['natRulesDtos'] self.assertEqual(2 * len(indices) * len(dnats) + len(indices) * len(snats), len(rules)) sorted_rules = sorted(rules, key=lambda k: k['vnic']) for i in range(0, len(sorted_rules), 7): self.natEquals(sorted_rules[i], dnats[0]) self.natEquals(sorted_rules[i + 1], self.snat_for_dnat(dnats[0])) self.natEquals(sorted_rules[i + 2], dnats[1]) self.natEquals(sorted_rules[i + 3], self.snat_for_dnat(dnats[1])) self.natEquals(sorted_rules[i + 4], snats[0]) self.natEquals(sorted_rules[i + 5], snats[1]) self.natEquals(sorted_rules[i + 6], snats[2]) def test_update_nat_rules_for_specific_vnics(self): self._deploy_edge() snats = [{ 'src': '192.168.1.0/24', 'translated': '10.0.0.1', 'vnic_index': 5 }, { 'src': '192.168.2.0/24', 'translated': '10.0.0.2' }, { 
'src': '192.168.3.0/24', 'translated': '10.0.0.3' } ] dnats = [{ 'dst': '100.0.0.4', 'translated': '192.168.1.1', 'vnic_index': 2 }, { 'dst': '100.0.0.5', 'translated': '192.168.2.1' } ] result = self.vcns_driver.update_nat_rules(self.edge_id, snats, dnats) self.assertTrue(result) natcfg = self.vcns_driver.get_nat_config(self.edge_id) rules = natcfg['rules']['natRulesDtos'] self.assertEqual(2 * len(dnats) + len(snats), len(rules)) self.natEquals(rules[0], dnats[0]) self.assertEqual(2, rules[0]['vnic']) self.natEquals(rules[1], self.snat_for_dnat(dnats[0])) self.assertEqual(2, rules[1]['vnic']) self.natEquals(rules[2], dnats[1]) self.assertNotIn('vnic', rules[2]) self.natEquals(rules[3], self.snat_for_dnat(dnats[1])) self.assertNotIn('vnic', rules[3]) self.natEquals(rules[4], snats[0]) self.assertEqual(5, rules[4]['vnic']) self.natEquals(rules[5], snats[1]) self.assertNotIn('vnic', rules[5]) self.natEquals(rules[6], snats[2]) self.assertNotIn('vnic', rules[6]) def snat_for_dnat(self, dnat): return { 'src': dnat['translated'], 'translated': dnat['dst'] } def natEquals(self, rule, exp): addr = exp.get('src') if not addr: addr = exp.get('dst') self.assertEqual(addr, rule['originalAddress']) self.assertEqual(exp['translated'], rule['translatedAddress']) def test_update_routes(self): self._deploy_edge() routes = [{ 'cidr': '192.168.1.0/24', 'nexthop': '169.254.2.1' }, { 'cidr': '192.168.2.0/24', 'nexthop': '169.254.2.1' }, { 'cidr': '192.168.3.0/24', 'nexthop': '169.254.2.1' } ] result = self.vcns_driver.update_routes( self.edge_id, '10.0.0.1', routes) self.assertTrue(result) def test_update_interface(self): self._deploy_edge() self.vcns_driver.update_interface( 'router-id', self.edge_id, vcns_const.EXTERNAL_VNIC_INDEX, 'network-id', address='100.0.0.3', netmask='255.255.255.0') def test_delete_edge(self): self._deploy_edge() result = self.vcns_driver.delete_edge( self.ctx, 'router-id', self.edge_id) self.assertTrue(result) def test_create_lswitch(self): tz_config = [{ 
'transport_zone_uuid': 'tz-uuid' }] lswitch = self.vcns_driver.create_lswitch('lswitch', tz_config) self.assertEqual('lswitch', lswitch['display_name']) self.assertEqual('LogicalSwitchConfig', lswitch['type']) self.assertIn('uuid', lswitch) def test_delete_lswitch(self): tz_config = { 'transport_zone_uuid': 'tz-uuid' } lswitch = self.vcns_driver.create_lswitch('lswitch', tz_config) self.vcns_driver.delete_lswitch(lswitch['uuid']) class VcnsDriverHATestCase(VcnsDriverTestCase): def setUp(self): # add edge_ha and ha_datastore to the pre-defined configuration self._data_store = 'fake-datastore' self._ha_data_store = 'fake-datastore-2' cfg.CONF.set_override('ha_datastore_id', self._ha_data_store, group="nsxv") cfg.CONF.set_override('edge_ha', True, group="nsxv") super(VcnsDriverHATestCase, self).setUp() self.vcns_driver.vcns.orig_deploy = self.vcns_driver.vcns.deploy_edge self.vcns_driver.vcns.deploy_edge = self._fake_deploy_edge def _fake_deploy_edge(self, request): # validate the appliance structure in the request, # and return the regular (fake) response found_app = request['appliances']['appliances'] self.assertEqual(2, len(found_app)) self.assertEqual(self._data_store, found_app[0]['datastoreId']) self.assertEqual(self._ha_data_store, found_app[1]['datastoreId']) return self.vcns_driver.vcns.orig_deploy(request) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2382548 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_v3/0000755000175000017500000000000000000000000022731 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_v3/__init__.py0000644000175000017500000000000000000000000025030 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.242255 
vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_v3/housekeeper/0000755000175000017500000000000000000000000025250 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_v3/housekeeper/__init__.py0000644000175000017500000000000000000000000027347 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_v3/housekeeper/test_mismatch_logical_port.py0000644000175000017500000000631000000000000033224 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from neutron.tests import base from neutron_lib.plugins import constants from oslo_utils import uuidutils from vmware_nsx.plugins.common.housekeeper import base_job from vmware_nsx.plugins.nsx_v3.housekeeper import mismatch_logical_port from vmware_nsxlib.v3 import exceptions as nsxlib_exc DUMMY_PORT = { "resource_type": "LogicalPort", "id": uuidutils.generate_uuid(), "display_name": "test", "tags": [{ "scope": "os-neutron-dport-id", "tag": uuidutils.generate_uuid() }, { "scope": "os-project-id", "tag": uuidutils.generate_uuid() }, { "scope": "os-project-name", "tag": "admin" }, { "scope": "os-api-version", "tag": "13.0.0.0b3.dev90" }], "logical_switch_id": uuidutils.generate_uuid(), "admin_state": "UP", "switching_profile_ids": []} class MismatchLogicalPortTestCaseReadOnly(base.BaseTestCase): def setUp(self): def get_plugin_mock(alias=constants.CORE): if alias in (constants.CORE, constants.L3): return self.plugin super(MismatchLogicalPortTestCaseReadOnly, self).setUp() self.plugin = mock.Mock() self.plugin.nsxlib = mock.Mock() self.plugin.nsxlib.switching_profile.find_by_display_name = mock.Mock( return_value=[{'id': 'Dummy'}]) self.context = mock.Mock() self.context.session = mock.Mock() mock.patch('neutron_lib.plugins.directory.get_plugin', side_effect=get_plugin_mock).start() self.log = mock.Mock() base_job.LOG = self.log self.job = mismatch_logical_port.MismatchLogicalportJob(True, []) def run_job(self): self.job.run(self.context, readonly=True) def test_clean_run(self): with mock.patch.object(self.plugin, 'get_ports', return_value=[]): self.run_job() self.log.warning.assert_not_called() def test_with_mismatched_ls(self): with mock.patch.object( self.plugin, 'get_ports', return_value=[{'id': uuidutils.generate_uuid()}]),\ mock.patch("vmware_nsx.plugins.nsx_v3.utils.get_port_nsx_id", return_value=uuidutils.generate_uuid()),\ mock.patch.object(self.plugin.nsxlib.logical_port, 'get', side_effect=nsxlib_exc.ResourceNotFound): self.run_job() 
self.log.warning.assert_called() class MismatchLogicalPortTestCaseReadWrite( MismatchLogicalPortTestCaseReadOnly): def run_job(self): self.job.run(self.context, readonly=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_v3/housekeeper/test_orphaned_dhcp_server.py0000644000175000017500000000566100000000000033055 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from neutron.tests import base from neutron_lib.plugins import constants from oslo_utils import uuidutils from vmware_nsx.plugins.common.housekeeper import base_job from vmware_nsx.plugins.nsx_v3.housekeeper import orphaned_dhcp_server DUMMY_DHCP_SERVER = { "resource_type": "LogicalDhcpServer", "id": uuidutils.generate_uuid(), "display_name": "test", "tags": [{ "scope": "os-neutron-net-id", "tag": uuidutils.generate_uuid() }, { "scope": "os-project-id", "tag": uuidutils.generate_uuid() }, { "scope": "os-project-name", "tag": "admin" }, { "scope": "os-api-version", "tag": "13.0.0.0b3.dev90" }], "attached_logical_port_id": uuidutils.generate_uuid(), "dhcp_profile_id": uuidutils.generate_uuid()} class OrphanedDhcpServerTestCaseReadOnly(base.BaseTestCase): def setUp(self): def get_plugin_mock(alias=constants.CORE): if alias in (constants.CORE, constants.L3): return self.plugin super(OrphanedDhcpServerTestCaseReadOnly, self).setUp() self.plugin = mock.Mock() self.plugin.nsxlib = mock.Mock() self.context = mock.Mock() self.context.session = mock.Mock() mock.patch('neutron_lib.plugins.directory.get_plugin', side_effect=get_plugin_mock).start() self.log = mock.Mock() base_job.LOG = self.log self.job = orphaned_dhcp_server.OrphanedDhcpServerJob(True, []) def run_job(self): self.job.run(self.context, readonly=True) def test_clean_run(self): with mock.patch.object(self.plugin.nsxlib.dhcp_server, 'list', return_value={'results': []}): self.run_job() self.log.warning.assert_not_called() def test_with_orphaned_servers(self): with mock.patch.object(self.plugin.nsxlib.dhcp_server, 'list', return_value={'results': [DUMMY_DHCP_SERVER]}),\ mock.patch.object(self.plugin, 'get_network', side_effect=Exception): self.run_job() self.log.warning.assert_called() class OrphanedDhcpServerTestCaseReadWrite(OrphanedDhcpServerTestCaseReadOnly): def run_job(self): self.job.run(self.context, readonly=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_v3/housekeeper/test_orphaned_logical_router.py0000644000175000017500000000565000000000000033561 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron.tests import base from neutron_lib.plugins import constants from oslo_utils import uuidutils from vmware_nsx.plugins.common.housekeeper import base_job from vmware_nsx.plugins.nsx_v3.housekeeper import orphaned_logical_router DUMMY_ROUTER = { "resource_type": "LogicalRouter", "id": uuidutils.generate_uuid(), "display_name": "test", "tags": [{ "scope": "os-neutron-router-id", "tag": uuidutils.generate_uuid() }, { "scope": "os-project-id", "tag": uuidutils.generate_uuid() }, { "scope": "os-project-name", "tag": "admin" }, { "scope": "os-api-version", "tag": "13.0.0.0b3.dev90" }], "edge_cluster_id": uuidutils.generate_uuid(), "router_type": "TIER1"} class OrphanedLogicalRouterTestCaseReadOnly(base.BaseTestCase): def setUp(self): def get_plugin_mock(alias=constants.CORE): if alias in (constants.CORE, constants.L3): return self.plugin super(OrphanedLogicalRouterTestCaseReadOnly, self).setUp() self.plugin = mock.Mock() self.plugin.nsxlib = mock.Mock() self.context = mock.Mock() self.context.session = mock.Mock() mock.patch('neutron_lib.plugins.directory.get_plugin', side_effect=get_plugin_mock).start() self.log = mock.Mock() base_job.LOG = 
self.log self.job = orphaned_logical_router.OrphanedLogicalRouterJob(True, []) def run_job(self): self.job.run(self.context, readonly=True) def test_clean_run(self): with mock.patch.object(self.plugin.nsxlib.logical_router, 'list', return_value={'results': []}): self.run_job() self.log.warning.assert_not_called() def test_with_orphaned_ls(self): with mock.patch.object(self.plugin.nsxlib.logical_router, 'list', return_value={'results': [DUMMY_ROUTER]}),\ mock.patch("vmware_nsx.db.db.get_neutron_from_nsx_router_id", return_value=None): self.run_job() self.log.warning.assert_called() class OrphanedLogicalRouterTestCaseReadWrite( OrphanedLogicalRouterTestCaseReadOnly): def run_job(self): self.job.run(self.context, readonly=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_v3/housekeeper/test_orphaned_logical_switch.py0000644000175000017500000000556500000000000033547 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from neutron.tests import base from neutron_lib.plugins import constants from oslo_utils import uuidutils from vmware_nsx.plugins.common.housekeeper import base_job from vmware_nsx.plugins.nsx_v3.housekeeper import orphaned_logical_switch DUMMY_LS = { "resource_type": "LogicalSwitch", "id": uuidutils.generate_uuid(), "display_name": "test", "tags": [{ "scope": "os-neutron-net-id", "tag": uuidutils.generate_uuid() }, { "scope": "os-project-id", "tag": uuidutils.generate_uuid() }, { "scope": "os-project-name", "tag": "admin" }, { "scope": "os-api-version", "tag": "13.0.0.0b3.dev90" }], "transport_zone_id": uuidutils.generate_uuid(), "address_bindings": []} class OrphanedLogicalSwitchTestCaseReadOnly(base.BaseTestCase): def setUp(self): def get_plugin_mock(alias=constants.CORE): if alias in (constants.CORE, constants.L3): return self.plugin super(OrphanedLogicalSwitchTestCaseReadOnly, self).setUp() self.plugin = mock.Mock() self.plugin.nsxlib = mock.Mock() self.context = mock.Mock() self.context.session = mock.Mock() mock.patch('neutron_lib.plugins.directory.get_plugin', side_effect=get_plugin_mock).start() self.log = mock.Mock() base_job.LOG = self.log self.job = orphaned_logical_switch.OrphanedLogicalSwitchJob(True, []) def run_job(self): self.job.run(self.context, readonly=True) def test_clean_run(self): with mock.patch.object(self.plugin.nsxlib.logical_switch, 'list', return_value={'results': []}): self.run_job() self.log.warning.assert_not_called() def test_with_orphaned_ls(self): with mock.patch.object(self.plugin.nsxlib.logical_switch, 'list', return_value={'results': [DUMMY_LS]}),\ mock.patch("vmware_nsx.db.db.get_net_ids", return_value=None): self.run_job() self.log.warning.assert_called() class OrphanedLogicalSwitchTestCaseReadWrite( OrphanedLogicalSwitchTestCaseReadOnly): def run_job(self): self.job.run(self.context, readonly=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 
vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_v3/test_api_replay.py0000644000175000017500000001002300000000000026463 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from vmware_nsx.extensions import api_replay from vmware_nsx.tests.unit.nsx_v3 import test_plugin from neutron_lib.api import attributes from neutron_lib.plugins import directory from oslo_config import cfg class TestApiReplay(test_plugin.L3NatTest): def setUp(self, plugin=None, ext_mgr=None, service_plugins=None): # enables api_replay_mode for these tests cfg.CONF.set_override('api_replay_mode', True) super(TestApiReplay, self).setUp() def tearDown(self): # disables api_replay_mode for these tests cfg.CONF.set_override('api_replay_mode', False) # remove the extension from the plugin directory.get_plugin().supported_extension_aliases.remove( api_replay.ALIAS) # Revert the attributes map back to normal for attr_name in ('ports', 'networks', 'security_groups', 'security_group_rules', 'routers', 'policies'): attr_info = attributes.RESOURCES[attr_name] attr_info['id']['allow_post'] = False super(TestApiReplay, self).tearDown() def test_create_port_specify_id(self): specified_network_id = '555e762b-d7a1-4b44-b09b-2a34ada56c9f' specified_port_id = 'e55e762b-d7a1-4b44-b09b-2a34ada56c9f' network_res = self._create_network(self.fmt, 'test-network', True, arg_list=('id',), id=specified_network_id) network = self.deserialize(self.fmt, network_res) 
self.assertEqual(specified_network_id, network['network']['id']) port_res = self._create_port(self.fmt, network['network']['id'], arg_list=('id',), id=specified_port_id) port = self.deserialize(self.fmt, port_res) self.assertEqual(specified_port_id, port['port']['id']) def _create_router(self, fmt, tenant_id, name=None, admin_state_up=None, arg_list=None, **kwargs): data = {'router': {'tenant_id': tenant_id}} if name: data['router']['name'] = name if admin_state_up: data['router']['admin_state_up'] = admin_state_up for arg in (('admin_state_up', 'tenant_id') + (arg_list or ())): # Arg must be present and not empty if kwargs.get(arg): data['router'][arg] = kwargs[arg] router_req = self.new_create_request('routers', data, fmt) return router_req.get_response(self.ext_api) def test_create_update_router(self): specified_router_id = '555e762b-d7a1-4b44-b09b-2a34ada56c9f' router_res = self._create_router(self.fmt, 'test-tenant', 'test-rtr', arg_list=('id',), id=specified_router_id) router = self.deserialize(self.fmt, router_res) self.assertEqual(specified_router_id, router['router']['id']) # This part tests _fixup_res_dict as well body = self._update('routers', specified_router_id, {'router': {'name': 'new_name'}}) body = self._show('routers', specified_router_id) self.assertEqual(body['router']['name'], 'new_name') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_v3/test_availability_zones.py0000644000175000017500000001532200000000000030235 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_utils import uuidutils from neutron.tests import base from vmware_nsx.common import config from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.plugins.nsx_v3 import availability_zones as nsx_az class Nsxv3AvailabilityZonesTestCase(base.BaseTestCase): def setUp(self): super(Nsxv3AvailabilityZonesTestCase, self).setUp() self.az_name = "zone1" self.group_name = "az:%s" % self.az_name config.register_nsxv3_azs(cfg.CONF, [self.az_name]) self.global_md_proxy = uuidutils.generate_uuid() cfg.CONF.set_override( "metadata_proxy", self.global_md_proxy, group="nsx_v3") self.global_dhcp_profile = uuidutils.generate_uuid() cfg.CONF.set_override( "dhcp_profile", self.global_dhcp_profile, group="nsx_v3") cfg.CONF.set_override( "native_metadata_route", "1.1.1.1", group="nsx_v3") cfg.CONF.set_override("dns_domain", "xxx.com", group="nsx_v3") cfg.CONF.set_override("nameservers", ["10.1.1.1"], group="nsx_v3") cfg.CONF.set_override("switching_profiles", ["uuid1"], group="nsx_v3") cfg.CONF.set_override("dhcp_relay_service", "service1", group="nsx_v3") cfg.CONF.set_override( "default_tier0_router", "uuidrtr1", group="nsx_v3") cfg.CONF.set_override("edge_cluster", "ec1", group="nsx_v3") def _config_az(self, metadata_proxy="metadata_proxy1", dhcp_profile="dhcp_profile1", native_metadata_route="2.2.2.2", dns_domain="aaa.com", nameservers=["20.1.1.1"], default_overlay_tz='otz', default_vlan_tz='vtz', switching_profiles=["uuid2"], dhcp_relay_service="service2", default_tier0_router="uuidrtr2", edge_cluster="ec2"): if metadata_proxy is not 
None: cfg.CONF.set_override("metadata_proxy", metadata_proxy, group=self.group_name) if dhcp_profile is not None: cfg.CONF.set_override("dhcp_profile", dhcp_profile, group=self.group_name) if native_metadata_route is not None: cfg.CONF.set_override("native_metadata_route", native_metadata_route, group=self.group_name) if dns_domain is not None: cfg.CONF.set_override("dns_domain", dns_domain, group=self.group_name) if nameservers is not None: cfg.CONF.set_override("nameservers", nameservers, group=self.group_name) if default_overlay_tz is not None: cfg.CONF.set_override("default_overlay_tz", default_overlay_tz, group=self.group_name) if default_vlan_tz is not None: cfg.CONF.set_override("default_vlan_tz", default_vlan_tz, group=self.group_name) if switching_profiles is not None: cfg.CONF.set_override("switching_profiles", switching_profiles, group=self.group_name) if dhcp_relay_service is not None: cfg.CONF.set_override("dhcp_relay_service", dhcp_relay_service, group=self.group_name) if default_tier0_router is not None: cfg.CONF.set_override("default_tier0_router", default_tier0_router, group=self.group_name) if edge_cluster is not None: cfg.CONF.set_override("edge_cluster", edge_cluster, group=self.group_name) def test_simple_availability_zone(self): self._config_az() az = nsx_az.NsxV3AvailabilityZone(self.az_name) self.assertEqual(self.az_name, az.name) self.assertEqual("metadata_proxy1", az.metadata_proxy) self.assertEqual("dhcp_profile1", az.dhcp_profile) self.assertEqual("2.2.2.2", az.native_metadata_route) self.assertEqual("aaa.com", az.dns_domain) self.assertEqual(["20.1.1.1"], az.nameservers) self.assertEqual("otz", az.default_overlay_tz) self.assertEqual("vtz", az.default_vlan_tz) self.assertEqual(["uuid2"], az.switching_profiles) self.assertEqual("service2", az.dhcp_relay_service) self.assertEqual("uuidrtr2", az.default_tier0_router) self.assertEqual("ec2", az.edge_cluster) def test_missing_group_section(self): self.assertRaises( 
nsx_exc.NsxInvalidConfiguration, nsx_az.NsxV3AvailabilityZone, "doesnt_exist") def test_availability_zone_missing_metadata_proxy(self): # Mandatory parameter self._config_az(metadata_proxy=None) self.assertRaises( nsx_exc.NsxInvalidConfiguration, nsx_az.NsxV3AvailabilityZone, self.az_name) def test_availability_zone_missing_dhcp_profile(self): # Mandatory parameter self._config_az(dhcp_profile=None) self.assertRaises( nsx_exc.NsxInvalidConfiguration, nsx_az.NsxV3AvailabilityZone, self.az_name) def test_availability_zone_missing_md_route(self): self._config_az(native_metadata_route=None) az = nsx_az.NsxV3AvailabilityZone(self.az_name) self.assertEqual("1.1.1.1", az.native_metadata_route) def test_availability_zone_missing_dns_domain(self): self._config_az(dns_domain=None) az = nsx_az.NsxV3AvailabilityZone(self.az_name) self.assertEqual("xxx.com", az.dns_domain) def test_availability_zone_missing_nameservers(self): self._config_az(nameservers=None) az = nsx_az.NsxV3AvailabilityZone(self.az_name) self.assertEqual(["10.1.1.1"], az.nameservers) def test_availability_zone_missing_profiles(self): self._config_az(switching_profiles=None) az = nsx_az.NsxV3AvailabilityZone(self.az_name) self.assertEqual(["uuid1"], az.switching_profiles) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_v3/test_client_cert.py0000644000175000017500000002056100000000000026641 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. import os import mock from oslo_config import cfg from neutron.tests.unit import testlib_api from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.plugins.nsx_v3 import cert_utils from vmware_nsx.plugins.nsx_v3 import utils class NsxV3ClientCertProviderTestCase(testlib_api.SqlTestCase): CERT = "-----BEGIN CERTIFICATE-----\n" \ "MIIDJTCCAg0CBFh36j0wDQYJKoZIhvcNAQELBQAwVzELMAkGA1UEBhMCVVMxEzAR\n" \ "BgNVBAgMCkNhbGlmb3JuaWExDjAMBgNVBAoMBU15T3JnMQ8wDQYDVQQLDAZNeVVu\n" \ "aXQxEjAQBgNVBAMMCW15b3JnLmNvbTAeFw0xNzAxMTIyMDQyMzdaFw0yNzAxMTAy\n" \ "MDQyMzdaMFcxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMQ4wDAYD\n" \ "VQQKDAVNeU9yZzEPMA0GA1UECwwGTXlVbml0MRIwEAYDVQQDDAlteW9yZy5jb20w\n" \ "ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC/wsYintlWVaSeXwaSrdPa\n" \ "+AHtL1ooH7q0uf6tt+6Rwiy10YRjAVJhapj9995gqgJ2402J+3gzNXLCbXjjDR/D\n" \ "9xjAzKHu61r0AVNd9/0+8yXQrEDuzlwHSCKz+zjq5ZEZ7RkLIUdreaZJFPTCwry3\n" \ "wuTnBfqcE7xWl6WfWR8evooV+ZzIfjQdoSliIyn3YGxNN5pc1P40qt0pxOsNBGXG\n" \ "2FIZXpML8TpKw0ga/wE70CJd6tRvSsAADxQXehfKvGtHvlJYS+3cTahC7reQXJnc\n" \ "qsjgYkiWyhhR4jdcTD/tDlVcJroM1jFVxpsCg/AU3srWWWeAGyVe42ZhqWVf0Urz\n" \ "AgMBAAEwDQYJKoZIhvcNAQELBQADggEBAA/lLfmXe8wPyBhN/VMb5bu5Ey56qz+j\n" \ "jCn7tz7FjRvsB9P0fLUDOBKNwyon3yopDNYJ4hnm4yKoHCHURQLZKWHzm0XKzE+4\n" \ "cA/M13M8OEg5otnVVHhz1FPQWnJq7bLHh/KXYcc5Rkc7UeHEPj0sDjfUjCPGdepc\n" \ "Ghu1ZcgHsL4JCuvcadG+RFGeDTug3yO92Fj2uFy5DlzzWOZSi4otpZRd9JZkAtZ1\n" \ "umZRBJ2A504nJx4MplmNqvLNkmxMLKQdvZYNNiYr6icOavDOJA5RhzgoppJZkV2w\n" \ "v2oC+8BFarXnZSk37HAWjwcaqzBLbIyPYpClW5IYMr8LiixSBACc+4w=\n" \ "-----END CERTIFICATE-----\n" PKEY = "-----BEGIN PRIVATE KEY-----\n" \ "MIIEwAIBADANBgkqhkiG9w0BAQEFAASCBKowggSmAgEAAoIBAQC/wsYintlWVaSe\n" \ "XwaSrdPa+AHtL1ooH7q0uf6tt+6Rwiy10YRjAVJhapj9995gqgJ2402J+3gzNXLC\n" \ "bXjjDR/D9xjAzKHu61r0AVNd9/0+8yXQrEDuzlwHSCKz+zjq5ZEZ7RkLIUdreaZJ\n" \ "FPTCwry3wuTnBfqcE7xWl6WfWR8evooV+ZzIfjQdoSliIyn3YGxNN5pc1P40qt0p\n" \ 
"xOsNBGXG2FIZXpML8TpKw0ga/wE70CJd6tRvSsAADxQXehfKvGtHvlJYS+3cTahC\n" \ "7reQXJncqsjgYkiWyhhR4jdcTD/tDlVcJroM1jFVxpsCg/AU3srWWWeAGyVe42Zh\n" \ "qWVf0UrzAgMBAAECggEBAJrGuie9cQy3KZzOdD614RaPMPbhTnKuUYOH0GEk4YFy\n" \ "aaYDS0iiC30njf8HLs10y3JsOuyRNU6X6F24AGe68xW3/pm3UUjHXG0wGLry68wA\n" \ "c1g/gFV/6FXUSnZc4m7uBjUX4yvRm5TK5oV8TaZZifsEar9xWvrZDx4RXpQEWhL0\n" \ "L/TyrOZSfRtBgdWX6Ag4XQVsCfZxJoCi2ZyvaMBsWTH06x9AGo1Io5t1AmA9Hsfb\n" \ "6BsSz186nqb0fq4UMfrWrSCz7M/1s03+hBOVICH2TdaRDZLtDVa1b2x4sFpfdp9t\n" \ "VVxuSHxcmvzOPMIv3NXwj0VitTYYJDBFKoEfx1mzhNkCgYEA59gYyBfpsuCOevP2\n" \ "tn7IeysbtaoKDzHE+ksjs3sAn6Vr2Y0Lbed26NpdIVL6u3HAteJxqrIh0zpkpAtp\n" \ "akdqlj86oRaBUqLXxK3QNpUx19f7KN7UsVAbzUJSOm2n1piPg261ktfhtms2rxnQ\n" \ "+9yluINu+z1wS4FG9SwrRmwwfsUCgYEA072Ma1sj2MER5tmQw1zLANkzP1PAkUdy\n" \ "+oDuJmU9A3/+YSIkm8dGprFglPkLUaf1B15oN6wCJVMpB1lza3PM/YT70rpqc7cq\n" \ "PHJXQlZFMBhyVfIkCv3wICTLD5phhgAWlzlwm094f2uAnbG6WUkrVfZajuh0pW53\n" \ "1i0OTfxAvlcCgYEAkDB2oSM2JhjApDlMbA2HtAqIbkA1h2OlpSDMMFjEd4WTALdW\n" \ "r2CwNHtyRkJsS92gQ750gPvOS6daZifuxLlr0cu7M+piPbmnRdvvzbKWUC40NyP2\n" \ "1dwDnnGr4EjIhI9XWh+lb5EyAJjHZrlAnxOIQawEft6kE2FwdxSkSWUJ+B0CgYEA\n" \ "n2xYDXzRwKGdmPK2zGFRd5IRw9yLYNcq+vGYXdBb4Aa+wOO0LJYd2+Qxk/jvTMvo\n" \ "8WNjlIcuFmxGuAHhpUXLUhaOhFtXS0jdxCVTDd9muI+vhoaKHLyVz53kRhs20m2+\n" \ "lJ3q6wUq9MU8UX8/j3pH5rFV/cOIEAbcs6W4337OQIECgYEAoLtQyqXjH45FlCQx\n" \ "xK8dY+GuxIP+TIwiq23yhu3e+3LIgXJw8DwBFN5yJyH2HMnhGkD4PurEx2sGHeLO\n" \ "EG6L8PNDOxpvSzcgxwmZsUK6j3nAbKycF3PDDXA4kt8WDXBr86OMQsFtpjeO+fGh\n" \ "YWJa+OKc2ExdeMewe9gKIDQ5stw=\n" \ "-----END PRIVATE KEY-----\n" def _init_config(self, storage_type='nsx-db', password=None, cert_file=None): cfg.CONF.set_override('nsx_use_client_auth', True, 'nsx_v3') cfg.CONF.set_override('nsx_client_cert_storage', storage_type, 'nsx_v3') cfg.CONF.set_override('nsx_client_cert_file', cert_file, 'nsx_v3') cfg.CONF.set_override('nsx_client_cert_pk_password', password, 'nsx_v3') # pk password secret is cached - reset it for each test cert_utils.reset_secret() 
self._provider = utils.get_client_cert_provider() def validate_db_provider(self, expected_cert_data): fname = None with self._provider() as p: # verify cert data was exported to CERTFILE fname = p.filename() with open(fname, 'r') as f: actual = f.read() self.assertEqual(expected_cert_data, actual) # after with statement, cert file should be deleted self.assertFalse(os.path.isfile(fname)) def validate_basic_provider(self, expected_cert_data): fname = None with self._provider as p: fname = p.filename() with open(fname, 'r') as f: actual = f.read() self.assertEqual(expected_cert_data, actual) # with statement should not touch the file self.assertTrue(os.path.isfile(fname)) def test_db_provider_without_cert(self): """Verify init fails if no cert is provided in client cert mode""" # certificate not generated - exception should be raised self._init_config() # no certificate in table mock.patch( "vmware_nsx.db.db.get_certificate", return_value=(None, None)).start() self.assertRaises(nsx_exc.ClientCertificateException, self._provider().__enter__) # now verify return to normal after failure mock.patch( "vmware_nsx.db.db.get_certificate", return_value=(self.CERT, self.PKEY)).start() self.validate_db_provider(self.CERT + self.PKEY) def test_db_provider_with_cert(self): """Verify successful certificate load from storage""" self._init_config() mock.patch( "vmware_nsx.db.db.get_certificate", return_value=(self.CERT, self.PKEY)).start() self.validate_db_provider(self.CERT + self.PKEY) def test_db_provider_with_encryption(self): """Verify successful encrypted PK load from storage""" password = 'topsecret' self._init_config(password=password) secret = cert_utils.generate_secret_from_password(password) encrypted_pkey = cert_utils.symmetric_encrypt(secret, self.PKEY) # db should contain encrypted key mock.patch( "vmware_nsx.db.db.get_certificate", return_value=(self.CERT, encrypted_pkey)).start() self.validate_db_provider(self.CERT + self.PKEY) def 
test_db_provider_with_bad_decrypt(self): """Verify loading plaintext PK from storage fails in encrypt mode""" mock.patch( "vmware_nsx.db.db.get_certificate", return_value=(self.CERT, self.PKEY)).start() # after decrypt failure, cert will be deleted mock.patch( "vmware_nsx.db.db.delete_certificate").start() self._init_config(password='topsecret') # since PK in DB is not encrypted, we should fail to decrypt it on load self.assertRaises(nsx_exc.ClientCertificateException, self._provider().__enter__) def test_basic_provider(self): fname = '/tmp/cert.pem' # with basic provider, the file is provided by admin with open(fname, 'w') as f: f.write(self.CERT) f.write(self.PKEY) self._init_config(storage_type='none', cert_file=fname) with self._provider as p: self.assertEqual(fname, p.filename()) self.validate_basic_provider(self.CERT + self.PKEY) os.remove(fname) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_v3/test_constants.py0000644000175000017500000000120400000000000026353 0ustar00coreycorey00000000000000# Copyright (c) 2015 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
PLUGIN_NAME = 'vmware_nsx.plugins.nsx_v3.plugin.NsxV3Plugin' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_v3/test_dhcp_metadata.py0000644000175000017500000015170500000000000027131 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock import netaddr from oslo_config import cfg from oslo_utils import uuidutils from neutron.extensions import securitygroup as secgrp from neutron_lib.api.definitions import provider_net as pnet from neutron_lib import constants from neutron_lib import context from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from vmware_nsx.common import config from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import utils from vmware_nsx.db import db as nsx_db from vmware_nsx.extensions import advancedserviceproviders as as_providers from vmware_nsx.plugins.nsx_v3 import availability_zones as nsx_az from vmware_nsx.tests.unit.nsx_v3 import test_plugin from vmware_nsxlib.v3 import core_resources from vmware_nsxlib.v3 import nsx_constants from vmware_nsxlib.v3 import resources as nsx_resources def set_az_in_config(name, metadata_proxy="metadata_proxy1", dhcp_profile="dhcp_profile1", native_metadata_route="2.2.2.2", dns_domain='aaaa', nameservers=['bbbb']): group_name = 'az:%s' % name 
cfg.CONF.set_override('availability_zones', [name], group="nsx_v3") config.register_nsxv3_azs(cfg.CONF, [name]) cfg.CONF.set_override("metadata_proxy", metadata_proxy, group=group_name) cfg.CONF.set_override("dhcp_profile", dhcp_profile, group=group_name) cfg.CONF.set_override("native_metadata_route", native_metadata_route, group=group_name) cfg.CONF.set_override("dns_domain", dns_domain, group=group_name) cfg.CONF.set_override("nameservers", nameservers, group=group_name) class NsxNativeDhcpTestCase(test_plugin.NsxV3PluginTestCaseMixin): def setUp(self): super(NsxNativeDhcpTestCase, self).setUp() self._orig_dhcp_agent_notification = cfg.CONF.dhcp_agent_notification self._orig_native_dhcp_metadata = cfg.CONF.nsx_v3.native_dhcp_metadata cfg.CONF.set_override('dhcp_agent_notification', False) cfg.CONF.set_override('native_dhcp_metadata', True, 'nsx_v3') self._az_name = 'zone1' self.az_metadata_route = '3.3.3.3' set_az_in_config(self._az_name, native_metadata_route=self.az_metadata_route) self._patcher = mock.patch.object(core_resources.NsxLibDhcpProfile, 'get') self._patcher.start() # Need to run some plugin init methods manually because plugin was # started before setUp() overrides CONF.nsx_v3.native_dhcp_metadata. 
self._initialize_azs() self.plugin._init_dhcp_metadata() def tearDown(self): self._patcher.stop() cfg.CONF.set_override('dhcp_agent_notification', self._orig_dhcp_agent_notification) cfg.CONF.set_override('native_dhcp_metadata', self._orig_native_dhcp_metadata, 'nsx_v3') super(NsxNativeDhcpTestCase, self).tearDown() def _make_subnet_data(self, name=None, network_id=None, cidr=None, gateway_ip=None, tenant_id=None, allocation_pools=None, enable_dhcp=True, dns_nameservers=None, ip_version=4, host_routes=None, shared=False): return {'subnet': { 'name': name, 'network_id': network_id, 'cidr': cidr, 'gateway_ip': gateway_ip, 'tenant_id': tenant_id, 'allocation_pools': allocation_pools, 'ip_version': ip_version, 'enable_dhcp': enable_dhcp, 'dns_nameservers': dns_nameservers, 'host_routes': host_routes, 'shared': shared}} def _verify_dhcp_service(self, network_id, tenant_id, enabled): # Verify if DHCP service is enabled on a network. port_res = self._list_ports('json', 200, network_id, tenant_id=tenant_id, device_owner=constants.DEVICE_OWNER_DHCP) port_list = self.deserialize('json', port_res) self.assertEqual(len(port_list['ports']) == 1, enabled) def _verify_dhcp_binding(self, subnet, port_data, update_data, assert_data): # Verify if DHCP binding is updated. with mock.patch( 'vmware_nsxlib.v3.resources.LogicalDhcpServer.update_binding' ) as update_dhcp_binding: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' device_id = uuidutils.generate_uuid() with self.port(subnet=subnet, device_owner=device_owner, device_id=device_id, **port_data) as port: # Retrieve the DHCP binding info created in the DB for the # new port. dhcp_binding = nsx_db.get_nsx_dhcp_bindings( context.get_admin_context().session, port['port']['id'])[0] # Update the port with provided data. 
self.plugin.update_port( context.get_admin_context(), port['port']['id'], update_data) binding_data = {'mac_address': port['port']['mac_address'], 'ip_address': port['port']['fixed_ips'][0][ 'ip_address']} # Extend basic binding data with to-be-asserted data. binding_data.update(assert_data) # Verify the update call. update_dhcp_binding.assert_called_once_with( dhcp_binding['nsx_service_id'], dhcp_binding['nsx_binding_id'], **binding_data) def test_dhcp_profile_configuration(self): # Test if dhcp_agent_notification and dhcp_profile are # configured correctly. orig_dhcp_agent_notification = cfg.CONF.dhcp_agent_notification cfg.CONF.set_override('dhcp_agent_notification', True) self.assertRaises(nsx_exc.NsxPluginException, self.plugin._init_dhcp_metadata) cfg.CONF.set_override('dhcp_agent_notification', orig_dhcp_agent_notification) orig_dhcp_profile_uuid = cfg.CONF.nsx_v3.dhcp_profile cfg.CONF.set_override('dhcp_profile', '', 'nsx_v3') self.assertRaises(cfg.RequiredOptError, self.plugin._translate_configured_names_to_uuids) cfg.CONF.set_override('dhcp_profile', orig_dhcp_profile_uuid, 'nsx_v3') def test_dhcp_service_with_create_network(self): # Test if DHCP service is disabled on a network when it is created. with self.network() as network: self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], False) def test_dhcp_service_with_delete_dhcp_network(self): # Test if DHCP service is disabled when directly deleting a network # with a DHCP-enabled subnet. with self.network() as network: with self.subnet(network=network, enable_dhcp=True): self.plugin.delete_network(context.get_admin_context(), network['network']['id']) self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], False) def test_dhcp_service_with_create_non_dhcp_subnet(self): # Test if DHCP service is disabled on a network when a DHCP-disabled # subnet is created. 
with self.network() as network: with self.subnet(network=network, enable_dhcp=False): self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], False) def test_dhcp_service_with_create_multiple_non_dhcp_subnets(self): # Test if DHCP service is disabled on a network when multiple # DHCP-disabled subnets are created. with self.network() as network: with self.subnet(network=network, cidr='10.0.0.0/24', enable_dhcp=False): with self.subnet(network=network, cidr='20.0.0.0/24', enable_dhcp=False): self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], False) def test_dhcp_service_with_create_dhcp_subnet(self): # Test if DHCP service is enabled on a network when a DHCP-enabled # subnet is created. with self.network() as network: with self.subnet(network=network, enable_dhcp=True): self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], True) def test_dhcp_service_with_create_dhcp_subnet_bulk(self): # Test if DHCP service is enabled on all networks after a # create_subnet_bulk operation. with self.network() as network1, self.network() as network2: subnet1 = self._make_subnet_data( network_id=network1['network']['id'], cidr='10.0.0.0/24', tenant_id=network1['network']['tenant_id']) subnet2 = self._make_subnet_data( network_id=network2['network']['id'], cidr='20.0.0.0/24', tenant_id=network2['network']['tenant_id']) subnets = {'subnets': [subnet1, subnet2]} with mock.patch.object(self.plugin, '_post_create_subnet' ) as post_create_subnet: self.plugin.create_subnet_bulk( context.get_admin_context(), subnets) # Check if post_create function has been called for # both subnets. self.assertEqual(len(subnets['subnets']), post_create_subnet.call_count) # Check if the bindings to backend DHCP entries are created. 
dhcp_service = nsx_db.get_nsx_service_binding( context.get_admin_context().session, network1['network']['id'], nsx_constants.SERVICE_DHCP) self.assertTrue(dhcp_service) dhcp_service = nsx_db.get_nsx_service_binding( context.get_admin_context().session, network2['network']['id'], nsx_constants.SERVICE_DHCP) self.assertTrue(dhcp_service) def test_dhcp_service_with_create_dhcp_subnet_bulk_failure(self): # Test if user-provided rollback function is invoked when # exception occurred during a create_subnet_bulk operation. with self.network() as network1, self.network() as network2: subnet1 = self._make_subnet_data( network_id=network1['network']['id'], cidr='10.0.0.0/24', tenant_id=network1['network']['tenant_id']) subnet2 = self._make_subnet_data( network_id=network2['network']['id'], cidr='20.0.0.0/24', tenant_id=network2['network']['tenant_id']) subnets = {'subnets': [subnet1, subnet2]} # Inject an exception on the second create_subnet call. orig_create_subnet = self.plugin.create_subnet with mock.patch.object(self.plugin, 'create_subnet') as create_subnet: def side_effect(*args, **kwargs): return self._fail_second_call( create_subnet, orig_create_subnet, *args, **kwargs) create_subnet.side_effect = side_effect with mock.patch.object(self.plugin, '_rollback_subnet') as rollback_subnet: try: self.plugin.create_subnet_bulk( context.get_admin_context(), subnets) except Exception: pass # Check if rollback function has been called for # the subnet in the first network. rollback_subnet.assert_called_once_with(mock.ANY, mock.ANY) subnet_arg = rollback_subnet.call_args[0][0] self.assertEqual(network1['network']['id'], subnet_arg['network_id']) # Check if the bindings to backend DHCP entries are removed. 
dhcp_service = nsx_db.get_nsx_service_binding( context.get_admin_context().session, network1['network']['id'], nsx_constants.SERVICE_DHCP) self.assertFalse(dhcp_service) dhcp_service = nsx_db.get_nsx_service_binding( context.get_admin_context().session, network2['network']['id'], nsx_constants.SERVICE_DHCP) self.assertFalse(dhcp_service) def test_dhcp_service_with_create_dhcp_subnet_in_vlan_network(self): # Test if a DHCP-enabled subnet cannot be created in a vlan network. # on nsx version that does not support it povidernet_args = {pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: 'tzuuid', pnet.SEGMENTATION_ID: 100} with mock.patch( 'vmware_nsxlib.v3.core_resources.NsxLibTransportZone.' 'get_transport_type', return_value='VLAN'),\ mock.patch.object(self.plugin.nsxlib, 'feature_supported', return_value=False),\ self.network(providernet_args=povidernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, pnet.SEGMENTATION_ID)) as network: subnet = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.0.0/24', 'enable_dhcp': True}} self.assertRaises( n_exc.InvalidInput, self.plugin.create_subnet, context.get_admin_context(), subnet) def test_dhcp_service_with_create_multiple_dhcp_subnets(self): # Test if multiple DHCP-enabled subnets cannot be created in a network. with self.network() as network: with self.subnet(network=network, cidr='10.0.0.0/24', enable_dhcp=True): subnet = {'subnet': {'network_id': network['network']['id'], 'cidr': '20.0.0.0/24', 'enable_dhcp': True}} self.assertRaises( n_exc.InvalidInput, self.plugin.create_subnet, context.get_admin_context(), subnet) def test_dhcp_service_with_delete_dhcp_subnet(self): # Test if DHCP service is disabled on a network when a DHCP-disabled # subnet is deleted. 
with self.network() as network: with self.subnet(network=network, enable_dhcp=True) as subnet: self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], True) self.plugin.delete_subnet(context.get_admin_context(), subnet['subnet']['id']) self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], False) def test_dhcp_service_with_update_dhcp_subnet(self): # Test if DHCP service is enabled on a network when a DHCP-disabled # subnet is updated to DHCP-enabled. with self.network() as network: with self.subnet(network=network, enable_dhcp=False) as subnet: self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], False) data = {'subnet': {'enable_dhcp': True}} self.plugin.update_subnet(context.get_admin_context(), subnet['subnet']['id'], data) self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], True) def test_dhcp_service_with_update_multiple_dhcp_subnets(self): # Test if a DHCP-disabled subnet cannot be updated to DHCP-enabled # if a DHCP-enabled subnet already exists in the same network. with self.network() as network: with self.subnet(network=network, cidr='10.0.0.0/24', enable_dhcp=True): with self.subnet(network=network, cidr='20.0.0.0/24', enable_dhcp=False) as subnet: self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], True) data = {'subnet': {'enable_dhcp': True}} self.assertRaises( n_exc.InvalidInput, self.plugin.update_subnet, context.get_admin_context(), subnet['subnet']['id'], data) def test_dhcp_service_with_update_dhcp_port(self): # Test if DHCP server IP is updated when the corresponding DHCP port # IP is changed. 
with mock.patch.object(nsx_resources.LogicalDhcpServer, 'update') as update_logical_dhcp_server: with self.subnet(cidr='10.0.0.0/24', enable_dhcp=True) as subnet: dhcp_service = nsx_db.get_nsx_service_binding( context.get_admin_context().session, subnet['subnet']['network_id'], nsx_constants.SERVICE_DHCP) port = self.plugin.get_port(context.get_admin_context(), dhcp_service['port_id']) old_ip = port['fixed_ips'][0]['ip_address'] new_ip = str(netaddr.IPAddress(old_ip) + 1) data = {'port': {'fixed_ips': [ {'subnet_id': subnet['subnet']['id'], 'ip_address': new_ip}]}} self.plugin.update_port(context.get_admin_context(), dhcp_service['port_id'], data) update_logical_dhcp_server.assert_called_once_with( dhcp_service['nsx_service_id'], server_ip=new_ip) def test_dhcp_binding_with_create_port(self): # Test if DHCP binding is added when a compute port is created. with mock.patch.object(nsx_resources.LogicalDhcpServer, 'create_binding', return_value={"id": uuidutils.generate_uuid()} ) as create_dhcp_binding: with self.subnet(enable_dhcp=True) as subnet: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' device_id = uuidutils.generate_uuid() with self.port(subnet=subnet, device_owner=device_owner, device_id=device_id) as port: dhcp_service = nsx_db.get_nsx_service_binding( context.get_admin_context().session, subnet['subnet']['network_id'], nsx_constants.SERVICE_DHCP) ip = port['port']['fixed_ips'][0]['ip_address'] hostname = 'host-%s' % ip.replace('.', '-') options = {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': ip}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': '0.0.0.0/0', 'next_hop': subnet['subnet']['gateway_ip']}]}} create_dhcp_binding.assert_called_once_with( dhcp_service['nsx_service_id'], port['port']['mac_address'], ip, hostname, cfg.CONF.nsx_v3.dhcp_lease_time, options, 
subnet['subnet']['gateway_ip']) def test_dhcp_binding_with_create_port_with_opts(self): # Test if DHCP binding is added when a compute port is created # with extra options. opt_name = 'interface-mtu' opt_code = 26 opt_val = '9000' with mock.patch.object(nsx_resources.LogicalDhcpServer, 'create_binding', return_value={"id": uuidutils.generate_uuid()} ) as create_dhcp_binding: with self.subnet(enable_dhcp=True) as subnet: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' device_id = uuidutils.generate_uuid() extra_dhcp_opts = [{'opt_name': opt_name, 'opt_value': opt_val}] with self.port(subnet=subnet, device_owner=device_owner, device_id=device_id, extra_dhcp_opts=extra_dhcp_opts, arg_list=('extra_dhcp_opts',)) as port: dhcp_service = nsx_db.get_nsx_service_binding( context.get_admin_context().session, subnet['subnet']['network_id'], nsx_constants.SERVICE_DHCP) ip = port['port']['fixed_ips'][0]['ip_address'] hostname = 'host-%s' % ip.replace('.', '-') options = {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': ip}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': '0.0.0.0/0', 'next_hop': subnet['subnet']['gateway_ip']}]}, 'others': [{'code': opt_code, 'values': [opt_val]}]} create_dhcp_binding.assert_called_once_with( dhcp_service['nsx_service_id'], port['port']['mac_address'], ip, hostname, cfg.CONF.nsx_v3.dhcp_lease_time, options, subnet['subnet']['gateway_ip']) def test_dhcp_binding_with_create_port_with_opts121(self): # Test if DHCP binding is added when a compute port is created # with extra option121. 
with mock.patch.object(nsx_resources.LogicalDhcpServer, 'create_binding', return_value={"id": uuidutils.generate_uuid()} ) as create_dhcp_binding: with self.subnet(enable_dhcp=True) as subnet: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' device_id = uuidutils.generate_uuid() extra_dhcp_opts = [{'opt_name': 'classless-static-route', 'opt_value': '1.0.0.0/24,1.2.3.4'}] with self.port(subnet=subnet, device_owner=device_owner, device_id=device_id, extra_dhcp_opts=extra_dhcp_opts, arg_list=('extra_dhcp_opts',)) as port: dhcp_service = nsx_db.get_nsx_service_binding( context.get_admin_context().session, subnet['subnet']['network_id'], nsx_constants.SERVICE_DHCP) ip = port['port']['fixed_ips'][0]['ip_address'] hostname = 'host-%s' % ip.replace('.', '-') options = {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': ip}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': '0.0.0.0/0', 'next_hop': subnet['subnet']['gateway_ip']}, {'network': '1.0.0.0/24', 'next_hop': '1.2.3.4'}]}} create_dhcp_binding.assert_called_once_with( dhcp_service['nsx_service_id'], port['port']['mac_address'], ip, hostname, cfg.CONF.nsx_v3.dhcp_lease_time, options, subnet['subnet']['gateway_ip']) def test_dhcp_binding_with_create_port_with_bad_opts(self): with self.subnet(enable_dhcp=True) as subnet: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' device_id = uuidutils.generate_uuid() ctx = context.get_admin_context() # Use illegal opt-name extra_dhcp_opts = [{'opt_name': 'Dummy', 'opt_value': 'Dummy'}] data = {'port': { 'name': 'dummy', 'network_id': subnet['subnet']['network_id'], 'tenant_id': subnet['subnet']['tenant_id'], 'device_owner': device_owner, 'device_id': device_id, 'extra_dhcp_opts': extra_dhcp_opts, 'admin_state_up': True, 'fixed_ips': [], 'mac_address': '00:00:00:00:00:01', }} 
self.assertRaises(n_exc.InvalidInput, self.plugin.create_port, ctx, data) # Use illegal option121 value extra_dhcp_opts = [{'opt_name': 'classless-static-route', 'opt_value': '1.0.0.0/24,5.5.5.5,cc'}] data['port']['extra_dhcp_opts'] = extra_dhcp_opts self.assertRaises(n_exc.InvalidInput, self.plugin.create_port, ctx, data) def test_dhcp_binding_with_disable_enable_dhcp(self): # Test if DHCP binding is preserved after DHCP is disabled and # re-enabled on a subnet. with self.subnet(enable_dhcp=True) as subnet: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' device_id = uuidutils.generate_uuid() with self.port(subnet=subnet, device_owner=device_owner, device_id=device_id) as port: ip = port['port']['fixed_ips'][0]['ip_address'] dhcp_bindings = nsx_db.get_nsx_dhcp_bindings( context.get_admin_context().session, port['port']['id']) dhcp_service = dhcp_bindings[0]['nsx_service_id'] self.assertEqual(1, len(dhcp_bindings)) self.assertEqual(ip, dhcp_bindings[0]['ip_address']) # Disable DHCP on subnet. data = {'subnet': {'enable_dhcp': False}} self.plugin.update_subnet(context.get_admin_context(), subnet['subnet']['id'], data) dhcp_bindings = nsx_db.get_nsx_dhcp_bindings( context.get_admin_context().session, port['port']['id']) self.assertEqual([], dhcp_bindings) # Re-enable DHCP on subnet. data = {'subnet': {'enable_dhcp': True}} self.plugin.update_subnet(context.get_admin_context(), subnet['subnet']['id'], data) dhcp_bindings = nsx_db.get_nsx_dhcp_bindings( context.get_admin_context().session, port['port']['id']) self.assertEqual(1, len(dhcp_bindings)) self.assertEqual(ip, dhcp_bindings[0]['ip_address']) # The DHCP service ID should be different because a new # logical DHCP server is created for re-enabling DHCP. self.assertNotEqual(dhcp_service, dhcp_bindings[0]['nsx_service_id']) def test_dhcp_binding_with_delete_port(self): # Test if DHCP binding is removed when the associated compute port # is deleted. 
with mock.patch.object(nsx_resources.LogicalDhcpServer, 'delete_binding') as delete_dhcp_binding: with self.subnet(enable_dhcp=True) as subnet: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' device_id = uuidutils.generate_uuid() with self.port(subnet=subnet, device_owner=device_owner, device_id=device_id) as port: dhcp_binding = nsx_db.get_nsx_dhcp_bindings( context.get_admin_context().session, port['port']['id'])[0] self.plugin.delete_port( context.get_admin_context(), port['port']['id']) delete_dhcp_binding.assert_called_once_with( dhcp_binding['nsx_service_id'], dhcp_binding['nsx_binding_id']) def test_dhcp_binding_with_update_port_delete_ip(self): # Test if DHCP binding is deleted when the IP of the associated # compute port is deleted. with mock.patch.object(nsx_resources.LogicalDhcpServer, 'delete_binding') as delete_dhcp_binding: with self.subnet(enable_dhcp=True) as subnet: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' device_id = uuidutils.generate_uuid() with self.port(subnet=subnet, device_owner=device_owner, device_id=device_id) as port: dhcp_binding = nsx_db.get_nsx_dhcp_bindings( context.get_admin_context().session, port['port']['id'])[0] data = {'port': {'fixed_ips': [], 'admin_state_up': False, secgrp.SECURITYGROUPS: []}} self.plugin.update_port( context.get_admin_context(), port['port']['id'], data) delete_dhcp_binding.assert_called_once_with( dhcp_binding['nsx_service_id'], dhcp_binding['nsx_binding_id']) def test_dhcp_binding_with_update_port_ip(self): # Test if DHCP binding is updated when the IP of the associated # compute port is changed. 
with self.subnet(cidr='10.0.0.0/24', enable_dhcp=True) as subnet: port_data = {'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.3'}]} new_ip = '10.0.0.4' update_data = {'port': {'fixed_ips': [ {'subnet_id': subnet['subnet']['id'], 'ip_address': new_ip}]}} assert_data = {'host_name': 'host-%s' % new_ip.replace('.', '-'), 'ip_address': new_ip, 'options': {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': new_ip}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': constants.IPv4_ANY, 'next_hop': subnet['subnet']['gateway_ip']}]}}} self._verify_dhcp_binding(subnet, port_data, update_data, assert_data) def test_dhcp_binding_with_update_port_mac(self): # Test if DHCP binding is updated when the Mac of the associated # compute port is changed. with self.subnet(enable_dhcp=True) as subnet: port_data = {'mac_address': '11:22:33:44:55:66'} new_mac = '22:33:44:55:66:77' update_data = {'port': {'mac_address': new_mac}} assert_data = {'mac_address': new_mac, 'options': {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': mock.ANY}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': constants.IPv4_ANY, 'next_hop': subnet['subnet']['gateway_ip']}]}}} self._verify_dhcp_binding(subnet, port_data, update_data, assert_data) def test_dhcp_binding_with_update_port_mac_ip(self): # Test if DHCP binding is updated when the IP and Mac of the associated # compute port are changed at the same time. 
with self.subnet(cidr='10.0.0.0/24', enable_dhcp=True) as subnet: port_data = {'mac_address': '11:22:33:44:55:66', 'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.0.3'}]} new_mac = '22:33:44:55:66:77' new_ip = '10.0.0.4' update_data = {'port': {'mac_address': new_mac, 'fixed_ips': [ {'subnet_id': subnet['subnet']['id'], 'ip_address': new_ip}]}} assert_data = {'host_name': 'host-%s' % new_ip.replace('.', '-'), 'mac_address': new_mac, 'ip_address': new_ip, 'options': {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': new_ip}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': constants.IPv4_ANY, 'next_hop': subnet['subnet']['gateway_ip']}]}}} self._verify_dhcp_binding(subnet, port_data, update_data, assert_data) def test_update_port_with_update_dhcp_opt(self): # Test updating extra-dhcp-opts via port update. with self.subnet(cidr='10.0.0.0/24', enable_dhcp=True) as subnet: mac_address = '11:22:33:44:55:66' ip_addr = '10.0.0.3' port_data = {'arg_list': ('extra_dhcp_opts',), 'mac_address': mac_address, 'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': ip_addr}], 'extra_dhcp_opts': [ {'opt_name': 'interface-mtu', 'opt_value': '9000'}]} update_data = {'port': {'extra_dhcp_opts': [ {'opt_name': 'interface-mtu', 'opt_value': '9002'}]}} assert_data = {'mac_address': mac_address, 'ip_address': ip_addr, 'options': {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': ip_addr}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': constants.IPv4_ANY, 'next_hop': subnet['subnet']['gateway_ip']}]}, 'others': [{'code': 26, 'values': ['9002']}]}} self._verify_dhcp_binding(subnet, port_data, update_data, assert_data) def 
test_update_port_with_adding_dhcp_opt(self): # Test adding extra-dhcp-opts via port update. with self.subnet(cidr='10.0.0.0/24', enable_dhcp=True) as subnet: mac_address = '11:22:33:44:55:66' ip_addr = '10.0.0.3' port_data = {'arg_list': ('extra_dhcp_opts',), 'mac_address': mac_address, 'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': ip_addr}], 'extra_dhcp_opts': [ {'opt_name': 'nis-domain', 'opt_value': 'abc'}]} update_data = {'port': {'extra_dhcp_opts': [ {'opt_name': 'interface-mtu', 'opt_value': '9002'}]}} assert_data = {'mac_address': mac_address, 'ip_address': ip_addr, 'options': {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': ip_addr}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': constants.IPv4_ANY, 'next_hop': subnet['subnet']['gateway_ip']}]}, 'others': [{'code': 26, 'values': ['9002']}, {'code': 40, 'values': ['abc']}]}} self._verify_dhcp_binding(subnet, port_data, update_data, assert_data) def test_update_port_with_deleting_dhcp_opt(self): # Test adding extra-dhcp-opts via port update. 
with self.subnet(cidr='10.0.0.0/24', enable_dhcp=True) as subnet: mac_address = '11:22:33:44:55:66' ip_addr = '10.0.0.3' port_data = {'arg_list': ('extra_dhcp_opts',), 'mac_address': mac_address, 'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': ip_addr}], 'extra_dhcp_opts': [ {'opt_name': 'nis-domain', 'opt_value': 'abc'}, {'opt_name': 'interface-mtu', 'opt_value': '9002'}]} update_data = {'port': {'extra_dhcp_opts': [ {'opt_name': 'interface-mtu', 'opt_value': None}]}} assert_data = {'mac_address': mac_address, 'ip_address': ip_addr, 'options': {'option121': {'static_routes': [ {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % cfg.CONF.nsx_v3.native_metadata_route, 'next_hop': ip_addr}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': constants.IPv4_ANY, 'next_hop': subnet['subnet']['gateway_ip']}]}, 'others': [{'code': 40, 'values': ['abc']}]}} self._verify_dhcp_binding(subnet, port_data, update_data, assert_data) def test_dhcp_binding_with_update_port_name(self): # Test if DHCP binding is not updated when the name of the associated # compute port is changed. 
with mock.patch.object(nsx_resources.LogicalDhcpServer, 'update_binding') as update_dhcp_binding: with self.subnet(cidr='10.0.0.0/24', enable_dhcp=True) as subnet: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None' device_id = uuidutils.generate_uuid() with self.port(subnet=subnet, device_owner=device_owner, device_id=device_id, name='abc') as port: data = {'port': {'name': 'xyz'}} self.plugin.update_port( context.get_admin_context(), port['port']['id'], data) update_dhcp_binding.assert_not_called() def test_create_network_with_bad_az_hint(self): p = directory.get_plugin() ctx = context.get_admin_context() data = {'network': { 'name': 'test-az', 'tenant_id': self._tenant_id, 'port_security_enabled': False, 'admin_state_up': True, 'shared': False, 'availability_zone_hints': ['bad_hint'] }} self.assertRaises(n_exc.NeutronException, p.create_network, ctx, data) def test_create_network_with_az_hint(self): p = directory.get_plugin() ctx = context.get_admin_context() data = {'network': { 'name': 'test-az', 'tenant_id': self._tenant_id, 'port_security_enabled': False, 'admin_state_up': True, 'shared': False, 'availability_zone_hints': [self._az_name] }} # network creation should succeed net = p.create_network(ctx, data) self.assertEqual([self._az_name], net['availability_zone_hints']) self.assertEqual([self._az_name], net['availability_zones']) def test_create_network_with_no_az_hint(self): p = directory.get_plugin() ctx = context.get_admin_context() data = {'network': { 'name': 'test-az', 'tenant_id': self._tenant_id, 'port_security_enabled': False, 'admin_state_up': True, 'shared': False }} # network creation should succeed net = p.create_network(ctx, data) self.assertEqual([], net['availability_zone_hints']) self.assertEqual([nsx_az.DEFAULT_NAME], net['availability_zones']) def test_dhcp_service_with_create_az_network(self): # Test if DHCP service is disabled on a network when it is created. 
with self.network(availability_zone_hints=[self._az_name], arg_list=('availability_zone_hints',)) as network: self._verify_dhcp_service(network['network']['id'], network['network']['tenant_id'], False) def test_dhcp_binding_with_create_az_port(self): # Test if DHCP binding is added when a compute port is created. with mock.patch.object(nsx_resources.LogicalDhcpServer, 'create_binding', return_value={"id": uuidutils.generate_uuid()} ) as create_dhcp_binding: with self.network( availability_zone_hints=[self._az_name], arg_list=('availability_zone_hints',)) as network: with self.subnet(enable_dhcp=True, network=network) as subnet: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'X' device_id = uuidutils.generate_uuid() with self.port(subnet=subnet, device_owner=device_owner, device_id=device_id) as port: dhcp_service = nsx_db.get_nsx_service_binding( context.get_admin_context().session, subnet['subnet']['network_id'], nsx_constants.SERVICE_DHCP) ip = port['port']['fixed_ips'][0]['ip_address'] hostname = 'host-%s' % ip.replace('.', '-') options = {'option121': {'static_routes': [ {'network': '%s' % self.az_metadata_route, 'next_hop': '0.0.0.0'}, {'network': '%s' % self.az_metadata_route, 'next_hop': ip}, {'network': subnet['subnet']['cidr'], 'next_hop': '0.0.0.0'}, {'network': '0.0.0.0/0', 'next_hop': subnet['subnet']['gateway_ip']}]}} create_dhcp_binding.assert_called_once_with( dhcp_service['nsx_service_id'], port['port']['mac_address'], ip, hostname, cfg.CONF.nsx_v3.dhcp_lease_time, options, subnet['subnet']['gateway_ip']) def test_create_subnet_with_dhcp_port(self): with self.subnet(enable_dhcp=True) as subnet: # find the dhcp port and verify it has port security disabled ports = self.plugin.get_ports( context.get_admin_context()) self.assertEqual(1, len(ports)) self.assertEqual('network:dhcp', ports[0]['device_owner']) self.assertEqual(subnet['subnet']['network_id'], ports[0]['network_id']) self.assertEqual(False, ports[0]['port_security_enabled']) class 
NsxNativeMetadataTestCase(test_plugin.NsxV3PluginTestCaseMixin): def setUp(self): super(NsxNativeMetadataTestCase, self).setUp() self._orig_dhcp_agent_notification = cfg.CONF.dhcp_agent_notification self._orig_native_dhcp_metadata = cfg.CONF.nsx_v3.native_dhcp_metadata cfg.CONF.set_override('dhcp_agent_notification', False) cfg.CONF.set_override('native_dhcp_metadata', True, 'nsx_v3') self._az_name = 'zone1' self._az_metadata_proxy = 'dummy' set_az_in_config(self._az_name, metadata_proxy=self._az_metadata_proxy) self._patcher = mock.patch.object(core_resources.NsxLibMetadataProxy, 'get') self._patcher.start() self._initialize_azs() self.plugin._init_dhcp_metadata() def tearDown(self): self._patcher.stop() cfg.CONF.set_override('dhcp_agent_notification', self._orig_dhcp_agent_notification) cfg.CONF.set_override('native_dhcp_metadata', self._orig_native_dhcp_metadata, 'nsx_v3') super(NsxNativeMetadataTestCase, self).tearDown() def test_metadata_proxy_configuration(self): # Test if dhcp_agent_notification and metadata_proxy are # configured correctly. orig_dhcp_agent_notification = cfg.CONF.dhcp_agent_notification cfg.CONF.set_override('dhcp_agent_notification', True) self.assertRaises(nsx_exc.NsxPluginException, self.plugin._init_dhcp_metadata) cfg.CONF.set_override('dhcp_agent_notification', orig_dhcp_agent_notification) orig_metadata_proxy_uuid = cfg.CONF.nsx_v3.metadata_proxy cfg.CONF.set_override('metadata_proxy', '', 'nsx_v3') self.assertRaises(cfg.RequiredOptError, self.plugin._translate_configured_names_to_uuids) cfg.CONF.set_override('metadata_proxy', orig_metadata_proxy_uuid, 'nsx_v3') def test_metadata_proxy_with_create_network(self): # Test if native metadata proxy is enabled on a network when it is # created. 
with mock.patch.object(nsx_resources.LogicalPort, 'create') as create_logical_port: with self.network() as network: nsx_net_id = self.plugin._get_network_nsx_id( context.get_admin_context(), network['network']['id']) tags = self.plugin.nsxlib.build_v3_tags_payload( network['network'], resource_type='os-neutron-net-id', project_name=None) name = utils.get_name_and_uuid('%s-%s' % ( 'mdproxy', network['network']['name'] or 'network'), network['network']['id']) create_logical_port.assert_called_once_with( nsx_net_id, cfg.CONF.nsx_v3.metadata_proxy, tags=tags, name=name, attachment_type=nsx_constants.ATTACHMENT_MDPROXY) def test_metadata_proxy_with_create_az_network(self): # Test if native metadata proxy is enabled on a network when it is # created. with mock.patch.object(nsx_resources.LogicalPort, 'create') as create_logical_port: with self.network( availability_zone_hints=[self._az_name], arg_list=('availability_zone_hints',)) as network: nsx_net_id = self.plugin._get_network_nsx_id( context.get_admin_context(), network['network']['id']) tags = self.plugin.nsxlib.build_v3_tags_payload( network['network'], resource_type='os-neutron-net-id', project_name=None) name = utils.get_name_and_uuid('%s-%s' % ( 'mdproxy', network['network']['name'] or 'network'), network['network']['id']) create_logical_port.assert_called_once_with( nsx_net_id, self._az_metadata_proxy, tags=tags, name=name, attachment_type=nsx_constants.ATTACHMENT_MDPROXY) def test_metadata_proxy_with_get_subnets(self): # Test if get_subnets() handles advanced-service-provider extension, # which is used when processing metadata requests. with self.network() as n1, self.network() as n2: with self.subnet(network=n1) as s1, self.subnet(network=n2) as s2: # Get all the subnets. 
subnets = self._list('subnets')['subnets'] self.assertEqual(len(subnets), 2) self.assertEqual(set([s['id'] for s in subnets]), set([s1['subnet']['id'], s2['subnet']['id']])) lswitch_id = nsx_db.get_nsx_switch_ids( context.get_admin_context().session, n1['network']['id'])[0] # Get only the subnets associated with a particular advanced # service provider (i.e. logical switch). subnets = self._list('subnets', query_params='%s=%s' % (as_providers.ADV_SERVICE_PROVIDERS, lswitch_id))['subnets'] self.assertEqual(len(subnets), 1) self.assertEqual(subnets[0]['id'], s1['subnet']['id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_v3/test_fwaas_v2_driver.py0000644000175000017500000004674000000000000027440 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import mock from neutron_lib.plugins import directory from vmware_nsx.services.fwaas.nsx_v3 import edge_fwaas_driver_v2 from vmware_nsx.services.fwaas.nsx_v3 import fwaas_callbacks_v2 from vmware_nsx.tests.unit.nsx_v3 import test_plugin as test_v3_plugin from vmware_nsxlib.v3 import nsx_constants as consts FAKE_FW_ID = 'fake_fw_uuid' FAKE_ROUTER_ID = 'fake_rtr_uuid' FAKE_PORT_ID = 'fake_port_uuid' FAKE_NET_ID = 'fake_net_uuid' FAKE_NSX_LS_ID = 'fake_nsx_ls_uuid' MOCK_NSX_ID = 'nsx_nsx_router_id' MOCK_DEFAULT_RULE_ID = 'nsx_default_rule_id' MOCK_SECTION_ID = 'sec_id' DEFAULT_RULE = {'is_default': True, 'display_name': edge_fwaas_driver_v2.DEFAULT_RULE_NAME, 'id': MOCK_DEFAULT_RULE_ID, 'action': consts.FW_ACTION_DROP} class Nsxv3FwaasTestCase(test_v3_plugin.NsxV3PluginTestCaseMixin): def setUp(self): super(Nsxv3FwaasTestCase, self).setUp() self.firewall = edge_fwaas_driver_v2.EdgeFwaasV3DriverV2() # Start some nsxlib/DB mocks mock.patch( "vmware_nsxlib.v3.core_resources.NsxLibLogicalRouter." "get_firewall_section_id", return_value=MOCK_SECTION_ID).start() mock.patch( "vmware_nsxlib.v3.security.NsxLibFirewallSection." 
"get_default_rule", return_value={'id': MOCK_DEFAULT_RULE_ID}).start() mock.patch( "vmware_nsx.db.db.get_nsx_router_id", return_value=MOCK_NSX_ID).start() self.plugin = directory.get_plugin() self.plugin.fwaas_callbacks = fwaas_callbacks_v2.\ Nsxv3FwaasCallbacksV2(False) self.plugin.fwaas_callbacks.fwaas_enabled = True self.plugin.fwaas_callbacks.fwaas_driver = self.firewall self.plugin.fwaas_callbacks.internal_driver = self.firewall self.plugin.init_is_complete = True def _default_rule(self): rule = DEFAULT_RULE rule['action'] = consts.FW_ACTION_ALLOW return rule def _fake_rules_v4(self, is_ingress=True, cidr='10.24.4.0/24', is_conflict=False): rule1 = {'enabled': True, 'action': 'allow', 'ip_version': 4, 'protocol': 'tcp', 'destination_port': '80', 'id': 'fake-fw-rule1', 'description': 'first rule'} rule2 = {'enabled': True, 'action': 'reject', 'ip_version': 4, 'protocol': 'tcp', 'destination_port': '22:24', 'source_port': '1:65535', 'id': 'fake-fw-rule2'} rule3 = {'enabled': True, 'action': 'deny', 'ip_version': 4, 'protocol': 'icmp', 'id': 'fake-fw-rule3'} rule4 = {'enabled': True, 'action': 'deny', 'ip_version': 4, 'id': 'fake-fw-rule4'} if is_ingress: if not is_conflict: rule1['source_ip_address'] = cidr else: rule1['destination_ip_address'] = cidr else: if not is_conflict: rule1['destination_ip_address'] = cidr else: rule1['source_ip_address'] = cidr return [rule1, rule2, rule3, rule4] def _translated_cidr(self, cidr): if cidr is None: return [] else: return [{'target_id': cidr, 'target_type': 'IPv4Address'}] def _fake_translated_rules(self, nsx_port_id, cidr='10.24.4.0/24', is_ingress=True, is_conflict=False, logged=False): # The expected translation of the rules in _fake_rules_v4 service1 = {'l4_protocol': 'TCP', 'resource_type': 'L4PortSetNSService', 'destination_ports': ['80'], 'source_ports': []} rule1 = {'action': 'ALLOW', 'services': [{'service': service1}], 'sources': self._translated_cidr(cidr), 'display_name': 'Fwaas-fake-fw-rule1', 'notes': 'first 
rule'} if ((is_ingress and is_conflict) or (not is_ingress and not is_conflict)): # Swap ips rule1['destinations'] = rule1['sources'] del rule1['sources'] if 'sources' in rule1 and not rule1['sources']: del rule1['sources'] service2 = {'l4_protocol': 'TCP', 'resource_type': 'L4PortSetNSService', 'destination_ports': ['22-24'], 'source_ports': ['1-65535']} rule2 = {'action': 'DROP', # Reject is replaced with deny 'services': [{'service': service2}], 'display_name': 'Fwaas-fake-fw-rule2'} service3_1 = {'resource_type': 'ICMPTypeNSService', 'protocol': 'ICMPv4'} service3_2 = {'resource_type': 'ICMPTypeNSService', 'protocol': 'ICMPv6'} rule3 = {'action': 'DROP', # icmp is translated to icmp v4 & v6 'services': [{'service': service3_1}, {'service': service3_2}], 'display_name': 'Fwaas-fake-fw-rule3'} rule4 = {'action': 'DROP', 'display_name': 'Fwaas-fake-fw-rule4'} if nsx_port_id: if is_ingress: field = 'destinations' direction = 'IN' else: field = 'sources' direction = 'OUT' new_val = [{'target_id': nsx_port_id, 'target_type': 'LogicalSwitch'}] for rule in (rule1, rule2, rule3, rule4): if not rule.get(field): rule[field] = new_val rule['direction'] = direction if logged: for rule in (rule1, rule2, rule3, rule4): rule['logged'] = logged return [rule1, rule2, rule3, rule4] def _fake_empty_firewall_group(self): fw_inst = {'id': FAKE_FW_ID, 'admin_state_up': True, 'tenant_id': 'tenant-uuid', 'ingress_rule_list': [], 'egress_rule_list': []} return fw_inst def _fake_firewall_group(self, rule_list, is_ingress=True, admin_state_up=True): _rule_list = copy.deepcopy(rule_list) for rule in _rule_list: rule['position'] = str(_rule_list.index(rule)) fw_inst = {'id': FAKE_FW_ID, 'admin_state_up': admin_state_up, 'tenant_id': 'tenant-uuid', 'ingress_rule_list': [], 'egress_rule_list': []} if is_ingress: fw_inst['ingress_rule_list'] = _rule_list else: fw_inst['egress_rule_list'] = _rule_list return fw_inst def _fake_firewall_group_with_admin_down(self, rule_list, is_ingress=True): 
return self._fake_firewall_group( rule_list, is_ingress=is_ingress, admin_state_up=False) def _fake_apply_list(self): router_inst = {'id': FAKE_ROUTER_ID, 'external_gateway_info': 'dummy'} router_info_inst = mock.Mock() router_info_inst.router = router_inst router_info_inst.router_id = FAKE_ROUTER_ID apply_list = [(router_info_inst, FAKE_PORT_ID)] return apply_list def test_create_firewall_no_rules(self): apply_list = self._fake_apply_list() firewall = self._fake_empty_firewall_group() port = {'id': FAKE_PORT_ID, 'network_id': FAKE_NET_ID} with mock.patch.object(self.plugin, '_get_router_interfaces', return_value=[port]),\ mock.patch.object(self.plugin, 'get_port', return_value=port),\ mock.patch.object(self.plugin.fwaas_callbacks, 'get_port_fwg', return_value=firewall),\ mock.patch.object(self.plugin, 'service_router_has_services', return_value=True),\ mock.patch("vmware_nsx.db.db.get_nsx_switch_and_port_id", return_value=(FAKE_NSX_LS_ID, 0)),\ mock.patch("vmware_nsxlib.v3.security.NsxLibFirewallSection." 
"update") as update_fw: self.firewall.create_firewall_group('nsx', apply_list, firewall) # expecting 2 block rules for the logical port (egress & ingress) # and last default allow all rule expected_rules = [ {'display_name': "Block port ingress", 'action': consts.FW_ACTION_DROP, 'destinations': [{'target_type': 'LogicalSwitch', 'target_id': FAKE_NSX_LS_ID}], 'direction': 'IN'}, {'display_name': "Block port egress", 'action': consts.FW_ACTION_DROP, 'sources': [{'target_type': 'LogicalSwitch', 'target_id': FAKE_NSX_LS_ID}], 'direction': 'OUT'}, self._default_rule() ] update_fw.assert_called_once_with( MOCK_SECTION_ID, rules=expected_rules) def _setup_firewall_with_rules(self, func, is_ingress=True, is_conflict=False): apply_list = self._fake_apply_list() rule_list = self._fake_rules_v4(is_ingress=is_ingress, is_conflict=is_conflict) firewall = self._fake_firewall_group(rule_list, is_ingress=is_ingress) port = {'id': FAKE_PORT_ID, 'network_id': FAKE_NET_ID} with mock.patch.object(self.plugin, '_get_router_interfaces', return_value=[port]),\ mock.patch.object(self.plugin, 'get_port', return_value=port),\ mock.patch.object(self.plugin.fwaas_callbacks, 'get_port_fwg', return_value=firewall), \ mock.patch.object(self.plugin, 'service_router_has_services', return_value=True), \ mock.patch("vmware_nsx.db.db.get_nsx_switch_and_port_id", return_value=(FAKE_NSX_LS_ID, 0)),\ mock.patch("vmware_nsxlib.v3.security.NsxLibFirewallSection." 
"update") as update_fw: func('nsx', apply_list, firewall) expected_rules = self._fake_translated_rules( FAKE_NSX_LS_ID, is_ingress=is_ingress, is_conflict=is_conflict) + [ {'display_name': "Block port ingress", 'action': consts.FW_ACTION_DROP, 'destinations': [{'target_type': 'LogicalSwitch', 'target_id': FAKE_NSX_LS_ID}], 'direction': 'IN'}, {'display_name': "Block port egress", 'action': consts.FW_ACTION_DROP, 'sources': [{'target_type': 'LogicalSwitch', 'target_id': FAKE_NSX_LS_ID}], 'direction': 'OUT'}, self._default_rule() ] update_fw.assert_called_once_with( MOCK_SECTION_ID, rules=expected_rules) def test_create_firewall_with_ingress_rules(self): self._setup_firewall_with_rules(self.firewall.create_firewall_group) def test_update_firewall_with_ingress_rules(self): self._setup_firewall_with_rules(self.firewall.update_firewall_group) def test_create_firewall_with_egress_rules(self): self._setup_firewall_with_rules(self.firewall.create_firewall_group, is_ingress=False) def test_update_firewall_with_egress_rules(self): self._setup_firewall_with_rules(self.firewall.update_firewall_group, is_ingress=False) def test_create_firewall_with_egress_conflicting_rules(self): self._setup_firewall_with_rules(self.firewall.update_firewall_group, is_ingress=False, is_conflict=True) def test_create_firewall_with_ingress_conflicting_rules(self): self._setup_firewall_with_rules(self.firewall.update_firewall_group, is_ingress=True, is_conflict=True) def test_create_firewall_with_illegal_cidr(self): apply_list = self._fake_apply_list() rule_list = self._fake_rules_v4(cidr='0.0.0.0/24') firewall = self._fake_firewall_group(rule_list) port = {'id': FAKE_PORT_ID, 'network_id': FAKE_NET_ID} with mock.patch.object(self.plugin, '_get_router_interfaces', return_value=[port]),\ mock.patch.object(self.plugin, 'get_port', return_value=port), \ mock.patch.object(self.plugin, 'service_router_has_services', return_value=True), \ mock.patch.object(self.plugin.fwaas_callbacks, 'get_port_fwg', 
return_value=firewall),\ mock.patch("vmware_nsx.db.db.get_nsx_switch_and_port_id", return_value=(FAKE_NSX_LS_ID, 0)),\ mock.patch("vmware_nsxlib.v3.security.NsxLibFirewallSection." "update") as update_fw: self.firewall.create_firewall_group('nsx', apply_list, firewall) expected_rules = self._fake_translated_rules( FAKE_NSX_LS_ID, cidr=None) + [ {'display_name': "Block port ingress", 'action': consts.FW_ACTION_DROP, 'destinations': [{'target_type': 'LogicalSwitch', 'target_id': FAKE_NSX_LS_ID}], 'direction': 'IN'}, {'display_name': "Block port egress", 'action': consts.FW_ACTION_DROP, 'sources': [{'target_type': 'LogicalSwitch', 'target_id': FAKE_NSX_LS_ID}], 'direction': 'OUT'}, self._default_rule() ] update_fw.assert_called_once_with( MOCK_SECTION_ID, rules=expected_rules) def test_delete_firewall(self): apply_list = self._fake_apply_list() firewall = self._fake_empty_firewall_group() port = {'id': FAKE_PORT_ID} with mock.patch.object(self.plugin, '_get_router_interfaces', return_value=[port]),\ mock.patch.object(self.plugin.fwaas_callbacks, 'get_port_fwg', return_value=None), \ mock.patch.object(self.plugin, 'service_router_has_services', return_value=True), \ mock.patch("vmware_nsx.db.db.get_nsx_switch_and_port_id", return_value=(FAKE_NSX_LS_ID, 0)),\ mock.patch("vmware_nsxlib.v3.security.NsxLibFirewallSection." 
"update") as update_fw: self.firewall.delete_firewall_group('nsx', apply_list, firewall) update_fw.assert_called_once_with( MOCK_SECTION_ID, rules=[self._default_rule()]) def test_create_firewall_with_admin_down(self): apply_list = self._fake_apply_list() rule_list = self._fake_rules_v4() firewall = self._fake_firewall_group_with_admin_down(rule_list) with mock.patch.object(self.plugin, 'service_router_has_services', return_value=True), \ mock.patch("vmware_nsxlib.v3.security.NsxLibFirewallSection" ".update") as update_fw: self.firewall.create_firewall_group('nsx', apply_list, firewall) update_fw.assert_called_once_with( MOCK_SECTION_ID, rules=[self._default_rule()]) def test_create_firewall_with_dhcp_relay(self): apply_list = self._fake_apply_list() firewall = self._fake_empty_firewall_group() port = {'id': FAKE_PORT_ID, 'network_id': FAKE_NET_ID} relay_server = '1.1.1.1' with mock.patch.object(self.plugin, '_get_router_interfaces', return_value=[port]),\ mock.patch.object(self.plugin, 'get_port', return_value=port),\ mock.patch.object(self.plugin, '_get_port_relay_servers', return_value=[relay_server]),\ mock.patch.object(self.plugin.fwaas_callbacks, 'get_port_fwg', return_value=firewall), \ mock.patch.object(self.plugin, 'service_router_has_services', return_value=True), \ mock.patch("vmware_nsx.db.db.get_nsx_switch_and_port_id", return_value=(FAKE_NSX_LS_ID, 0)),\ mock.patch("vmware_nsxlib.v3.security.NsxLibFirewallSection." 
"update") as update_fw: self.firewall.create_firewall_group('nsx', apply_list, firewall) # expecting 2 allow rules for the relay servers, # 2 block rules for the logical port (egress & ingress) # and last default allow all rule expected_rules = [ {'display_name': "DHCP Relay ingress traffic", 'action': consts.FW_ACTION_ALLOW, 'destinations': [{'target_type': 'LogicalSwitch', 'target_id': FAKE_NSX_LS_ID}], 'sources': [{'target_id': relay_server, 'target_type': 'IPv4Address'}], 'services': self.plugin._get_port_relay_services(), 'direction': 'IN'}, {'display_name': "DHCP Relay egress traffic", 'action': consts.FW_ACTION_ALLOW, 'sources': [{'target_type': 'LogicalSwitch', 'target_id': FAKE_NSX_LS_ID}], 'destinations': [{'target_id': relay_server, 'target_type': 'IPv4Address'}], 'services': self.plugin._get_port_relay_services(), 'direction': 'OUT'}, {'display_name': "Block port ingress", 'action': consts.FW_ACTION_DROP, 'destinations': [{'target_type': 'LogicalSwitch', 'target_id': FAKE_NSX_LS_ID}], 'direction': 'IN'}, {'display_name': "Block port egress", 'action': consts.FW_ACTION_DROP, 'sources': [{'target_type': 'LogicalSwitch', 'target_id': FAKE_NSX_LS_ID}], 'direction': 'OUT'}, self._default_rule() ] update_fw.assert_called_once_with( MOCK_SECTION_ID, rules=expected_rules) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsx_v3/test_plugin.py0000644000175000017500000047111700000000000025653 0ustar00coreycorey00000000000000# Copyright (c) 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock import netaddr from neutron.db import l3_db from neutron.db import models_v2 from neutron.db import securitygroups_db as sg_db from neutron.extensions import address_scope from neutron.extensions import l3 from neutron.extensions import securitygroup as secgrp from neutron.tests.unit import _test_extension_portbindings as test_bindings from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin from neutron.tests.unit.extensions import test_address_scope from neutron.tests.unit.extensions import test_extra_dhcp_opt as test_dhcpopts from neutron.tests.unit.extensions import test_extraroute as test_ext_route from neutron.tests.unit.extensions import test_l3 as test_l3_plugin from neutron.tests.unit.extensions \ import test_l3_ext_gw_mode as test_ext_gw_mode from neutron.tests.unit.scheduler \ import test_dhcp_agent_scheduler as test_dhcpagent from neutron.tests.unit import testlib_api from neutron_lib.api.definitions import external_net as extnet_apidef from neutron_lib.api.definitions import extraroute as xroute_apidef from neutron_lib.api.definitions import l3_ext_gw_mode as l3_egm_apidef from neutron_lib.api.definitions import port_security as psec from neutron_lib.api.definitions import portbindings from neutron_lib.api.definitions import provider_net as pnet from neutron_lib.api.definitions import vlantransparent as vlan_apidef from neutron_lib.callbacks import events from neutron_lib.callbacks import exceptions as nc_exc from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import 
constants from neutron_lib import context from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from neutron_lib.plugins import utils as plugin_utils from oslo_config import cfg from oslo_db import exception as db_exc from oslo_utils import uuidutils from webob import exc from vmware_nsx.api_client import exception as api_exc from vmware_nsx.common import utils from vmware_nsx.db import db as nsx_db from vmware_nsx.plugins.nsx_v3 import plugin as nsx_plugin from vmware_nsx.services.lbaas.nsx_v3.implementation import loadbalancer_mgr from vmware_nsx.services.lbaas.octavia import octavia_listener from vmware_nsx.tests import unit as vmware from vmware_nsx.tests.unit.common_plugin import common_v3 from vmware_nsx.tests.unit.extensions import test_metadata from vmware_nsxlib.tests.unit.v3 import mocks as nsx_v3_mocks from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase from vmware_nsxlib.v3 import exceptions as nsxlib_exc from vmware_nsxlib.v3 import nsx_constants PLUGIN_NAME = 'vmware_nsx.plugin.NsxV3Plugin' NSX_TZ_NAME = 'default transport zone' NSX_DHCP_PROFILE_ID = 'default dhcp profile' NSX_METADATA_PROXY_ID = 'default metadata proxy' NSX_SWITCH_PROFILE = 'dummy switch profile' NSX_DHCP_RELAY_SRV = 'dhcp relay srv' NSX_EDGE_CLUSTER_UUID = 'dummy edge cluster' def _mock_create_firewall_rules(*args): # NOTE(arosen): the code in the neutron plugin expects the # neutron rule id as the display_name. rules = args[4] return { 'rules': [ {'display_name': rule['id'], 'id': uuidutils.generate_uuid()} for rule in rules ]} def _return_id_key(*args, **kwargs): return {'id': uuidutils.generate_uuid()} def _return_id_key_list(*args, **kwargs): return [{'id': uuidutils.generate_uuid()}] def _mock_add_rules_in_section(*args): # NOTE(arosen): the code in the neutron plugin expects the # neutron rule id as the display_name. 
rules = args[0] return { 'rules': [ {'display_name': rule['display_name'], 'id': uuidutils.generate_uuid()} for rule in rules ]} def _mock_nsx_backend_calls(): mock.patch("vmware_nsxlib.v3.client.NSX3Client").start() fake_profile = {'key': 'FakeKey', 'resource_type': 'FakeResource', 'id': uuidutils.generate_uuid()} def _return_id(*args, **kwargs): return uuidutils.generate_uuid() def _return_same(key, *args, **kwargs): return key mock.patch( "vmware_nsxlib.v3.core_resources.NsxLibSwitchingProfile." "find_by_display_name", return_value=[fake_profile] ).start() mock.patch( "vmware_nsxlib.v3.router.RouterLib.validate_tier0").start() mock.patch( "vmware_nsxlib.v3.core_resources.NsxLibSwitchingProfile." "create_port_mirror_profile", side_effect=_return_id_key).start() mock.patch( "vmware_nsxlib.v3.core_resources.NsxLibBridgeEndpoint.create", side_effect=_return_id_key).start() mock.patch( "vmware_nsxlib.v3.security.NsxLibNsGroup.find_by_display_name", side_effect=_return_id_key_list).start() mock.patch( "vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.create", side_effect=_return_id_key).start() mock.patch( "vmware_nsxlib.v3.core_resources.NsxLibDhcpProfile." "get_id_by_name_or_id", return_value=NSX_DHCP_PROFILE_ID).start() mock.patch( "vmware_nsxlib.v3.core_resources.NsxLibDhcpRelayService." "get_id_by_name_or_id", return_value=NSX_DHCP_RELAY_SRV).start() mock.patch( "vmware_nsxlib.v3.core_resources.NsxLibMetadataProxy." "get_id_by_name_or_id", side_effect=_return_same).start() mock.patch( "vmware_nsxlib.v3.resources.LogicalPort.create", side_effect=_return_id_key).start() mock.patch( "vmware_nsxlib.v3.core_resources.NsxLibLogicalRouter.create", side_effect=_return_id_key).start() mock.patch( "vmware_nsxlib.v3.resources.LogicalDhcpServer.create", side_effect=_return_id_key).start() mock.patch( "vmware_nsxlib.v3.resources.LogicalDhcpServer.create_binding", side_effect=_return_id_key).start() mock.patch( "vmware_nsxlib.v3.core_resources.NsxLibLogicalRouter." 
"get_firewall_section_id", side_effect=_return_id_key).start() mock.patch( "vmware_nsxlib.v3.NsxLib.get_version", return_value='2.4.0').start() mock.patch( "vmware_nsxlib.v3.load_balancer.Service.get_router_lb_service", return_value=None).start() mock.patch('vmware_nsxlib.v3.core_resources.NsxLibTransportZone.' 'get_transport_type', return_value='OVERLAY').start() mock.patch("vmware_nsxlib.v3.core_resources.NsxLibEdgeCluster." "get_transport_nodes", return_value=['dummy']).start() mock.patch("vmware_nsxlib.v3.core_resources.NsxLibTransportNode." "get_transport_zones", return_value=[NSX_TZ_NAME, mock.ANY]).start() mock.patch("vmware_nsxlib.v3.security.NsxLibFirewallSection.add_rules", side_effect=_mock_add_rules_in_section).start() class NsxV3PluginTestCaseMixin(test_plugin.NeutronDbPluginV2TestCase, nsxlib_testcase.NsxClientTestCase): def setup_conf_overrides(self): cfg.CONF.set_override('default_overlay_tz', NSX_TZ_NAME, 'nsx_v3') cfg.CONF.set_override('native_dhcp_metadata', False, 'nsx_v3') cfg.CONF.set_override('dhcp_profile', NSX_DHCP_PROFILE_ID, 'nsx_v3') cfg.CONF.set_override('metadata_proxy', NSX_METADATA_PROXY_ID, 'nsx_v3') cfg.CONF.set_override( 'network_scheduler_driver', 'neutron.scheduler.dhcp_agent_scheduler.AZAwareWeightScheduler') def mock_plugin_methods(self): # need to mock the global placeholder. This is due to the fact that # the generic security group tests assume that there is just one # security group. 
mock_ensure_global_sg_placeholder = mock.patch.object( nsx_plugin.NsxV3Plugin, '_ensure_global_sg_placeholder') mock_ensure_global_sg_placeholder.start() mock.patch( 'neutron_lib.rpc.Connection.consume_in_threads', return_value=[]).start() mock.patch.object(nsx_plugin.NsxV3Plugin, '_cleanup_duplicates').start() def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None, service_plugins=None, **kwargs): self._patchers = [] _mock_nsx_backend_calls() self.setup_conf_overrides() self.mock_get_edge_cluster = mock.patch.object( nsx_plugin.NsxV3Plugin, '_get_edge_cluster', return_value=NSX_EDGE_CLUSTER_UUID) self.mock_get_edge_cluster.start() self.mock_plugin_methods() # ignoring the given plugin and use the nsx-v3 one if not plugin.endswith('NsxTVDPlugin'): plugin = PLUGIN_NAME super(NsxV3PluginTestCaseMixin, self).setUp(plugin=plugin, ext_mgr=ext_mgr) self.maxDiff = None def tearDown(self): for patcher in self._patchers: patcher.stop() super(NsxV3PluginTestCaseMixin, self).tearDown() def _create_network(self, fmt, name, admin_state_up, arg_list=None, providernet_args=None, set_context=False, tenant_id=None, **kwargs): tenant_id = tenant_id or self._tenant_id data = {'network': {'name': name, 'admin_state_up': admin_state_up, 'tenant_id': tenant_id}} # Fix to allow the router:external attribute and any other # attributes containing a colon to be passed with # a double underscore instead kwargs = dict((k.replace('__', ':'), v) for k, v in kwargs.items()) if extnet_apidef.EXTERNAL in kwargs: arg_list = (extnet_apidef.EXTERNAL, ) + (arg_list or ()) if providernet_args: kwargs.update(providernet_args) for arg in (('admin_state_up', 'tenant_id', 'shared', 'availability_zone_hints') + (arg_list or ())): # Arg must be present if arg in kwargs: data['network'][arg] = kwargs[arg] network_req = self.new_create_request('networks', data, fmt) if set_context and tenant_id: # create a specific auth context for this request network_req.environ['neutron.context'] = context.Context( '', tenant_id) 
return network_req.get_response(self.api) def _create_l3_ext_network( self, physical_network=nsx_v3_mocks.DEFAULT_TIER0_ROUTER_UUID): name = 'l3_ext_net' net_type = utils.NetworkTypes.L3_EXT providernet_args = {pnet.NETWORK_TYPE: net_type, pnet.PHYSICAL_NETWORK: physical_network} return self.network(name=name, router__external=True, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK)) def _save_networks(self, networks): ctx = context.get_admin_context() for network_id in networks: with ctx.session.begin(subtransactions=True): ctx.session.add(models_v2.Network(id=network_id)) def _initialize_azs(self): self.plugin.init_availability_zones() self.plugin._translate_configured_names_to_uuids() def _enable_native_dhcp_md(self): cfg.CONF.set_override('native_dhcp_metadata', True, 'nsx_v3') cfg.CONF.set_override('dhcp_agent_notification', False) self.plugin._init_dhcp_metadata() def _enable_dhcp_relay(self): # Add the relay service to the config and availability zones cfg.CONF.set_override('dhcp_relay_service', NSX_DHCP_RELAY_SRV, 'nsx_v3') mock_nsx_version = mock.patch.object( self.plugin.nsxlib, 'feature_supported', return_value=True) mock_nsx_version.start() self._initialize_azs() self._enable_native_dhcp_md() class TestNetworksV2(test_plugin.TestNetworksV2, NsxV3PluginTestCaseMixin): def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None, service_plugins=None): # add vlan transparent to the configuration cfg.CONF.set_override('vlan_transparent', True) super(TestNetworksV2, self).setUp(plugin=plugin, ext_mgr=ext_mgr) def tearDown(self): super(TestNetworksV2, self).tearDown() @mock.patch.object(nsx_plugin.NsxV3Plugin, 'validate_availability_zones') def test_create_network_with_availability_zone(self, mock_validate_az): name = 'net-with-zone' zone = ['zone1'] mock_validate_az.return_value = None with self.network(name=name, availability_zone_hints=zone) as net: az_hints = net['network']['availability_zone_hints'] self.assertListEqual(az_hints, 
zone) def test_network_failure_rollback(self): self._enable_native_dhcp_md() self.plugin = directory.get_plugin() with mock.patch.object(self.plugin.nsxlib.logical_port, 'create', side_effect=api_exc.NsxApiException): self.network() ctx = context.get_admin_context() networks = self.plugin.get_networks(ctx) self.assertListEqual([], networks) def test_create_provider_flat_network(self): providernet_args = {pnet.NETWORK_TYPE: 'flat'} with mock.patch('vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.' 'create', side_effect=_return_id_key) as nsx_create, \ mock.patch('vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.' 'delete') as nsx_delete, \ mock.patch('vmware_nsxlib.v3.core_resources.NsxLibTransportZone.' 'get_transport_type', return_value='VLAN'),\ self.network(name='flat_net', providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, )) as net: self.assertEqual('flat', net['network'].get(pnet.NETWORK_TYPE)) # make sure the network is created at the backend nsx_create.assert_called_once() # Delete the network and make sure it is deleted from the backend req = self.new_delete_request('networks', net['network']['id']) res = req.get_response(self.api) self.assertEqual(exc.HTTPNoContent.code, res.status_int) nsx_delete.assert_called_once() def test_create_provider_flat_network_with_physical_net(self): physical_network = nsx_v3_mocks.DEFAULT_TIER0_ROUTER_UUID providernet_args = {pnet.NETWORK_TYPE: 'flat', pnet.PHYSICAL_NETWORK: physical_network} with mock.patch( 'vmware_nsxlib.v3.core_resources.NsxLibTransportZone.' 'get_transport_type', return_value='VLAN'),\ self.network(name='flat_net', providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK)) as net: self.assertEqual('flat', net['network'].get(pnet.NETWORK_TYPE)) def test_create_provider_flat_network_with_vlan(self): providernet_args = {pnet.NETWORK_TYPE: 'flat', pnet.SEGMENTATION_ID: 11} with mock.patch('vmware_nsxlib.v3.core_resources.NsxLibTransportZone.' 
'get_transport_type', return_value='VLAN'): result = self._create_network(fmt='json', name='bad_flat_net', admin_state_up=True, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID)) data = self.deserialize('json', result) # should fail self.assertEqual('InvalidInput', data['NeutronError']['type']) def test_create_provider_geneve_network(self): providernet_args = {pnet.NETWORK_TYPE: 'geneve'} with mock.patch('vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.' 'create', side_effect=_return_id_key) as nsx_create, \ mock.patch('vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.' 'delete') as nsx_delete, \ mock.patch('vmware_nsxlib.v3.core_resources.NsxLibTransportZone.' 'get_transport_type', return_value='OVERLAY'),\ self.network(name='geneve_net', providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, )) as net: self.assertEqual('geneve', net['network'].get(pnet.NETWORK_TYPE)) # make sure the network is created at the backend nsx_create.assert_called_once() # Delete the network and make sure it is deleted from the backend req = self.new_delete_request('networks', net['network']['id']) res = req.get_response(self.api) self.assertEqual(exc.HTTPNoContent.code, res.status_int) nsx_delete.assert_called_once() def test_create_provider_geneve_network_with_physical_net(self): physical_network = nsx_v3_mocks.DEFAULT_TIER0_ROUTER_UUID providernet_args = {pnet.NETWORK_TYPE: 'geneve', pnet.PHYSICAL_NETWORK: physical_network} with mock.patch( 'vmware_nsxlib.v3.core_resources.NsxLibTransportZone.' 'get_transport_type', return_value='OVERLAY'),\ self.network(name='geneve_net', providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, )) as net: self.assertEqual('geneve', net['network'].get(pnet.NETWORK_TYPE)) def test_create_provider_geneve_network_with_vlan(self): providernet_args = {pnet.NETWORK_TYPE: 'geneve', pnet.SEGMENTATION_ID: 11} with mock.patch( 'vmware_nsxlib.v3.core_resources.NsxLibTransportZone.' 
'get_transport_type', return_value='OVERLAY'): result = self._create_network(fmt='json', name='bad_geneve_net', admin_state_up=True, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID)) data = self.deserialize('json', result) # should fail self.assertEqual('InvalidInput', data['NeutronError']['type']) def test_create_provider_vlan_network(self): providernet_args = {pnet.NETWORK_TYPE: 'vlan', pnet.SEGMENTATION_ID: 11} with mock.patch('vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.' 'create', side_effect=_return_id_key) as nsx_create, \ mock.patch('vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.' 'delete') as nsx_delete, \ mock.patch('vmware_nsxlib.v3.core_resources.NsxLibTransportZone.' 'get_transport_type', return_value='VLAN'),\ self.network(name='vlan_net', providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID)) as net: self.assertEqual('vlan', net['network'].get(pnet.NETWORK_TYPE)) # make sure the network is created at the backend nsx_create.assert_called_once() # Delete the network and make sure it is deleted from the backend req = self.new_delete_request('networks', net['network']['id']) res = req.get_response(self.api) self.assertEqual(exc.HTTPNoContent.code, res.status_int) nsx_delete.assert_called_once() def test_create_provider_nsx_network(self): physical_network = 'Fake logical switch' providernet_args = {pnet.NETWORK_TYPE: 'nsx-net', pnet.PHYSICAL_NETWORK: physical_network} with mock.patch( 'vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.create', side_effect=nsxlib_exc.ResourceNotFound) as nsx_create, \ mock.patch('vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.' 
'delete') as nsx_delete, \ self.network(name='nsx_net', providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK)) as net: self.assertEqual('nsx-net', net['network'].get(pnet.NETWORK_TYPE)) self.assertEqual(physical_network, net['network'].get(pnet.PHYSICAL_NETWORK)) # make sure the network is NOT created at the backend nsx_create.assert_not_called() # Delete the network. It should NOT deleted from the backend req = self.new_delete_request('networks', net['network']['id']) res = req.get_response(self.api) self.assertEqual(exc.HTTPNoContent.code, res.status_int) nsx_delete.assert_not_called() def test_create_provider_bad_nsx_network(self): physical_network = 'Bad logical switch' providernet_args = {pnet.NETWORK_TYPE: 'nsx-net', pnet.PHYSICAL_NETWORK: physical_network} with mock.patch( "vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.get", side_effect=nsxlib_exc.ResourceNotFound): result = self._create_network(fmt='json', name='bad_nsx_net', admin_state_up=True, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK)) data = self.deserialize('json', result) # should fail self.assertEqual('InvalidInput', data['NeutronError']['type']) def test_create_ens_network_with_no_port_sec(self): cfg.CONF.set_override('ens_support', True, 'nsx_v3') providernet_args = {psec.PORTSECURITY: False} with mock.patch("vmware_nsxlib.v3.core_resources.NsxLibTransportZone." 
"get_host_switch_mode", return_value="ENS"),\ mock.patch( "vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.get", return_value={'transport_zone_id': 'xxx'}): result = self._create_network(fmt='json', name='ens_net', admin_state_up=True, providernet_args=providernet_args, arg_list=(psec.PORTSECURITY,)) res = self.deserialize('json', result) # should succeed, and net should have port security disabled self.assertFalse(res['network']['port_security_enabled']) def test_create_ens_network_with_port_sec(self): cfg.CONF.set_override('ens_support', True, 'nsx_v3') providernet_args = {psec.PORTSECURITY: True} with mock.patch("vmware_nsxlib.v3.NsxLib.get_version", return_value='2.3.0'),\ mock.patch("vmware_nsxlib.v3.core_resources.NsxLibTransportZone." "get_host_switch_mode", return_value="ENS"),\ mock.patch("vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch." "get", return_value={'transport_zone_id': 'xxx'}): result = self._create_network(fmt='json', name='ens_net', admin_state_up=True, providernet_args=providernet_args, arg_list=(psec.PORTSECURITY,)) res = self.deserialize('json', result) # should fail self.assertEqual('NsxENSPortSecurity', res['NeutronError']['type']) def test_create_ens_network_with_port_sec_supported(self): cfg.CONF.set_override('ens_support', True, 'nsx_v3') providernet_args = {psec.PORTSECURITY: True} with mock.patch("vmware_nsxlib.v3.core_resources.NsxLibTransportZone." "get_host_switch_mode", return_value="ENS"),\ mock.patch("vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch." 
"get", return_value={'transport_zone_id': 'xxx'}): result = self._create_network(fmt='json', name='ens_net', admin_state_up=True, providernet_args=providernet_args, arg_list=(psec.PORTSECURITY,)) res = self.deserialize('json', result) # should succeed self.assertTrue(res['network'][psec.PORTSECURITY]) def test_create_ens_network_disable_default_port_security(self): cfg.CONF.set_override('ens_support', True, 'nsx_v3') cfg.CONF.set_override('disable_port_security_for_ens', True, 'nsx_v3') mock_ens = mock.patch('vmware_nsxlib.v3' '.core_resources.NsxLibTransportZone' '.get_host_switch_mode', return_value='ENS') mock_tz = mock.patch('vmware_nsxlib.v3' '.core_resources.NsxLibLogicalSwitch.get', return_value={'transport_zone_id': 'xxx'}) mock_tt = mock.patch('vmware_nsxlib.v3' '.core_resources.NsxLibTransportZone' '.get_transport_type', return_value='VLAN') data = {'network': { 'name': 'portsec_net', 'admin_state_up': True, 'shared': False, 'tenant_id': 'some_tenant', 'provider:network_type': 'flat', 'provider:physical_network': 'xxx', 'port_security_enabled': True}} with mock_ens, mock_tz, mock_tt: self.plugin.create_network(context.get_admin_context(), data) def test_create_ens_network_with_qos(self): cfg.CONF.set_override('ens_support', True, 'nsx_v3') mock_ens = mock.patch('vmware_nsxlib.v3' '.core_resources.NsxLibTransportZone' '.get_host_switch_mode', return_value='ENS') mock_tz = mock.patch('vmware_nsxlib.v3' '.core_resources.NsxLibLogicalSwitch.get', return_value={'transport_zone_id': 'xxx'}) mock_tt = mock.patch('vmware_nsxlib.v3' '.core_resources.NsxLibTransportZone' '.get_transport_type', return_value='VLAN') mock_ver = mock.patch("vmware_nsxlib.v3.NsxLib.get_version", return_value='2.4.0') policy_id = uuidutils.generate_uuid() data = {'network': { 'name': 'qos_net', 'tenant_id': 'some_tenant', 'provider:network_type': 'flat', 'provider:physical_network': 'xxx', 'qos_policy_id': policy_id, 'port_security_enabled': False}} with mock_ens, mock_tz, mock_tt, 
mock_ver, mock.patch.object( self.plugin, '_validate_qos_policy_id'): self.assertRaises(n_exc.InvalidInput, self.plugin.create_network, context.get_admin_context(), data) def test_update_ens_network_with_qos(self): cfg.CONF.set_override('ens_support', True, 'nsx_v3') mock_ens = mock.patch('vmware_nsxlib.v3' '.core_resources.NsxLibTransportZone' '.get_host_switch_mode', return_value='ENS') mock_tz = mock.patch('vmware_nsxlib.v3' '.core_resources.NsxLibLogicalSwitch.get', return_value={'transport_zone_id': 'xxx'}) mock_tt = mock.patch('vmware_nsxlib.v3' '.core_resources.NsxLibTransportZone' '.get_transport_type', return_value='VLAN') mock_ver = mock.patch("vmware_nsxlib.v3.NsxLib.get_version", return_value='2.4.0') data = {'network': { 'name': 'qos_net', 'tenant_id': 'some_tenant', 'provider:network_type': 'flat', 'provider:physical_network': 'xxx', 'admin_state_up': True, 'shared': False, 'port_security_enabled': False}} with mock_ens, mock_tz, mock_tt, mock_ver,\ mock.patch.object(self.plugin, '_validate_qos_policy_id'): network = self.plugin.create_network(context.get_admin_context(), data) policy_id = uuidutils.generate_uuid() data = {'network': { 'id': network['id'], 'admin_state_up': True, 'shared': False, 'port_security_enabled': False, 'tenant_id': 'some_tenant', 'qos_policy_id': policy_id}} self.assertRaises(n_exc.InvalidInput, self.plugin.update_network, context.get_admin_context(), network['id'], data) def test_update_ens_network(self): cfg.CONF.set_override('ens_support', True, 'nsx_v3') providernet_args = {psec.PORTSECURITY: False} with mock.patch("vmware_nsxlib.v3.NsxLib.get_version", return_value='2.3.0'),\ mock.patch("vmware_nsxlib.v3.core_resources.NsxLibTransportZone." "get_host_switch_mode", return_value="ENS"),\ mock.patch("vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch." 
"get", return_value={'transport_zone_id': 'xxx'}): result = self._create_network(fmt='json', name='ens_net', admin_state_up=True, providernet_args=providernet_args, arg_list=(psec.PORTSECURITY,)) net = self.deserialize('json', result) net_id = net['network']['id'] args = {'network': {psec.PORTSECURITY: True}} req = self.new_update_request('networks', args, net_id, fmt='json') res = self.deserialize('json', req.get_response(self.api)) # should fail self.assertEqual('NsxENSPortSecurity', res['NeutronError']['type']) def test_update_ens_network_psec_supported(self): cfg.CONF.set_override('ens_support', True, 'nsx_v3') providernet_args = {psec.PORTSECURITY: False} with mock.patch("vmware_nsxlib.v3.core_resources.NsxLibTransportZone." "get_host_switch_mode", return_value="ENS"),\ mock.patch( "vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.get", return_value={'transport_zone_id': 'xxx'}): result = self._create_network(fmt='json', name='ens_net', admin_state_up=True, providernet_args=providernet_args, arg_list=(psec.PORTSECURITY,)) net = self.deserialize('json', result) net_id = net['network']['id'] args = {'network': {psec.PORTSECURITY: True}} req = self.new_update_request('networks', args, net_id, fmt='json') res = self.deserialize('json', req.get_response(self.api)) # should succeed self.assertTrue(res['network'][psec.PORTSECURITY]) def test_create_transparent_vlan_network(self): providernet_args = {vlan_apidef.VLANTRANSPARENT: True} with mock.patch( 'vmware_nsxlib.v3.core_resources.NsxLibTransportZone.' 'get_transport_type', return_value='OVERLAY'),\ self.network(name='vt_net', providernet_args=providernet_args, arg_list=(vlan_apidef.VLANTRANSPARENT, )) as net: self.assertTrue(net['network'].get(vlan_apidef.VLANTRANSPARENT)) def test_create_provider_vlan_network_with_transparent(self): providernet_args = {pnet.NETWORK_TYPE: 'vlan', vlan_apidef.VLANTRANSPARENT: True} with mock.patch('vmware_nsxlib.v3.core_resources.NsxLibTransportZone.' 
'get_transport_type', return_value='VLAN'): result = self._create_network(fmt='json', name='badvlan_net', admin_state_up=True, providernet_args=providernet_args, arg_list=( pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID, vlan_apidef.VLANTRANSPARENT)) data = self.deserialize('json', result) self.assertEqual('vlan', data['network'].get(pnet.NETWORK_TYPE)) def _test_generate_tag(self, vlan_id): net_type = 'vlan' name = 'phys_net' plugin = directory.get_plugin() plugin._network_vlans = plugin_utils.parse_network_vlan_ranges( cfg.CONF.nsx_v3.network_vlan_ranges) expected = [('subnets', []), ('name', name), ('admin_state_up', True), ('status', 'ACTIVE'), ('shared', False), (pnet.NETWORK_TYPE, net_type), (pnet.PHYSICAL_NETWORK, 'fb69d878-958e-4f32-84e4-50286f26226b'), (pnet.SEGMENTATION_ID, vlan_id)] providernet_args = {pnet.NETWORK_TYPE: net_type, pnet.PHYSICAL_NETWORK: 'fb69d878-958e-4f32-84e4-50286f26226b'} gtt_path = "vmware_nsxlib.v3.core_resources." \ "NsxLibTransportZone.get_transport_type" with mock.patch(gtt_path, return_value='VLAN'): with self.network(name=name, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK)) as net: for k, v in expected: self.assertEqual(net['network'][k], v) def test_create_phys_vlan_generate(self): cfg.CONF.set_override('network_vlan_ranges', 'fb69d878-958e-4f32-84e4-50286f26226b', 'nsx_v3') self._test_generate_tag(1) def test_create_phys_vlan_generate_range(self): cfg.CONF.set_override('network_vlan_ranges', 'fb69d878-958e-4f32-84e4-' '50286f26226b:100:110', 'nsx_v3') self._test_generate_tag(100) def test_create_phys_vlan_network_outofrange_returns_503(self): cfg.CONF.set_override('network_vlan_ranges', 'fb69d878-958e-4f32-84e4-' '50286f26226b:9:10', 'nsx_v3') self._test_generate_tag(9) self._test_generate_tag(10) with testlib_api.ExpectedException(exc.HTTPClientError) as ctx_manager: self._test_generate_tag(11) self.assertEqual(ctx_manager.exception.code, 503) def test_update_external_flag_on_net(self): 
with self.network() as net: # should fail to update the network to external args = {'network': {'router:external': 'True'}} req = self.new_update_request('networks', args, net['network']['id'], fmt='json') res = self.deserialize('json', req.get_response(self.api)) self.assertEqual('InvalidInput', res['NeutronError']['type']) def test_network_update_external(self): # This plugin does not support updating the external flag of a network self.skipTest("UnSupported") def test_network_update_external_failure(self): data = {'network': {'name': 'net1', 'router:external': 'True', 'tenant_id': 'tenant_one', 'provider:physical_network': 'stam'}} network_req = self.new_create_request('networks', data) network = self.deserialize(self.fmt, network_req.get_response(self.api)) ext_net_id = network['network']['id'] # should fail to update the network to non-external args = {'network': {'router:external': 'False'}} req = self.new_update_request('networks', args, ext_net_id, fmt='json') res = self.deserialize('json', req.get_response(self.api)) self.assertEqual('InvalidInput', res['NeutronError']['type']) def test_update_network_rollback(self): with self.network() as net: # Fail the backend update with mock.patch("vmware_nsxlib.v3.core_resources." 
"NsxLibLogicalSwitch.update", side_effect=nsxlib_exc.InvalidInput): args = {'network': {'description': 'test rollback'}} req = self.new_update_request('networks', args, net['network']['id'], fmt='json') res = self.deserialize('json', req.get_response(self.api)) # should fail with the nsxlib error (meaning that the rollback # did not fail) self.assertEqual('InvalidInput', res['NeutronError']['type']) class TestSubnetsV2(common_v3.NsxV3TestSubnets, NsxV3PluginTestCaseMixin): def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None): super(TestSubnetsV2, self).setUp(plugin=plugin, ext_mgr=ext_mgr) def test_create_subnet_with_shared_address_space(self): with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '100.64.0.0/16', 'name': 'sub1', 'enable_dhcp': False, 'dns_nameservers': None, 'allocation_pools': None, 'tenant_id': 'tenant_one', 'host_routes': None, 'ip_version': 4}} self.assertRaises(n_exc.InvalidInput, self.plugin.create_subnet, context.get_admin_context(), data) def _create_external_network(self): data = {'network': {'name': 'net1', 'router:external': 'True', 'tenant_id': 'tenant_one', 'provider:physical_network': 'stam'}} network_req = self.new_create_request('networks', data) network = self.deserialize(self.fmt, network_req.get_response(self.api)) return network def test_create_subnet_with_conflicting_t0_address(self): network = self._create_external_network() data = {'subnet': {'network_id': network['network']['id'], 'cidr': '172.20.1.0/24', 'name': 'sub1', 'enable_dhcp': False, 'dns_nameservers': None, 'allocation_pools': None, 'tenant_id': 'tenant_one', 'host_routes': None, 'ip_version': 4}} ports = [{'subnets': [{'ip_addresses': [u'172.20.1.60'], 'prefix_length': 24}], 'resource_type': 'LogicalRouterUpLinkPort'}] with mock.patch.object(self.plugin.nsxlib.logical_router_port, 'get_by_router_id', return_value=ports): self.assertRaises(n_exc.InvalidInput, self.plugin.create_subnet, context.get_admin_context(), data) 
def test_subnet_native_dhcp_subnet_enabled(self): self._enable_native_dhcp_md() with self.network() as network: with mock.patch.object(self.plugin, '_enable_native_dhcp') as enable_dhcp,\ self.subnet(network=network, enable_dhcp=True): # Native dhcp should be set for this subnet self.assertTrue(enable_dhcp.called) def test_subnet_native_dhcp_subnet_disabled(self): self._enable_native_dhcp_md() with self.network() as network: with mock.patch.object(self.plugin, '_enable_native_dhcp') as enable_dhcp,\ self.subnet(network=network, enable_dhcp=False): # Native dhcp should not be set for this subnet self.assertFalse(enable_dhcp.called) def test_subnet_native_dhcp_with_relay(self): """Verify that the relay service is added to the router interface""" self._enable_dhcp_relay() with self.network() as network: with mock.patch.object(self.plugin, '_enable_native_dhcp') as enable_dhcp,\ self.subnet(network=network, enable_dhcp=True): # Native dhcp should not be set for this subnet self.assertFalse(enable_dhcp.called) def test_subnet_native_dhcp_flat_subnet_disabled(self): self._enable_native_dhcp_md() providernet_args = {pnet.NETWORK_TYPE: 'flat'} with mock.patch('vmware_nsxlib.v3.core_resources.NsxLibTransportZone.' 'get_transport_type', return_value='VLAN'): with self.network(name='flat_net', providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, )) as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '172.20.1.0/24', 'name': 'sub1', 'enable_dhcp': False, 'dns_nameservers': None, 'allocation_pools': None, 'tenant_id': 'tenant_one', 'host_routes': None, 'ip_version': 4}} self.plugin.create_subnet( context.get_admin_context(), data) def test_subnet_native_dhcp_flat_subnet_enabled(self): self._enable_native_dhcp_md() providernet_args = {pnet.NETWORK_TYPE: 'flat'} with mock.patch('vmware_nsxlib.v3.core_resources.NsxLibTransportZone.' 
'get_transport_type', return_value='VLAN'): with self.network(name='flat_net', providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, )) as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '172.20.1.0/24', 'name': 'sub1', 'enable_dhcp': True, 'dns_nameservers': None, 'allocation_pools': None, 'tenant_id': 'tenant_one', 'host_routes': None, 'ip_version': 4}} self.assertRaises(n_exc.InvalidInput, self.plugin.create_subnet, context.get_admin_context(), data) def test_fail_create_static_routes_per_subnet_over_limit(self): with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '10.0.0.0/16', 'name': 'sub1', 'dns_nameservers': None, 'allocation_pools': None, 'tenant_id': 'tenant_one', 'enable_dhcp': False, 'ip_version': 4}} count = 1 host_routes = [] while count < nsx_constants.MAX_STATIC_ROUTES: host_routes.append("'host_routes': [{'destination': " "'135.207.0.0/%s', 'nexthop': " "'1.2.3.%s'}]" % (count, count)) count += 1 data['subnet']['host_routes'] = host_routes self.assertRaises(n_exc.InvalidInput, self.plugin.create_subnet, context.get_admin_context(), data) def test_create_subnet_disable_dhcp_with_host_route_fails(self): with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '172.20.1.0/24', 'name': 'sub1', 'dns_nameservers': None, 'allocation_pools': None, 'tenant_id': 'tenant_one', 'enable_dhcp': False, 'host_routes': [{ 'destination': '135.207.0.0/16', 'nexthop': '1.2.3.4'}], 'ip_version': 4}} self.assertRaises(n_exc.InvalidInput, self.plugin.create_subnet, context.get_admin_context(), data) def test_update_subnet_disable_dhcp_with_host_route_fails(self): with self.network() as network: data = {'subnet': {'network_id': network['network']['id'], 'cidr': '172.20.1.0/24', 'name': 'sub1', 'dns_nameservers': None, 'allocation_pools': None, 'tenant_id': 'tenant_one', 'enable_dhcp': True, 'host_routes': [{ 'destination': '135.207.0.0/16', 
'nexthop': '1.2.3.4'}], 'ip_version': 4}} subnet = self.plugin.create_subnet( context.get_admin_context(), data) data['subnet']['enable_dhcp'] = False self.assertRaises(n_exc.InvalidInput, self.plugin.update_subnet, context.get_admin_context(), subnet['id'], data) def test_create_subnet_ipv6_gw_is_nw_start_addr(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_ipv6_gw_is_nw_start_addr_canonicalize(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_ipv6_gw_is_nw_end_addr(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_ipv6_first_ip_owned_by_router(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_ipv6_first_ip_owned_by_non_router(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_with_v6_pd_allocation_pool(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_with_v6_allocation_pool(self): self.skipTest('No DHCP v6 Support yet') def test_update_subnet_ipv6_ra_mode_fails(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_ipv6_slaac_with_ip_already_allocated(self): self.skipTest('No DHCP v6 Support yet') def test_create_subnet_ipv6_slaac_with_db_reference_error(self): self.skipTest('No DHCP v6 Support yet') class TestPortsV2(common_v3.NsxV3SubnetMixin, common_v3.NsxV3TestPorts, NsxV3PluginTestCaseMixin, test_bindings.PortBindingsTestCase, test_bindings.PortBindingsHostTestCaseMixin, test_bindings.PortBindingsVnicTestCaseMixin): VIF_TYPE = portbindings.VIF_TYPE_OVS HAS_PORT_FILTER = True def setUp(self): cfg.CONF.set_override('switching_profiles', [NSX_SWITCH_PROFILE], 'nsx_v3') # add vlan transparent to the configuration cfg.CONF.set_override('vlan_transparent', True) super(TestPortsV2, self).setUp() self.plugin = directory.get_plugin() self.ctx = context.get_admin_context() def test_update_port_delete_ip(self): # This test case overrides the default because the nsx plugin # implements port_security/security groups and it is not 
allowed # to remove an ip address from a port unless the security group # is first removed. with self.subnet() as subnet: with self.port(subnet=subnet) as port: data = {'port': {'admin_state_up': False, 'fixed_ips': [], secgrp.SECURITYGROUPS: []}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize('json', req.get_response(self.api)) self.assertEqual(res['port']['admin_state_up'], data['port']['admin_state_up']) self.assertEqual(res['port']['fixed_ips'], data['port']['fixed_ips']) def test_delete_dhcp_port(self): self._enable_native_dhcp_md() with self.subnet(): pl = directory.get_plugin() ctx = context.Context(user_id=None, tenant_id=self._tenant_id, is_admin=False) ports = pl.get_ports( ctx, filters={'device_owner': [constants.DEVICE_OWNER_DHCP]}) req = self.new_delete_request('ports', ports[0]['id']) res = req.get_response(self.api) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_fail_create_port_with_ext_net(self): expected_error = 'InvalidInput' with self._create_l3_ext_network() as network: with self.subnet(network=network, cidr='10.0.0.0/24'): device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'X' res = self._create_port(self.fmt, network['network']['id'], exc.HTTPBadRequest.code, device_owner=device_owner) data = self.deserialize(self.fmt, res) self.assertEqual(expected_error, data['NeutronError']['type']) def test_fail_update_port_with_ext_net(self): with self._create_l3_ext_network() as network: with self.subnet(network=network, cidr='10.0.0.0/24') as subnet: with self.port(subnet=subnet) as port: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'X' data = {'port': {'device_owner': device_owner}} req = self.new_update_request('ports', data, port['port']['id']) res = req.get_response(self.api) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_fail_update_lb_port_with_allowed_address_pairs(self): with self.network() as network: data = {'port': { 'network_id': 
network['network']['id'], 'tenant_id': self._tenant_id, 'name': 'pair_port', 'admin_state_up': True, 'device_id': 'fake_device', 'device_owner': constants.DEVICE_OWNER_LOADBALANCERV2, 'fixed_ips': []} } port = self.plugin.create_port(self.ctx, data) data['port']['allowed_address_pairs'] = '10.0.0.1' self.assertRaises( n_exc.InvalidInput, self.plugin.update_port, self.ctx, port['id'], data) def test_fail_create_allowed_address_pairs_over_limit(self): with self.network() as network, self.subnet( network=network, enable_dhcp=True) as s1: data = { 'port': { 'network_id': network['network']['id'], 'tenant_id': self._tenant_id, 'name': 'pair_port', 'admin_state_up': True, 'device_id': 'fake_device', 'device_owner': 'fake_owner', 'fixed_ips': [{'subnet_id': s1['subnet']['id']}] } } count = 1 address_pairs = [] while count < 129: address_pairs.append({'ip_address': '10.0.0.%s' % count}) count += 1 data['port']['allowed_address_pairs'] = address_pairs self.assertRaises(n_exc.InvalidInput, self.plugin.create_port, self.ctx, data) def test_fail_update_lb_port_with_fixed_ip(self): with self.network() as network: data = {'port': { 'network_id': network['network']['id'], 'tenant_id': self._tenant_id, 'name': 'pair_port', 'admin_state_up': True, 'device_id': 'fake_device', 'device_owner': constants.DEVICE_OWNER_LOADBALANCERV2, 'fixed_ips': []} } port = self.plugin.create_port(self.ctx, data) data['port']['fixed_ips'] = '10.0.0.1' self.assertRaises( n_exc.InvalidInput, self.plugin.update_port, self.ctx, port['id'], data) def test_create_port_with_qos(self): with self.network() as network: policy_id = uuidutils.generate_uuid() data = {'port': { 'network_id': network['network']['id'], 'tenant_id': self._tenant_id, 'qos_policy_id': policy_id, 'name': 'qos_port', 'admin_state_up': True, 'device_id': 'fake_device', 'device_owner': 'fake_owner', 'fixed_ips': [], 'mac_address': '00:00:00:00:00:01'} } with mock.patch.object(self.plugin, '_get_qos_profile_id'),\ 
mock.patch.object(self.plugin, '_validate_qos_policy_id'): port = self.plugin.create_port(self.ctx, data) self.assertEqual(policy_id, port['qos_policy_id']) # Get port should also return the qos policy id with mock.patch('vmware_nsx.services.qos.common.utils.' 'get_port_policy_id', return_value=policy_id): port = self.plugin.get_port(self.ctx, port['id']) self.assertEqual(policy_id, port['qos_policy_id']) def test_update_port_with_qos(self): with self.network() as network: data = {'port': { 'network_id': network['network']['id'], 'tenant_id': self._tenant_id, 'name': 'qos_port', 'admin_state_up': True, 'device_id': 'fake_device', 'device_owner': 'fake_owner', 'fixed_ips': [], 'mac_address': '00:00:00:00:00:01'} } port = self.plugin.create_port(self.ctx, data) policy_id = uuidutils.generate_uuid() data['port']['qos_policy_id'] = policy_id with mock.patch.object(self.plugin, '_get_qos_profile_id'),\ mock.patch.object(self.plugin, '_validate_qos_policy_id'): res = self.plugin.update_port(self.ctx, port['id'], data) self.assertEqual(policy_id, res['qos_policy_id']) # Get port should also return the qos policy id with mock.patch('vmware_nsx.services.qos.common.utils.' 
'get_port_policy_id', return_value=policy_id): res = self.plugin.get_port(self.ctx, port['id']) self.assertEqual(policy_id, res['qos_policy_id']) # now remove the qos from the port data['port']['qos_policy_id'] = None res = self.plugin.update_port(self.ctx, port['id'], data) self.assertIsNone(res['qos_policy_id']) def test_create_ext_port_with_qos_fail(self): with self._create_l3_ext_network() as network: with self.subnet(network=network, cidr='10.0.0.0/24'),\ mock.patch.object(self.plugin, '_validate_qos_policy_id'): policy_id = uuidutils.generate_uuid() data = {'port': {'network_id': network['network']['id'], 'tenant_id': self._tenant_id, 'qos_policy_id': policy_id}} # Cannot add qos policy to a router port self.assertRaises(n_exc.InvalidInput, self.plugin.create_port, self.ctx, data) def _test_create_illegal_port_with_qos_fail(self, device_owner): with self.network() as network: with self.subnet(network=network, cidr='10.0.0.0/24'),\ mock.patch.object(self.plugin, '_validate_qos_policy_id'): policy_id = uuidutils.generate_uuid() data = {'port': {'network_id': network['network']['id'], 'tenant_id': self._tenant_id, 'device_owner': device_owner, 'qos_policy_id': policy_id}} # Cannot add qos policy to this type of port self.assertRaises(n_exc.InvalidInput, self.plugin.create_port, self.ctx, data) def test_create_port_ens_with_qos_fail(self): with self.network() as network: with self.subnet(network=network, cidr='10.0.0.0/24'): policy_id = uuidutils.generate_uuid() mock_ens = mock.patch('vmware_nsxlib.v3' '.core_resources.NsxLibTransportZone' '.get_host_switch_mode', return_value='ENS') mock_tz = mock.patch('vmware_nsxlib.v3' '.core_resources' '.NsxLibLogicalSwitch.get', return_value={ 'transport_zone_id': 'xxx'}) mock_tt = mock.patch('vmware_nsxlib.v3' '.core_resources.NsxLibTransportZone' '.get_transport_type', return_value='VLAN') mock_ver = mock.patch("vmware_nsxlib.v3.NsxLib.get_version", return_value='2.4.0') data = {'port': { 'network_id': 
network['network']['id'], 'tenant_id': self._tenant_id, 'name': 'qos_port', 'admin_state_up': True, 'device_id': 'fake_device', 'device_owner': 'fake_owner', 'fixed_ips': [], 'port_security_enabled': False, 'mac_address': '00:00:00:00:00:01', 'qos_policy_id': policy_id} } # Cannot add qos policy to this type of port with mock_ens, mock_tz, mock_tt, mock_ver,\ mock.patch.object(self.plugin, '_validate_qos_policy_id'): self.assertRaises(n_exc.InvalidInput, self.plugin.create_port, self.ctx, data) def test_create_port_ens_with_sg(self): cfg.CONF.set_override('disable_port_security_for_ens', True, 'nsx_v3') with self.network() as network: with self.subnet(network=network, cidr='10.0.0.0/24'): mock_ens = mock.patch('vmware_nsxlib.v3' '.core_resources.NsxLibTransportZone' '.get_host_switch_mode', return_value='ENS') mock_tz = mock.patch('vmware_nsxlib.v3' '.core_resources' '.NsxLibLogicalSwitch.get', return_value={ 'transport_zone_id': 'xxx'}) mock_tt = mock.patch('vmware_nsxlib.v3' '.core_resources.NsxLibTransportZone' '.get_transport_type', return_value='VLAN') data = {'port': { 'network_id': network['network']['id'], 'tenant_id': self._tenant_id, 'name': 'sg_port', 'admin_state_up': True, 'device_id': 'fake_device', 'device_owner': 'fake_owner', 'fixed_ips': [], 'mac_address': '00:00:00:00:00:01', 'port_security_enabled': True} } with mock_ens, mock_tz, mock_tt: self.plugin.create_port(self.ctx, data) def test_update_port_ens_with_qos_fail(self): with self.network() as network: with self.subnet(network=network, cidr='10.0.0.0/24'): policy_id = uuidutils.generate_uuid() mock_ens = mock.patch('vmware_nsxlib.v3' '.core_resources.NsxLibTransportZone' '.get_host_switch_mode', return_value='ENS') mock_tz = mock.patch('vmware_nsxlib.v3' '.core_resources' '.NsxLibLogicalSwitch.get', return_value={ 'transport_zone_id': 'xxx'}) mock_tt = mock.patch('vmware_nsxlib.v3' '.core_resources.NsxLibTransportZone' '.get_transport_type', return_value='VLAN') mock_ver = 
mock.patch("vmware_nsxlib.v3.NsxLib.get_version", return_value='2.4.0') data = {'port': { 'network_id': network['network']['id'], 'tenant_id': self._tenant_id, 'name': 'qos_port', 'admin_state_up': True, 'device_id': 'fake_device', 'device_owner': 'fake_owner', 'fixed_ips': [], 'port_security_enabled': False, 'mac_address': '00:00:00:00:00:01'} } with mock_ens, mock_tz, mock_tt, mock_ver,\ mock.patch.object(self.plugin, '_validate_qos_policy_id'): port = self.plugin.create_port(self.ctx, data) data['port'] = {'qos_policy_id': policy_id} self.assertRaises(n_exc.InvalidInput, self.plugin.update_port, self.ctx, port['id'], data) def test_create_port_with_mac_learning_true(self): with self.network() as network: data = {'port': { 'network_id': network['network']['id'], 'tenant_id': self._tenant_id, 'name': 'qos_port', 'admin_state_up': True, 'device_id': 'fake_device', 'device_owner': 'fake_owner', 'fixed_ips': [], 'port_security_enabled': False, 'mac_address': '00:00:00:00:00:01', 'mac_learning_enabled': True} } port = self.plugin.create_port(self.ctx, data) self.assertTrue(port['mac_learning_enabled']) def test_create_port_with_mac_learning_false(self): with self.network() as network: data = {'port': { 'network_id': network['network']['id'], 'tenant_id': self._tenant_id, 'name': 'qos_port', 'admin_state_up': True, 'device_id': 'fake_device', 'device_owner': 'fake_owner', 'fixed_ips': [], 'port_security_enabled': False, 'mac_address': '00:00:00:00:00:01', 'mac_learning_enabled': False} } port = self.plugin.create_port(self.ctx, data) self.assertFalse(port['mac_learning_enabled']) def test_update_port_with_mac_learning_true(self): with self.network() as network: data = {'port': { 'network_id': network['network']['id'], 'tenant_id': self._tenant_id, 'name': 'qos_port', 'admin_state_up': True, 'device_id': 'fake_device', 'device_owner': 'fake_owner', 'fixed_ips': [], 'port_security_enabled': False, 'mac_address': '00:00:00:00:00:01'} } port = 
self.plugin.create_port(self.ctx, data) data['port']['mac_learning_enabled'] = True update_res = self.plugin.update_port(self.ctx, port['id'], data) self.assertTrue(update_res['mac_learning_enabled']) def test_update_port_with_mac_learning_false(self): with self.network() as network: data = {'port': { 'network_id': network['network']['id'], 'tenant_id': self._tenant_id, 'name': 'qos_port', 'admin_state_up': True, 'device_id': 'fake_device', 'device_owner': 'fake_owner', 'fixed_ips': [], 'port_security_enabled': False, 'mac_address': '00:00:00:00:00:01'} } port = self.plugin.create_port(self.ctx, data) data['port']['mac_learning_enabled'] = False update_res = self.plugin.update_port(self.ctx, port['id'], data) self.assertFalse(update_res['mac_learning_enabled']) def test_update_port_with_mac_learning_failes(self): with self.network() as network: data = {'port': { 'network_id': network['network']['id'], 'tenant_id': self._tenant_id, 'name': 'qos_port', 'admin_state_up': True, 'device_id': 'fake_device', 'device_owner': constants.DEVICE_OWNER_FLOATINGIP, 'fixed_ips': [], 'port_security_enabled': False, 'mac_address': '00:00:00:00:00:01'} } port = self.plugin.create_port(self.ctx, data) data['port']['mac_learning_enabled'] = True self.assertRaises( n_exc.InvalidInput, self.plugin.update_port, self.ctx, port['id'], data) def test_create_router_port_with_qos_fail(self): self._test_create_illegal_port_with_qos_fail( 'network:router_interface') def test_create_dhcp_port_with_qos_fail(self): self._test_create_illegal_port_with_qos_fail('network:dhcp') def _test_update_illegal_port_with_qos_fail(self, device_owner): with self.network() as network: with self.subnet(network=network, cidr='10.0.0.0/24'),\ mock.patch.object(self.plugin, '_validate_qos_policy_id'): policy_id = uuidutils.generate_uuid() data = {'port': {'network_id': network['network']['id'], 'tenant_id': self._tenant_id, 'name': 'qos_port', 'admin_state_up': True, 'fixed_ips': [], 'mac_address': 
'00:00:00:00:00:01', 'device_id': 'dummy', 'device_owner': ''}} port = self.plugin.create_port(self.ctx, data) policy_id = uuidutils.generate_uuid() data['port'] = {'qos_policy_id': policy_id, 'device_owner': device_owner} # Cannot add qos policy to a router interface port self.assertRaises(n_exc.InvalidInput, self.plugin.update_port, self.ctx, port['id'], data) def test_update_router_port_with_qos_fail(self): self._test_update_illegal_port_with_qos_fail( 'network:router_interface') def test_update_dhcp_port_with_qos_fail(self): self._test_update_illegal_port_with_qos_fail('network:dhcp') def test_create_port_with_qos_on_net(self): with self.network() as network: policy_id = uuidutils.generate_uuid() device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'X' data = {'port': { 'network_id': network['network']['id'], 'tenant_id': self._tenant_id, 'name': 'qos_port', 'admin_state_up': True, 'device_id': 'fake_device', 'device_owner': device_owner, 'fixed_ips': [], 'mac_address': '00:00:00:00:00:01'} } with mock.patch.object(self.plugin, '_get_qos_profile_id') as get_profile,\ mock.patch('vmware_nsx.services.qos.common.utils.' 'get_network_policy_id', return_value=policy_id),\ mock.patch.object(self.plugin, '_validate_qos_policy_id'): self.plugin.create_port(self.ctx, data) get_profile.assert_called_once_with(self.ctx, policy_id) def test_update_port_with_qos_on_net(self): with self.network() as network: data = {'port': { 'network_id': network['network']['id'], 'tenant_id': self._tenant_id, 'name': 'qos_port', 'admin_state_up': True, 'device_id': 'fake_device', 'device_owner': 'fake_owner', 'fixed_ips': [], 'mac_address': '00:00:00:00:00:01'} } port = self.plugin.create_port(self.ctx, data) policy_id = uuidutils.generate_uuid() device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'X' data['port']['device_owner'] = device_owner with mock.patch.object(self.plugin, '_get_qos_profile_id') as get_profile,\ mock.patch('vmware_nsx.services.qos.common.utils.' 
'get_network_policy_id', return_value=policy_id),\ mock.patch.object(self.plugin, '_validate_qos_policy_id'): self.plugin.update_port(self.ctx, port['id'], data) get_profile.assert_called_once_with(self.ctx, policy_id) def _get_ports_with_fields(self, tenid, fields, expected_count): pl = directory.get_plugin() ctx = context.Context(user_id=None, tenant_id=tenid, is_admin=False) ports = pl.get_ports(ctx, filters={'tenant_id': [tenid]}, fields=fields) self.assertEqual(expected_count, len(ports)) def test_get_ports_with_fields(self): with self.port(), self.port(), self.port(), self.port() as p: tenid = p['port']['tenant_id'] # get all fields: self._get_ports_with_fields(tenid, None, 4) # get specific fields: self._get_ports_with_fields(tenid, 'mac_address', 4) self._get_ports_with_fields(tenid, 'network_id', 4) def test_list_ports_filtered_by_security_groups(self): ctx = context.get_admin_context() with self.port() as port1, self.port() as port2: query_params = "security_groups=%s" % ( port1['port']['security_groups'][0]) ports_data = self._list('ports', query_params=query_params) self.assertEqual(set([port1['port']['id'], port2['port']['id']]), set([port['id'] for port in ports_data['ports']])) query_params = "security_groups=%s&id=%s" % ( port1['port']['security_groups'][0], port1['port']['id']) ports_data = self._list('ports', query_params=query_params) self.assertEqual(port1['port']['id'], ports_data['ports'][0]['id']) self.assertEqual(1, len(ports_data['ports'])) temp_sg = {'security_group': {'tenant_id': 'some_tenant', 'name': '', 'description': 's'}} sg_dbMixin = sg_db.SecurityGroupDbMixin() sg = sg_dbMixin.create_security_group(ctx, temp_sg) sg_dbMixin._delete_port_security_group_bindings( ctx, port2['port']['id']) sg_dbMixin._create_port_security_group_binding( ctx, port2['port']['id'], sg['id']) port2['port']['security_groups'][0] = sg['id'] query_params = "security_groups=%s" % ( port1['port']['security_groups'][0]) ports_data = self._list('ports', 
query_params=query_params) self.assertEqual(port1['port']['id'], ports_data['ports'][0]['id']) self.assertEqual(1, len(ports_data['ports'])) query_params = "security_groups=%s" % ( (port2['port']['security_groups'][0])) ports_data = self._list('ports', query_params=query_params) self.assertEqual(port2['port']['id'], ports_data['ports'][0]['id']) def test_port_failure_rollback_dhcp_exception(self): self._enable_native_dhcp_md() self.plugin = directory.get_plugin() with mock.patch.object(self.plugin, '_add_port_mp_dhcp_binding', side_effect=nsxlib_exc.ManagerError): self.port() ctx = context.get_admin_context() networks = self.plugin.get_ports(ctx) self.assertListEqual([], networks) def test_port_DB_failure_rollback_dhcp_exception(self): self._enable_native_dhcp_md() self.plugin = directory.get_plugin() with mock.patch('vmware_nsx.db.db.add_neutron_nsx_dhcp_binding', side_effect=db_exc.DBError),\ mock.patch.object(self.plugin, '_enable_native_dhcp'),\ mock.patch('vmware_nsx.db.db.get_nsx_service_binding'),\ self.network() as network,\ self.subnet(network, cidr='10.0.1.0/24') as subnet: data = {'port': { 'network_id': network['network']['id'], 'tenant_id': self._tenant_id, 'name': 'p1', 'admin_state_up': True, 'device_id': 'fake_device', 'device_owner': 'fake_owner', 'fixed_ips': [{'subnet_id': subnet['subnet']['id'], 'ip_address': '10.0.1.2'}], 'mac_address': '00:00:00:00:00:01'} } # making sure the port creation succeeded anyway created_port = self.plugin.create_port(self.ctx, data) self.assertEqual('fake_device', created_port['device_id']) def test_update_port_add_additional_ip(self): """Test update of port with additional IP fails.""" with self.subnet() as subnet: with self.port(subnet=subnet) as port: data = {'port': {'admin_state_up': False, 'fixed_ips': [{'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}]}} req = self.new_update_request('ports', data, port['port']['id']) res = req.get_response(self.api) 
self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_create_port_additional_ip(self): """Test that creation of port with additional IP fails.""" with self.subnet() as subnet: data = {'port': {'network_id': subnet['subnet']['network_id'], 'tenant_id': subnet['subnet']['tenant_id'], 'fixed_ips': [{'subnet_id': subnet['subnet']['id']}, {'subnet_id': subnet['subnet']['id']}]}} port_req = self.new_create_request('ports', data) res = port_req.get_response(self.api) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_create_port_with_switching_profiles(self): """Tests that nsx ports get the configures switching profiles""" self.plugin = directory.get_plugin() with self.network() as network: data = {'port': { 'network_id': network['network']['id'], 'tenant_id': self._tenant_id, 'name': 'p1', 'admin_state_up': True, 'device_id': 'fake_device', 'device_owner': 'fake_owner', 'fixed_ips': [], 'mac_address': '00:00:00:00:00:01'} } with mock.patch.object(self.plugin.nsxlib.logical_port, 'create', return_value={'id': 'fake'}) as nsx_create: self.plugin.create_port(self.ctx, data) expected_prof = self.plugin.get_default_az().\ switching_profiles_objs[0] actual_profs = nsx_create.call_args[1]['switch_profile_ids'] # the ports switching profiles should start with the # configured one self.assertEqual(expected_prof, actual_profs[0]) def test_create_ens_port_with_no_port_sec(self): with self.subnet() as subnet,\ mock.patch("vmware_nsxlib.v3.core_resources.NsxLibTransportZone." 
"get_host_switch_mode", return_value="ENS"),\ mock.patch( "vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.get", return_value={'transport_zone_id': 'xxx'}): args = {'port': {'network_id': subnet['subnet']['network_id'], 'tenant_id': subnet['subnet']['tenant_id'], 'fixed_ips': [{'subnet_id': subnet['subnet']['id']}], psec.PORTSECURITY: False}} port_req = self.new_create_request('ports', args) port = self.deserialize(self.fmt, port_req.get_response(self.api)) self.assertFalse(port['port']['port_security_enabled']) def test_create_ens_port_with_port_sec(self): with self.subnet() as subnet,\ mock.patch("vmware_nsxlib.v3.NsxLib.get_version", return_value='2.3.0'),\ mock.patch("vmware_nsxlib.v3.core_resources.NsxLibTransportZone." "get_host_switch_mode", return_value="ENS"),\ mock.patch("vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch." "get", return_value={'transport_zone_id': 'xxx'}): args = {'port': {'network_id': subnet['subnet']['network_id'], 'tenant_id': subnet['subnet']['tenant_id'], 'fixed_ips': [{'subnet_id': subnet['subnet']['id']}], psec.PORTSECURITY: True}} port_req = self.new_create_request('ports', args) res = self.deserialize('json', port_req.get_response(self.api)) # should fail self.assertEqual('NsxENSPortSecurity', res['NeutronError']['type']) def test_create_ens_port_with_port_sec_supported(self): with self.subnet() as subnet,\ mock.patch("vmware_nsxlib.v3.core_resources.NsxLibTransportZone." 
"get_host_switch_mode", return_value="ENS"),\ mock.patch( "vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch.get", return_value={'transport_zone_id': 'xxx'}): args = {'port': {'network_id': subnet['subnet']['network_id'], 'tenant_id': subnet['subnet']['tenant_id'], 'fixed_ips': [{'subnet_id': subnet['subnet']['id']}], psec.PORTSECURITY: True}} port_req = self.new_create_request('ports', args) res = self.deserialize('json', port_req.get_response(self.api)) # should succeed self.assertTrue(res['port'][psec.PORTSECURITY]) def test_update_ens_port(self): with self.subnet() as subnet,\ mock.patch("vmware_nsxlib.v3.NsxLib.get_version", return_value='2.3.0'),\ mock.patch("vmware_nsxlib.v3.core_resources.NsxLibTransportZone." "get_host_switch_mode", return_value="ENS"),\ mock.patch("vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch." "get", return_value={'transport_zone_id': 'xxx'}): args = {'port': {'network_id': subnet['subnet']['network_id'], 'tenant_id': subnet['subnet']['tenant_id'], 'fixed_ips': [{'subnet_id': subnet['subnet']['id']}], psec.PORTSECURITY: False}} port_req = self.new_create_request('ports', args) port = self.deserialize(self.fmt, port_req.get_response(self.api)) port_id = port['port']['id'] args = {'port': {psec.PORTSECURITY: True}} req = self.new_update_request('ports', args, port_id) res = self.deserialize('json', req.get_response(self.api)) # should fail self.assertEqual('NsxENSPortSecurity', res['NeutronError']['type']) def test_update_ens_port_psec_supported(self): with self.subnet() as subnet,\ mock.patch("vmware_nsxlib.v3.core_resources.NsxLibTransportZone." "get_host_switch_mode", return_value="ENS"),\ mock.patch("vmware_nsxlib.v3.core_resources.NsxLibLogicalSwitch." 
"get", return_value={'transport_zone_id': 'xxx'}): args = {'port': {'network_id': subnet['subnet']['network_id'], 'tenant_id': subnet['subnet']['tenant_id'], 'fixed_ips': [{'subnet_id': subnet['subnet']['id']}], psec.PORTSECURITY: False}} port_req = self.new_create_request('ports', args) port = self.deserialize(self.fmt, port_req.get_response(self.api)) port_id = port['port']['id'] args = {'port': {psec.PORTSECURITY: True}} req = self.new_update_request('ports', args, port_id) res = self.deserialize('json', req.get_response(self.api)) # should succeed self.assertTrue(res['port'][psec.PORTSECURITY]) def test_update_dhcp_port_device_owner(self): self._enable_native_dhcp_md() with self.subnet(): pl = directory.get_plugin() ctx = context.Context(user_id=None, tenant_id=self._tenant_id, is_admin=False) ports = pl.get_ports( ctx, filters={'device_owner': [constants.DEVICE_OWNER_DHCP]}) port_id = ports[0]['id'] args = {'port': {'admin_state_up': False, 'fixed_ips': [], 'device_owner': 'abcd'}} req = self.new_update_request('ports', args, port_id) res = self.deserialize('json', req.get_response(self.api)) # should fail self.assertEqual('InvalidInput', res['NeutronError']['type']) def test_create_compute_port_with_relay_no_router(self): """Compute port creation should fail if a network with dhcp relay is not connected to a router """ self._enable_dhcp_relay() with self.network() as network, \ self.subnet(network=network, enable_dhcp=True) as s1: device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'X' data = {'port': { 'network_id': network['network']['id'], 'tenant_id': self._tenant_id, 'name': 'port', 'admin_state_up': True, 'device_id': 'fake_device', 'device_owner': device_owner, 'fixed_ips': [{'subnet_id': s1['subnet']['id']}], 'mac_address': '00:00:00:00:00:01'} } self.assertRaises(n_exc.InvalidInput, self.plugin.create_port, self.ctx, data) def test_create_compute_port_with_relay_and_router(self): self._enable_dhcp_relay() with self.network() as network, \ 
self.subnet(network=network, enable_dhcp=True) as s1,\ mock.patch.object(self.plugin, '_get_router', return_value={'name': 'dummy'}): # first create a router interface to simulate a router data = {'port': { 'network_id': network['network']['id'], 'tenant_id': self._tenant_id, 'name': 'port', 'admin_state_up': True, 'device_id': 'dummy', 'device_owner': l3_db.DEVICE_OWNER_ROUTER_INTF, 'fixed_ips': [{'subnet_id': s1['subnet']['id']}], 'mac_address': '00:00:00:00:00:02'} } port1 = self.plugin.create_port(self.ctx, data) self.assertIn('id', port1) # Now create a compute port device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'X' data = {'port': { 'network_id': network['network']['id'], 'tenant_id': self._tenant_id, 'name': 'port', 'admin_state_up': True, 'device_id': 'fake_device', 'device_owner': device_owner, 'fixed_ips': [{'subnet_id': s1['subnet']['id']}], 'mac_address': '00:00:00:00:00:01'} } port2 = self.plugin.create_port(self.ctx, data) self.assertIn('id', port2) def _test_create_direct_network(self, vlan_id=0): net_type = vlan_id and 'vlan' or 'flat' name = 'direct_net' providernet_args = {pnet.NETWORK_TYPE: net_type, pnet.PHYSICAL_NETWORK: 'tzuuid'} if vlan_id: providernet_args[pnet.SEGMENTATION_ID] = vlan_id mock_tt = mock.patch('vmware_nsxlib.v3' '.core_resources.NsxLibTransportZone' '.get_transport_type', return_value='VLAN') mock_tt.start() return self.network(name=name, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK, pnet.SEGMENTATION_ID)) def _test_create_port_vnic_direct(self, vlan_id): with self._test_create_direct_network(vlan_id=vlan_id) as network: # Check that port security conflicts kwargs = {portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT, psec.PORTSECURITY: True} net_id = network['network']['id'] res = self._create_port(self.fmt, net_id=net_id, arg_list=(portbindings.VNIC_TYPE, psec.PORTSECURITY), **kwargs) self.assertEqual(res.status_int, exc.HTTPBadRequest.code) # Check that security group conflicts 
kwargs = {portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT, 'security_groups': [ '4cd70774-cc67-4a87-9b39-7d1db38eb087'], psec.PORTSECURITY: False} net_id = network['network']['id'] res = self._create_port(self.fmt, net_id=net_id, arg_list=(portbindings.VNIC_TYPE, psec.PORTSECURITY), **kwargs) self.assertEqual(res.status_int, exc.HTTPBadRequest.code) # All is kosher so we can create the port kwargs = {portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT} net_id = network['network']['id'] res = self._create_port(self.fmt, net_id=net_id, arg_list=(portbindings.VNIC_TYPE,), **kwargs) port = self.deserialize('json', res) self.assertEqual("direct", port['port'][portbindings.VNIC_TYPE]) self.assertEqual("dvs", port['port'][portbindings.VIF_TYPE]) self.assertEqual( vlan_id, port['port'][portbindings.VIF_DETAILS]['segmentation-id']) # try to get the same port req = self.new_show_request('ports', port['port']['id'], self.fmt) sport = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual("dvs", sport['port'][portbindings.VIF_TYPE]) self.assertEqual("direct", sport['port'][portbindings.VNIC_TYPE]) self.assertEqual( vlan_id, sport['port'][portbindings.VIF_DETAILS]['segmentation-id']) self.assertFalse( sport['port'][portbindings.VIF_DETAILS]['vlan-transparent']) def test_create_port_vnic_direct_flat(self): self._test_create_port_vnic_direct(0) def test_create_port_vnic_direct_vlan(self): self._test_create_port_vnic_direct(10) def test_create_port_vnic_direct_invalid_network(self): with self.network(name='not vlan/flat') as net: kwargs = {portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT, psec.PORTSECURITY: False} net_id = net['network']['id'] res = self._create_port(self.fmt, net_id=net_id, arg_list=(portbindings.VNIC_TYPE, psec.PORTSECURITY), **kwargs) self.assertEqual(exc.HTTPBadRequest.code, res.status_int) def test_update_vnic_direct(self): with self._test_create_direct_network(vlan_id=7) as network: with self.subnet(network=network) as subnet: with 
self.port(subnet=subnet) as port: # need to do two updates as the update for port security # disabled requires that it can only change 2 items data = {'port': {psec.PORTSECURITY: False, 'security_groups': []}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize('json', req.get_response(self.api)) self.assertEqual(portbindings.VNIC_NORMAL, res['port'][portbindings.VNIC_TYPE]) data = {'port': {portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize('json', req.get_response(self.api)) self.assertEqual(portbindings.VNIC_DIRECT, res['port'][portbindings.VNIC_TYPE]) def test_port_invalid_vnic_type(self): with self._test_create_direct_network(vlan_id=7) as network: kwargs = {portbindings.VNIC_TYPE: 'invalid', psec.PORTSECURITY: False} net_id = network['network']['id'] res = self._create_port(self.fmt, net_id=net_id, arg_list=(portbindings.VNIC_TYPE, psec.PORTSECURITY), **kwargs) self.assertEqual(res.status_int, exc.HTTPBadRequest.code) def test_create_transparent_vlan_port(self): providernet_args = {pnet.NETWORK_TYPE: 'vlan', vlan_apidef.VLANTRANSPARENT: True} with mock.patch('vmware_nsxlib.v3.core_resources.NsxLibTransportZone.' 
'get_transport_type', return_value='VLAN'): result = self._create_network(fmt='json', name='vlan_net', admin_state_up=True, providernet_args=providernet_args, arg_list=( pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID, vlan_apidef.VLANTRANSPARENT)) network = self.deserialize('json', result) net_id = network['network']['id'] with self.subnet(network=network): kwargs = {portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT} net_id = network['network']['id'] res = self._create_port(self.fmt, net_id=net_id, arg_list=(portbindings.VNIC_TYPE,), **kwargs) port = self.deserialize('json', res) self.assertTrue( port['port'][portbindings.VIF_DETAILS]['vlan-transparent']) @common_v3.with_disable_dhcp def test_requested_subnet_id_v4_and_v6(self): return super(TestPortsV2, self).test_requested_subnet_id_v4_and_v6() def test_port_binding_host(self): with self.port() as port: # add host data = {'port': {portbindings.HOST_ID: 'abc'}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize('json', req.get_response(self.api)) self.assertEqual('abc', res['port'][portbindings.HOST_ID]) # remove host data = {'port': {portbindings.HOST_ID: None}} req = self.new_update_request('ports', data, port['port']['id']) res = self.deserialize('json', req.get_response(self.api)) self.assertEqual('', res['port'][portbindings.HOST_ID]) class DHCPOptsTestCase(test_dhcpopts.TestExtraDhcpOpt, NsxV3PluginTestCaseMixin): def setUp(self, plugin=None): super(test_dhcpopts.ExtraDhcpOptDBTestCase, self).setUp( plugin=PLUGIN_NAME) class NSXv3DHCPAgentAZAwareWeightSchedulerTestCase( test_dhcpagent.DHCPAgentAZAwareWeightSchedulerTestCase, NsxV3PluginTestCaseMixin): def setUp(self): super(NSXv3DHCPAgentAZAwareWeightSchedulerTestCase, self).setUp() self.plugin = directory.get_plugin() self.ctx = context.get_admin_context() def setup_coreplugin(self, core_plugin=None, load_plugins=True): super(NSXv3DHCPAgentAZAwareWeightSchedulerTestCase, self).setup_coreplugin(core_plugin=PLUGIN_NAME, 
load_plugins=load_plugins) class TestL3ExtensionManager(object): def get_resources(self): # Simulate extension of L3 attribute map l3.L3().update_attributes_map( l3_egm_apidef.RESOURCE_ATTRIBUTE_MAP) l3.L3().update_attributes_map( xroute_apidef.RESOURCE_ATTRIBUTE_MAP) return (l3.L3.get_resources() + address_scope.Address_scope.get_resources()) def get_actions(self): return [] def get_request_extensions(self): return [] class L3NatTest(test_l3_plugin.L3BaseForIntTests, NsxV3PluginTestCaseMixin, common_v3.FixExternalNetBaseTest, common_v3.NsxV3SubnetMixin, test_address_scope.AddressScopeTestCase): def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None, service_plugins=None): cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH) cfg.CONF.set_default('max_routes', 3) ext_mgr = ext_mgr or TestL3ExtensionManager() mock_nsx_version = mock.patch.object(nsx_plugin.utils, 'is_nsx_version_2_0_0', new=lambda v: True) mock_nsx_version.start() # Make sure the LB callback is not called on router deletion self.lb_mock1 = mock.patch( "vmware_nsx.services.lbaas.octavia.octavia_listener." "NSXOctaviaListenerEndpoint._check_lb_service_on_router") self.lb_mock1.start() self.lb_mock2 = mock.patch( "vmware_nsx.services.lbaas.octavia.octavia_listener." 
"NSXOctaviaListenerEndpoint._check_lb_service_on_router_interface") self.lb_mock2.start() super(L3NatTest, self).setUp( plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins) self.plugin_instance = directory.get_plugin() self._plugin_name = "%s.%s" % ( self.plugin_instance.__module__, self.plugin_instance.__class__.__name__) self._plugin_class = self.plugin_instance.__class__ self.plugin_instance.fwaas_callbacks = None self.original_subnet = self.subnet self.original_network = self.network def _set_net_external(self, net_id): # This action is not supported by the V3 plugin pass def external_network(self, name='net1', admin_state_up=True, fmt=None, **kwargs): if not name: name = 'l3_ext_net' physical_network = nsx_v3_mocks.DEFAULT_TIER0_ROUTER_UUID net_type = utils.NetworkTypes.L3_EXT providernet_args = {pnet.NETWORK_TYPE: net_type, pnet.PHYSICAL_NETWORK: physical_network} return self.original_network(name=name, admin_state_up=admin_state_up, fmt=fmt, router__external=True, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK)) def test_floatingip_create_different_fixed_ip_same_port(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_router_add_interface_multiple_ipv4_subnet_port_returns_400(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_router_add_interface_multiple_ipv6_subnet_port(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_floatingip_update_different_fixed_ip_same_port(self): self.skipTest('Multiple fixed ips on a port are not supported') def test_create_multiple_floatingips_same_fixed_ip_same_port(self): self.skipTest('Multiple fixed ips on a port are not supported') class TestL3NatTestCase(L3NatTest, test_l3_plugin.L3NatDBIntTestCase, test_ext_route.ExtraRouteDBTestCaseBase, test_metadata.MetaDataTestCase): block_dhcp_notifier = False def setUp(self, plugin=PLUGIN_NAME, ext_mgr=None, service_plugins=None): 
super(TestL3NatTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr) cfg.CONF.set_override('metadata_mode', None, 'nsx_v3') cfg.CONF.set_override('metadata_on_demand', False, 'nsx_v3') self.subnet_calls = [] def _test_create_l3_ext_network( self, physical_network=nsx_v3_mocks.DEFAULT_TIER0_ROUTER_UUID): name = 'l3_ext_net' net_type = utils.NetworkTypes.L3_EXT expected = [('subnets', []), ('name', name), ('admin_state_up', True), ('status', 'ACTIVE'), ('shared', False), (extnet_apidef.EXTERNAL, True), (pnet.NETWORK_TYPE, net_type), (pnet.PHYSICAL_NETWORK, physical_network)] with self._create_l3_ext_network(physical_network) as net: for k, v in expected: self.assertEqual(net['network'][k], v) @common_v3.with_external_subnet def test_router_update_gateway_with_external_ip_used_by_gw(self): super(TestL3NatTestCase, self).test_router_update_gateway_with_external_ip_used_by_gw() @common_v3.with_external_subnet def test_router_update_gateway_with_invalid_external_ip(self): super(TestL3NatTestCase, self).test_router_update_gateway_with_invalid_external_ip() @common_v3.with_external_subnet def test_router_update_gateway_with_invalid_external_subnet(self): super(TestL3NatTestCase, self).test_router_update_gateway_with_invalid_external_subnet() @common_v3.with_external_network def test_router_update_gateway_with_different_external_subnet(self): super(TestL3NatTestCase, self).test_router_update_gateway_with_different_external_subnet() @common_v3.with_disable_dhcp def test_create_floatingip_ipv6_only_network_returns_400(self): super(TestL3NatTestCase, self).test_create_floatingip_ipv6_only_network_returns_400() @common_v3.with_disable_dhcp def test_create_floatingip_with_assoc_to_ipv4_and_ipv6_port(self): super(L3NatTest, self).test_create_floatingip_with_assoc_to_ipv4_and_ipv6_port() @common_v3.with_external_subnet_once def test_router_update_gateway_with_existed_floatingip(self): with self.subnet(cidr='20.0.0.0/24') as subnet: 
self._set_net_external(subnet['subnet']['network_id']) with self.floatingip_with_assoc() as fip: self._add_external_gateway_to_router( fip['floatingip']['router_id'], subnet['subnet']['network_id'], expected_code=exc.HTTPConflict.code) @common_v3.with_external_network def test_router_update_gateway_add_multiple_prefixes_ipv6(self): super(TestL3NatTestCase, self).test_router_update_gateway_add_multiple_prefixes_ipv6() @common_v3.with_external_network def test_router_concurrent_delete_upon_subnet_create(self): super(TestL3NatTestCase, self).test_router_concurrent_delete_upon_subnet_create() @common_v3.with_external_network def test_router_update_gateway_upon_subnet_create_ipv6(self): super(TestL3NatTestCase, self).test_router_update_gateway_upon_subnet_create_ipv6() @common_v3.with_external_subnet def test_router_add_gateway_dup_subnet2_returns_400(self): super(TestL3NatTestCase, self).test_router_add_gateway_dup_subnet2_returns_400() @common_v3.with_external_subnet def test_router_update_gateway(self): super(TestL3NatTestCase, self).test_router_update_gateway() @common_v3.with_external_subnet def test_router_create_with_gwinfo(self): super(TestL3NatTestCase, self).test_router_create_with_gwinfo() @common_v3.with_external_subnet def test_router_clear_gateway_callback_failure_returns_409(self): super(TestL3NatTestCase, self).test_router_clear_gateway_callback_failure_returns_409() @common_v3.with_external_subnet def test_router_create_with_gwinfo_ext_ip(self): super(TestL3NatTestCase, self).test_router_create_with_gwinfo_ext_ip() @common_v3.with_external_network def test_router_create_with_gwinfo_ext_ip_subnet(self): super(TestL3NatTestCase, self).test_router_create_with_gwinfo_ext_ip_subnet() @common_v3.with_external_subnet_second_time def test_router_delete_with_floatingip_existed_returns_409(self): super(TestL3NatTestCase, self).test_router_delete_with_floatingip_existed_returns_409() @common_v3.with_external_subnet def 
test_router_add_and_remove_gateway_tenant_ctx(self): super(TestL3NatTestCase, self).test_router_add_and_remove_gateway_tenant_ctx() @common_v3.with_external_subnet def test_router_add_and_remove_gateway(self): super(TestL3NatTestCase, self).test_router_add_and_remove_gateway() def test_router_update_gateway_upon_subnet_create_max_ips_ipv6(self): self.skipTest('not supported') def test_router_add_gateway_multiple_subnets_ipv6(self): self.skipTest('multiple ipv6 subnets not supported') def test__notify_gateway_port_ip_changed(self): self.skipTest('not supported') def test__notify_gateway_port_ip_not_changed(self): self.skipTest('not supported') def test_floatingip_via_router_interface_returns_201(self): self.skipTest('not supported') def test_floatingip_via_router_interface_returns_404(self): self.skipTest('not supported') def test_router_delete_dhcpv6_stateless_subnet_inuse_returns_409(self): self.skipTest('DHCPv6 not supported') @common_v3.with_disable_dhcp def test_router_add_interface_ipv6_subnet(self): self.skipTest('DHCPv6 not supported') @common_v3.with_disable_dhcp def test_router_add_interface_ipv6_subnet_without_gateway_ip(self): super(TestL3NatTestCase, self).test_router_add_interface_ipv6_subnet_without_gateway_ip() @common_v3.with_disable_dhcp def test_router_add_interface_multiple_ipv6_subnets_different_net(self): super(TestL3NatTestCase, self).\ test_router_add_interface_multiple_ipv6_subnets_different_net() @common_v3.with_disable_dhcp def test_create_floatingip_with_assoc_to_ipv6_subnet(self): super(TestL3NatTestCase, self).test_create_floatingip_with_assoc_to_ipv6_subnet() def test_router_add_iface_ipv6_ext_ra_subnet_returns_400(self): self.skipTest('DHCPv6 not supported') @common_v3.with_external_subnet def test_floatingip_list_with_sort(self): super(TestL3NatTestCase, self).test_floatingip_list_with_sort() @common_v3.with_external_subnet_once def test_floatingip_with_assoc_fails(self): super(TestL3NatTestCase, 
self).test_floatingip_with_assoc_fails() @common_v3.with_external_subnet_second_time def test_floatingip_update_same_fixed_ip_same_port(self): super(TestL3NatTestCase, self).test_floatingip_update_same_fixed_ip_same_port() @common_v3.with_external_subnet def test_floatingip_list_with_pagination_reverse(self): super(TestL3NatTestCase, self).test_floatingip_list_with_pagination_reverse() @common_v3.with_external_subnet_once def test_floatingip_association_on_unowned_router(self): super(TestL3NatTestCase, self).test_floatingip_association_on_unowned_router() @common_v3.with_external_network def test_delete_ext_net_with_disassociated_floating_ips(self): super(TestL3NatTestCase, self).test_delete_ext_net_with_disassociated_floating_ips() @common_v3.with_external_network def test_create_floatingip_with_subnet_and_invalid_fip_address(self): super( TestL3NatTestCase, self).test_create_floatingip_with_subnet_and_invalid_fip_address() @common_v3.with_external_subnet def test_create_floatingip_with_duplicated_specific_ip(self): super(TestL3NatTestCase, self).test_create_floatingip_with_duplicated_specific_ip() @common_v3.with_external_subnet def test_create_floatingip_with_subnet_id_non_admin(self): super(TestL3NatTestCase, self).test_create_floatingip_with_subnet_id_non_admin() @common_v3.with_external_subnet def test_floatingip_list_with_pagination(self): super(TestL3NatTestCase, self).test_floatingip_list_with_pagination() @common_v3.with_external_subnet def test_create_floatingips_native_quotas(self): super(TestL3NatTestCase, self).test_create_floatingips_native_quotas() @common_v3.with_external_network def test_create_floatingip_with_multisubnet_id(self): super(TestL3NatTestCase, self).test_create_floatingip_with_multisubnet_id() @common_v3.with_external_network def test_create_floatingip_with_subnet_id_and_fip_address(self): super(TestL3NatTestCase, self).test_create_floatingip_with_subnet_id_and_fip_address() @common_v3.with_external_subnet def 
test_create_floatingip_with_specific_ip(self): super(TestL3NatTestCase, self).test_create_floatingip_with_specific_ip() @common_v3.with_external_network def test_create_floatingip_ipv6_and_ipv4_network_creates_ipv4(self): super(TestL3NatTestCase, self).test_create_floatingip_ipv6_and_ipv4_network_creates_ipv4() @common_v3.with_external_subnet_once def test_create_floatingip_non_admin_context_agent_notification(self): super( TestL3NatTestCase, self).test_create_floatingip_non_admin_context_agent_notification() @common_v3.with_external_subnet def test_create_floatingip_no_ext_gateway_return_404(self): super(TestL3NatTestCase, self).test_create_floatingip_no_ext_gateway_return_404() @common_v3.with_external_subnet def test_create_floatingip_with_specific_ip_out_of_allocation(self): super(TestL3NatTestCase, self).test_create_floatingip_with_specific_ip_out_of_allocation() @common_v3.with_external_subnet_third_time def test_floatingip_update_different_router(self): super(TestL3NatTestCase, self).test_floatingip_update_different_router() def test_router_add_gateway_notifications(self): with self.router() as r,\ self._create_l3_ext_network() as ext_net,\ self.subnet(network=ext_net): with mock.patch.object(registry, 'publish') as publish: self._add_external_gateway_to_router( r['router']['id'], ext_net['network']['id']) expected = [mock.call( resources.ROUTER_GATEWAY, events.AFTER_CREATE, mock.ANY, payload=mock.ANY)] publish.assert_has_calls(expected) def test_create_l3_ext_network_with_default_tier0(self): self._test_create_l3_ext_network() def test_floatingip_update(self): super(TestL3NatTestCase, self).test_floatingip_update( expected_status=constants.FLOATINGIP_STATUS_DOWN) @common_v3.with_external_subnet_second_time def test_floatingip_with_invalid_create_port(self): self._test_floatingip_with_invalid_create_port(self._plugin_name) def test_network_update_external(self): # This plugin does not support updating the external flag of a network self.skipTest('not 
supported') def test_network_update_external_failure(self): # This plugin does not support updating the external flag of a network # This is tested with a different test self.skipTest('not supported') def test_router_add_gateway_dup_subnet1_returns_400(self): self.skipTest('not supported') def test_router_add_interface_dup_subnet2_returns_400(self): self.skipTest('not supported') def test_router_add_interface_ipv6_port_existing_network_returns_400(self): self.skipTest('multiple ipv6 subnets not supported') def test_routes_update_for_multiple_routers(self): self.skipTest('not supported') def test_floatingip_multi_external_one_internal(self): self.skipTest('not supported') def test_floatingip_same_external_and_internal(self): self.skipTest('not supported') def test_route_update_with_external_route(self): self.skipTest('not supported') def test_floatingip_update_subnet_gateway_disabled(self): self.skipTest('not supported') def test_router_add_interface_by_port_other_tenant_address_out_of_pool( self): # multiple fixed ips per port are not supported self.skipTest('not supported') def test_router_add_interface_by_port_other_tenant_address_in_pool(self): # multiple fixed ips per port are not supported self.skipTest('not supported') def test_router_add_interface_by_port_admin_address_out_of_pool(self): # multiple fixed ips per port are not supported self.skipTest('not supported') def test_router_delete_with_lb_service(self): self.lb_mock1.stop() self.lb_mock2.stop() # Create the LB object - here the delete callback is registered loadbalancer = loadbalancer_mgr.EdgeLoadBalancerManagerFromDict() oct_listener = octavia_listener.NSXOctaviaListenerEndpoint( loadbalancer=loadbalancer) with self.router() as router: with mock.patch('vmware_nsxlib.v3.load_balancer.Service.' 
'get_router_lb_service'),\ mock.patch('vmware_nsx.db.db.get_nsx_router_id', return_value='1'),\ mock.patch.object( nsx_db, 'has_nsx_lbaas_loadbalancer_binding_by_router', return_value=True): self.assertRaises(nc_exc.CallbackFailure, self.plugin_instance.delete_router, context.get_admin_context(), router['router']['id']) # Unregister callback oct_listener._unsubscribe_router_delete_callback() self.lb_mock1.start() self.lb_mock2.start() def test_multiple_subnets_on_different_routers(self): with self.network() as network: with self.subnet(network=network) as s1,\ self.subnet(network=network, cidr='11.0.0.0/24') as s2,\ self.router() as r1,\ self.router() as r2: self._router_interface_action('add', r1['router']['id'], s1['subnet']['id'], None) self.assertRaises(n_exc.Conflict, self.plugin_instance.add_router_interface, context.get_admin_context(), r2['router']['id'], {'subnet_id': s2['subnet']['id']}) self._router_interface_action('remove', r1['router']['id'], s1['subnet']['id'], None) self._router_interface_action('add', r2['router']['id'], s2['subnet']['id'], None) self._router_interface_action('remove', r2['router']['id'], s2['subnet']['id'], None) def test_multiple_subnets_on_same_router(self): with self.network() as network: with self.subnet(network=network) as s1,\ self.subnet(network=network, cidr='11.0.0.0/24') as s2,\ self.router() as r1: self._router_interface_action('add', r1['router']['id'], s1['subnet']['id'], None) self.assertRaises(n_exc.InvalidInput, self.plugin_instance.add_router_interface, context.get_admin_context(), r1['router']['id'], {'subnet_id': s2['subnet']['id']}) self._router_interface_action('remove', r1['router']['id'], s1['subnet']['id'], None) def test_router_remove_interface_inuse_return_409(self): with self.router() as r1,\ self._create_l3_ext_network() as ext_net,\ self.subnet(network=ext_net) as ext_subnet,\ self.subnet(cidr='11.0.0.0/24') as s1: self._router_interface_action( 'add', r1['router']['id'], s1['subnet']['id'], None) 
self._add_external_gateway_to_router( r1['router']['id'], ext_subnet['subnet']['network_id']) with self.port(subnet=s1,) as p: fip_res = self._create_floatingip( self.fmt, ext_subnet['subnet']['network_id'], subnet_id=ext_subnet['subnet']['id'], port_id=p['port']['id']) fip = self.deserialize(self.fmt, fip_res) self._router_interface_action( 'remove', r1['router']['id'], s1['subnet']['id'], None, expected_code=exc.HTTPConflict.code) self._delete('floatingips', fip['floatingip']['id']) self._remove_external_gateway_from_router( r1['router']['id'], ext_subnet['subnet']['network_id']) self._router_interface_action('remove', r1['router']['id'], s1['subnet']['id'], None) def test_router_update_on_external_port(self): with self.router() as r: with self._create_l3_ext_network() as ext_net,\ self.subnet(network=ext_net, cidr='10.0.1.0/24') as s: self._add_external_gateway_to_router( r['router']['id'], s['subnet']['network_id']) body = self._show('routers', r['router']['id']) net_id = body['router']['external_gateway_info']['network_id'] self.assertEqual(net_id, s['subnet']['network_id']) port_res = self._list_ports( 'json', 200, s['subnet']['network_id'], tenant_id=r['router']['tenant_id'], device_owner=constants.DEVICE_OWNER_ROUTER_GW) port_list = self.deserialize('json', port_res) self.assertEqual(len(port_list['ports']), 1) routes = [{'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}] self.assertRaises(n_exc.InvalidInput, self.plugin_instance.update_router, context.get_admin_context(), r['router']['id'], {'router': {'routes': routes}}) updates = {'admin_state_up': False} self.assertRaises(n_exc.InvalidInput, self.plugin_instance.update_router, context.get_admin_context(), r['router']['id'], {'router': updates}) self._remove_external_gateway_from_router( r['router']['id'], s['subnet']['network_id']) body = self._show('routers', r['router']['id']) gw_info = body['router']['external_gateway_info'] self.assertIsNone(gw_info) def test_router_on_vlan_net(self): 
providernet_args = {pnet.NETWORK_TYPE: 'vlan', pnet.SEGMENTATION_ID: 10} with mock.patch('vmware_nsxlib.v3.core_resources.NsxLibTransportZone.' 'get_transport_type', return_value='VLAN'): result = self._create_network(fmt='json', name='badvlan_net', admin_state_up=True, providernet_args=providernet_args, arg_list=( pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID)) vlan_network = self.deserialize('json', result) with self.router() as r1,\ self._create_l3_ext_network() as ext_net,\ self.subnet(network=ext_net) as ext_subnet,\ self.subnet(cidr='11.0.0.0/24', network=vlan_network) as s1: # adding a vlan interface with no GW should fail self._router_interface_action( 'add', r1['router']['id'], s1['subnet']['id'], None, expected_code=400) # adding GW self._add_external_gateway_to_router( r1['router']['id'], ext_subnet['subnet']['network_id']) # adding the vlan interface self._router_interface_action( 'add', r1['router']['id'], s1['subnet']['id'], None) # adding a floating ip with self.port(subnet=s1) as p: fip_res = self._create_floatingip( self.fmt, ext_subnet['subnet']['network_id'], subnet_id=ext_subnet['subnet']['id'], port_id=p['port']['id']) fip = self.deserialize(self.fmt, fip_res) self.assertEqual(p['port']['id'], fip['floatingip']['port_id']) def test_create_router_gateway_fails(self): self.skipTest('not supported') def test_router_remove_ipv6_subnet_from_interface(self): self.skipTest('not supported') def test_router_add_interface_multiple_ipv6_subnets_same_net(self): self.skipTest('not supported') def test_router_add_interface_multiple_ipv4_subnets(self): self.skipTest('not supported') def test_floatingip_update_to_same_port_id_twice(self): self.skipTest('Plugin changes floating port status') def _test_create_subnetpool(self, prefixes, expected=None, admin=False, **kwargs): keys = kwargs.copy() keys.setdefault('tenant_id', self._tenant_id) with self.subnetpool(prefixes, admin, **keys) as subnetpool: self._validate_resource(subnetpool, keys, 'subnetpool') if expected: 
self._compare_resource(subnetpool, expected, 'subnetpool') return subnetpool def _update_router_enable_snat(self, router_id, network_id, enable_snat): return self._update('routers', router_id, {'router': {'external_gateway_info': {'network_id': network_id, 'enable_snat': enable_snat}}}) def test_router_no_snat_with_different_address_scope(self): """Test that if the router has no snat, you cannot add an interface from a different address scope than the gateway. """ # create an external network on one address scope with self.address_scope(name='as1') as addr_scope, \ self._create_l3_ext_network() as ext_net: as_id = addr_scope['address_scope']['id'] subnet = netaddr.IPNetwork('10.10.10.0/24') subnetpool = self._test_create_subnetpool( [subnet.cidr], name='sp1', min_prefixlen='24', address_scope_id=as_id) subnetpool_id = subnetpool['subnetpool']['id'] data = {'subnet': { 'network_id': ext_net['network']['id'], 'subnetpool_id': subnetpool_id, 'ip_version': 4, 'enable_dhcp': False, 'tenant_id': ext_net['network']['tenant_id']}} req = self.new_create_request('subnets', data) ext_subnet = self.deserialize(self.fmt, req.get_response(self.api)) # create a regular network on another address scope with self.address_scope(name='as2') as addr_scope2, \ self.network() as net: as_id2 = addr_scope2['address_scope']['id'] subnet2 = netaddr.IPNetwork('20.10.10.0/24') subnetpool2 = self._test_create_subnetpool( [subnet2.cidr], name='sp2', min_prefixlen='24', address_scope_id=as_id2) subnetpool_id2 = subnetpool2['subnetpool']['id'] data = {'subnet': { 'network_id': net['network']['id'], 'subnetpool_id': subnetpool_id2, 'ip_version': 4, 'tenant_id': net['network']['tenant_id']}} req = self.new_create_request('subnets', data) int_subnet = self.deserialize( self.fmt, req.get_response(self.api)) # create a no snat router with this gateway with self.router() as r: self._add_external_gateway_to_router( r['router']['id'], ext_subnet['subnet']['network_id']) self._update_router_enable_snat( 
r['router']['id'], ext_subnet['subnet']['network_id'], False) # should fail adding the interface to the router err_code = exc.HTTPBadRequest.code self._router_interface_action('add', r['router']['id'], int_subnet['subnet']['id'], None, err_code) def test_router_no_snat_with_same_address_scope(self): """Test that if the router has no snat, you can add an interface from the same address scope as the gateway. """ # create an external network on one address scope with self.address_scope(name='as1') as addr_scope, \ self._create_l3_ext_network() as ext_net: as_id = addr_scope['address_scope']['id'] subnet = netaddr.IPNetwork('10.10.10.0/21') subnetpool = self._test_create_subnetpool( [subnet.cidr], name='sp1', min_prefixlen='24', address_scope_id=as_id) subnetpool_id = subnetpool['subnetpool']['id'] data = {'subnet': { 'network_id': ext_net['network']['id'], 'subnetpool_id': subnetpool_id, 'ip_version': 4, 'enable_dhcp': False, 'tenant_id': ext_net['network']['tenant_id']}} req = self.new_create_request('subnets', data) ext_subnet = self.deserialize(self.fmt, req.get_response(self.api)) # create a regular network on the same address scope with self.network() as net: data = {'subnet': { 'network_id': net['network']['id'], 'subnetpool_id': subnetpool_id, 'ip_version': 4, 'tenant_id': net['network']['tenant_id']}} req = self.new_create_request('subnets', data) int_subnet = self.deserialize( self.fmt, req.get_response(self.api)) # create a no snat router with this gateway with self.router() as r: self._add_external_gateway_to_router( r['router']['id'], ext_subnet['subnet']['network_id']) self._update_router_enable_snat( r['router']['id'], ext_subnet['subnet']['network_id'], False) # should succeed adding the interface to the router self._router_interface_action('add', r['router']['id'], int_subnet['subnet']['id'], None) def _mock_add_snat_rule(self): return mock.patch("vmware_nsxlib.v3.router.RouterLib." 
"add_gw_snat_rule") def _mock_add_remove_service_router(self): return mock.patch("vmware_nsxlib.v3.core_resources." "NsxLibLogicalRouter.update") def _mock_del_snat_rule(self): return mock.patch("vmware_nsxlib.v3.router.RouterLib." "delete_gw_snat_rule_by_source") def _prepare_external_subnet_on_address_scope(self, ext_net, address_scope): as_id = address_scope['address_scope']['id'] subnet = netaddr.IPNetwork('10.10.10.0/21') subnetpool = self._test_create_subnetpool( [subnet.cidr], name='sp1', min_prefixlen='24', address_scope_id=as_id) subnetpool_id = subnetpool['subnetpool']['id'] data = {'subnet': { 'network_id': ext_net['network']['id'], 'subnetpool_id': subnetpool_id, 'ip_version': 4, 'enable_dhcp': False, 'tenant_id': ext_net['network']['tenant_id']}} req = self.new_create_request('subnets', data) ext_subnet = self.deserialize(self.fmt, req.get_response(self.api)) return ext_subnet['subnet'] def _create_subnet_and_assert_snat_rules(self, subnetpool_id, router_id, assert_snat_deleted=False, assert_snat_added=False): # create a regular network on the given subnet pool with self.network() as net: data = {'subnet': { 'network_id': net['network']['id'], 'subnetpool_id': subnetpool_id, 'ip_version': 4, 'tenant_id': net['network']['tenant_id']}} req = self.new_create_request('subnets', data) int_subnet = self.deserialize( self.fmt, req.get_response(self.api)) with self._mock_add_snat_rule() as add_nat,\ self._mock_del_snat_rule() as delete_nat: # Add the interface self._router_interface_action( 'add', router_id, int_subnet['subnet']['id'], None) if assert_snat_deleted: delete_nat.assert_called() else: delete_nat.assert_not_called() if assert_snat_added: add_nat.assert_called() else: add_nat.assert_not_called() def test_add_service_router_enable_snat(self): with self.address_scope(name='as1') as addr_scope, \ self._create_l3_ext_network() as ext_net: ext_subnet = self._prepare_external_subnet_on_address_scope( ext_net, addr_scope) # create a router with this 
gateway with self.router() as r, \ mock.patch("vmware_nsxlib.v3.router.RouterLib." "has_service_router", return_value=False),\ self._mock_add_remove_service_router() as change_sr: router_id = r['router']['id'] self._add_external_gateway_to_router( router_id, ext_subnet['network_id']) # Checking that router update is being called with # edge_cluster_uuid, for creating a service router change_sr.assert_any_call( mock.ANY, edge_cluster_id=NSX_EDGE_CLUSTER_UUID, enable_standby_relocation=True) def test_remove_service_router_disable_snat(self): with self.address_scope(name='as1') as addr_scope, \ self._create_l3_ext_network() as ext_net: ext_subnet = self._prepare_external_subnet_on_address_scope( ext_net, addr_scope) # create a router with this gateway, disable snat with self.router() as r: self._add_external_gateway_to_router( r['router']['id'], ext_subnet['network_id']) with mock.patch("vmware_nsxlib.v3.router.RouterLib." "has_service_router", return_value=True),\ self._mock_add_remove_service_router() as change_sr: self._update_router_enable_snat( r['router']['id'], ext_subnet['network_id'], False) # Checking that router update is being called # and setting edge_cluster_uuid to None, for service # router removal. change_sr.assert_called_once_with( mock.ANY, edge_cluster_id=None, enable_standby_relocation=False) def test_router_address_scope_snat_rules(self): """Test that if the router interface had the same address scope as the gateway - snat rule is not added. 
""" # create an external network on one address scope with self.address_scope(name='as1') as addr_scope, \ self._create_l3_ext_network() as ext_net: ext_subnet = self._prepare_external_subnet_on_address_scope( ext_net, addr_scope) # create a router with this gateway with self.router() as r: self._add_external_gateway_to_router( r['router']['id'], ext_subnet['network_id']) # create a regular network on same address scope # and verify no snat change as_id = addr_scope['address_scope']['id'] subnet = netaddr.IPNetwork('30.10.10.0/24') subnetpool = self._test_create_subnetpool( [subnet.cidr], name='sp2', min_prefixlen='24', address_scope_id=as_id) as_id = addr_scope['address_scope']['id'] subnetpool_id = subnetpool['subnetpool']['id'] self._create_subnet_and_assert_snat_rules( subnetpool_id, r['router']['id']) # create a regular network on a different address scope # and verify snat rules are added with self.address_scope(name='as2') as addr_scope2: as2_id = addr_scope2['address_scope']['id'] subnet2 = netaddr.IPNetwork('20.10.10.0/24') subnetpool2 = self._test_create_subnetpool( [subnet2.cidr], name='sp2', min_prefixlen='24', address_scope_id=as2_id) subnetpool2_id = subnetpool2['subnetpool']['id'] self._create_subnet_and_assert_snat_rules( subnetpool2_id, r['router']['id'], assert_snat_added=True) def _test_router_address_scope_change(self, change_gw=False): """When subnetpool address scope changes, and router that was originally under same address scope, results having different address scopes, relevant snat rules are added. 
""" # create an external network on one address scope with self.address_scope(name='as1') as addr_scope, \ self._create_l3_ext_network() as ext_net: ext_subnet = self._prepare_external_subnet_on_address_scope( ext_net, addr_scope) # create a router with this gateway with self.router() as r: self._add_external_gateway_to_router( r['router']['id'], ext_subnet['network_id']) # create a regular network on same address scope # and verify no snat change as_id = addr_scope['address_scope']['id'] subnet2 = netaddr.IPNetwork('40.10.10.0/24') subnetpool2 = self._test_create_subnetpool( [subnet2.cidr], name='sp2', min_prefixlen='24', address_scope_id=as_id) subnetpool2_id = subnetpool2['subnetpool']['id'] self._create_subnet_and_assert_snat_rules( subnetpool2_id, r['router']['id']) # change address scope of the first subnetpool with self.address_scope(name='as2') as addr_scope2,\ self._mock_add_snat_rule() as add_nat: as2_id = addr_scope2['address_scope']['id'] data = {'subnetpool': { 'address_scope_id': as2_id}} if change_gw: subnetpool_to_update = ext_subnet['subnetpool_id'] else: subnetpool_to_update = subnetpool2_id req = self.new_update_request('subnetpools', data, subnetpool_to_update) req.get_response(self.api) add_nat.assert_called_once() def test_router_address_scope_change(self): self._test_router_address_scope_change() def test_router_address_scope_gw_change(self): self._test_router_address_scope_change(change_gw=True) def _test_3leg_router_address_scope_change(self, change_gw=False, change_2gw=False): """Test address scope change scenarios with router that covers 3 address scopes """ # create an external network on one address scope with self.address_scope(name='as1') as as1, \ self.address_scope(name='as2') as as2, \ self.address_scope(name='as3') as as3, \ self._create_l3_ext_network() as ext_net: ext_subnet = self._prepare_external_subnet_on_address_scope( ext_net, as1) as1_id = as1['address_scope']['id'] # create a router with this gateway with self.router() 
as r: self._add_external_gateway_to_router( r['router']['id'], ext_subnet['network_id']) # create a regular network on address scope 2 # and verify snat change as2_id = as2['address_scope']['id'] subnet2 = netaddr.IPNetwork('20.10.10.0/24') subnetpool2 = self._test_create_subnetpool( [subnet2.cidr], name='sp2', min_prefixlen='24', address_scope_id=as2_id) subnetpool2_id = subnetpool2['subnetpool']['id'] self._create_subnet_and_assert_snat_rules( subnetpool2_id, r['router']['id'], assert_snat_added=True) # create a regular network on address scope 3 # verify no snat change as3_id = as3['address_scope']['id'] subnet3 = netaddr.IPNetwork('30.10.10.0/24') subnetpool3 = self._test_create_subnetpool( [subnet3.cidr], name='sp2', min_prefixlen='24', address_scope_id=as3_id) subnetpool3_id = subnetpool3['subnetpool']['id'] self._create_subnet_and_assert_snat_rules( subnetpool3_id, r['router']['id'], assert_snat_added=True) with self._mock_add_snat_rule() as add_nat, \ self._mock_del_snat_rule() as del_nat: if change_gw: # change address scope of GW subnet subnetpool_to_update = ext_subnet['subnetpool_id'] else: subnetpool_to_update = subnetpool2_id if change_2gw: # change subnet2 to be in GW address scope target_as = as1_id else: target_as = as3_id data = {'subnetpool': { 'address_scope_id': target_as}} req = self.new_update_request('subnetpools', data, subnetpool_to_update) req.get_response(self.api) if change_gw: # The test changed address scope of gw subnet. # Both previous rules should be deleted, # and one new rule for subnet2 should be added del_nat.assert_called() self.assertEqual(2, del_nat.call_count) add_nat.assert_called_once() else: if change_2gw: # The test changed address scope of subnet2 to be # same as GW address scope. # Snat rule for as2 will be deleted. No effect on as3 # rule. del_nat.assert_called_once() else: # The test changed address scope of subnet2 to # as3. Affected snat rule should be re-created. 
del_nat.assert_called_once() add_nat.assert_called_once() def test_3leg_router_address_scope_change(self): self._test_3leg_router_address_scope_change() def test_3leg_router_address_scope_change_to_gw(self): self._test_3leg_router_address_scope_change(change_2gw=True) def test_3leg_router_gw_address_scope_change(self): self._test_3leg_router_address_scope_change(change_gw=True) def test_subnetpool_router_address_scope_change_no_effect(self): """When all router interfaces are allocated from same subnetpool, changing address scope on this subnetpool should not affect snat rules. """ # create an external network on one address scope with self.address_scope(name='as1') as addr_scope, \ self._create_l3_ext_network() as ext_net: ext_subnet = self._prepare_external_subnet_on_address_scope( ext_net, addr_scope) # create a router with this gateway with self.router() as r: self._add_external_gateway_to_router( r['router']['id'], ext_subnet['network_id']) # create a regular network on same address scope # and verify no snat change self._create_subnet_and_assert_snat_rules( ext_subnet['subnetpool_id'], r['router']['id']) with self.address_scope(name='as2') as addr_scope2,\ self._mock_add_snat_rule() as add_nat,\ self._mock_del_snat_rule() as delete_nat: as2_id = addr_scope2['address_scope']['id'] # change address scope of the subnetpool data = {'subnetpool': { 'address_scope_id': as2_id}} req = self.new_update_request('subnetpools', data, ext_subnet['subnetpool_id']) req.get_response(self.api) add_nat.assert_not_called() delete_nat.assert_not_called() def test_router_admin_state(self): """It is not allowed to set the router admin-state to down""" with self.router() as r: self._update('routers', r['router']['id'], {'router': {'admin_state_up': False}}, expected_code=exc.HTTPBadRequest.code) def test_router_dhcp_relay_dhcp_enabled(self): """Verify that the relay service is added to the router interface""" self._enable_dhcp_relay() with self.network() as network: with 
mock.patch.object(self.plugin, 'validate_router_dhcp_relay'),\ self.subnet(network=network, enable_dhcp=True) as s1,\ self.router() as r1,\ mock.patch.object(self.plugin.nsxlib.logical_router_port, 'update') as mock_update_port: self._router_interface_action('add', r1['router']['id'], s1['subnet']['id'], None) mock_update_port.assert_called_once_with( mock.ANY, relay_service_uuid=NSX_DHCP_RELAY_SRV, subnets=mock.ANY) def test_router_dhcp_relay_dhcp_disabled(self): """Verify that the relay service is not added to the router interface If the subnet do not have enabled dhcp """ self._enable_dhcp_relay() with self.network() as network: with mock.patch.object(self.plugin, 'validate_router_dhcp_relay'),\ self.subnet(network=network, enable_dhcp=False) as s1,\ self.router() as r1,\ mock.patch.object(self.plugin.nsxlib.logical_router_port, 'update') as mock_update_port: self._router_interface_action('add', r1['router']['id'], s1['subnet']['id'], None) mock_update_port.assert_called_once_with( mock.ANY, relay_service_uuid=None, subnets=mock.ANY) def test_router_dhcp_relay_no_ipam(self): """Verify that a router cannot be created with relay and no ipam""" # Add the relay service to the config and availability zones self._enable_dhcp_relay() self.assertRaises(n_exc.InvalidInput, self.plugin_instance.create_router, context.get_admin_context(), {'router': {'name': 'rtr'}}) def test_router_add_gateway_no_subnet_forbidden(self): with self.router() as r: with self._create_l3_ext_network() as n: self._add_external_gateway_to_router( r['router']['id'], n['network']['id'], expected_code=exc.HTTPBadRequest.code) def test_router_add_gateway_no_subnet(self): self.skipTest('No support for no subnet gateway set') @mock.patch.object(nsx_plugin.NsxV3Plugin, 'validate_availability_zones') def test_create_router_with_availability_zone(self, mock_validate_az): name = 'rtr-with-zone' zone = ['zone1'] mock_validate_az.return_value = None with self.router(name=name, availability_zone_hints=zone) 
as rtr: az_hints = rtr['router']['availability_zone_hints'] self.assertListEqual(zone, az_hints) def _test_route_update_illegal(self, destination): routes = [{'destination': destination, 'nexthop': '10.0.1.3'}] with self.router() as r: with self.subnet(cidr='10.0.1.0/24') as s: fixed_ip_data = [{'ip_address': '10.0.1.2'}] with self.port(subnet=s, fixed_ips=fixed_ip_data) as p: self._router_interface_action( 'add', r['router']['id'], None, p['port']['id']) self._update('routers', r['router']['id'], {'router': {'routes': routes}}, expected_code=400) def test_route_update_illegal(self): self._test_route_update_illegal('0.0.0.0/0') self._test_route_update_illegal('0.0.0.0/16') def test_update_router_distinct_edge_cluster(self): self.mock_get_edge_cluster.stop() edge_cluster = uuidutils.generate_uuid() mock.patch( "vmware_nsxlib.v3.core_resources.NsxLibEdgeCluster." "get_id_by_name_or_id", return_value=edge_cluster).start() cfg.CONF.set_override('edge_cluster', edge_cluster, 'nsx_v3') self._initialize_azs() with self.address_scope(name='as1') as addr_scope, \ self._create_l3_ext_network() as ext_net: ext_subnet = self._prepare_external_subnet_on_address_scope( ext_net, addr_scope) # create a router with this gateway with self.router() as r, \ mock.patch("vmware_nsxlib.v3.router.RouterLib." 
"has_service_router", return_value=False),\ self._mock_add_remove_service_router() as change_sr: router_id = r['router']['id'] self._add_external_gateway_to_router( router_id, ext_subnet['network_id']) change_sr.assert_any_call( mock.ANY, edge_cluster_id=edge_cluster, enable_standby_relocation=True) self.mock_get_edge_cluster.start() def test_router_add_interface_cidr_overlapped_with_gateway(self): with self.router() as r,\ self._create_l3_ext_network() as ext_net,\ self.subnet(cidr='10.0.1.0/24') as s1,\ self.subnet(network=ext_net, cidr='10.0.0.0/16', enable_dhcp=False) as s2: self._add_external_gateway_to_router( r['router']['id'], s2['subnet']['network_id']) res = self._router_interface_action( 'add', r['router']['id'], s1['subnet']['id'], None, expected_code=exc.HTTPBadRequest.code) self.assertIn('NeutronError', res) def test_router_add_gateway_overlapped_with_interface_cidr(self): with self.router() as r,\ self._create_l3_ext_network() as ext_net,\ self.subnet(cidr='10.0.1.0/24') as s1,\ self.subnet(network=ext_net, cidr='10.0.0.0/16', enable_dhcp=False) as s2: self._router_interface_action( 'add', r['router']['id'], s1['subnet']['id'], None) res = self._add_external_gateway_to_router( r['router']['id'], s2['subnet']['network_id'], expected_code=exc.HTTPBadRequest.code) self.assertIn('NeutronError', res) def test_router_add_interface_by_port_cidr_overlapped_with_gateway(self): with self.router() as r,\ self._create_l3_ext_network() as ext_net,\ self.subnet(cidr='10.0.1.0/24') as s1,\ self.subnet(network=ext_net, cidr='10.0.0.0/16', enable_dhcp=False) as s2,\ self.port(subnet=s1) as p: self._add_external_gateway_to_router( r['router']['id'], s2['subnet']['network_id']) res = self._router_interface_action( 'add', r['router']['id'], None, p['port']['id'], expected_code=exc.HTTPBadRequest.code) self.assertIn('NeutronError', res) def test_create_floatingip_invalid_fixed_ipv6_address_returns_400(self): self.skipTest('Failed because of illegal port id') def 
test_create_floatingip_with_router_interface_device_owner_fail(self): # This tests that an error is raised when trying to assign a router # interface port with floatingip. with self.subnet(cidr='30.0.0.0/24', gateway_ip=None) as private_sub: with self.port( subnet=private_sub, device_owner=constants.DEVICE_OWNER_ROUTER_INTF) as p: port_id = p['port']['id'] with self.router() as r: self._router_interface_action('add', r['router']['id'], None, port_id) with self.external_network() as public_net, self.subnet( network=public_net, cidr='12.0.0.0/24') as public_sub: self._add_external_gateway_to_router( r['router']['id'], public_sub['subnet']['network_id']) self._make_floatingip( self.fmt, public_sub['subnet']['network_id'], port_id=port_id, http_status=exc.HTTPBadRequest.code) def test_assign_floatingip_to_router_interface_device_owner_fail(self): # This tests that an error is raised when trying to assign a router # interface port with floatingip. with self.subnet(cidr='30.0.0.0/24', gateway_ip=None) as private_sub: with self.port( subnet=private_sub, device_owner=constants.DEVICE_OWNER_ROUTER_INTF) as p: port_id = p['port']['id'] with self.router() as r: self._router_interface_action('add', r['router']['id'], None, port_id) with self.external_network() as public_net, self.subnet( network=public_net, cidr='12.0.0.0/24') as public_sub: self._add_external_gateway_to_router( r['router']['id'], public_sub['subnet']['network_id']) fip = self._make_floatingip(self.fmt, public_sub[ 'subnet']['network_id']) self._update('floatingips', fip['floatingip'][ 'id'], {'floatingip': {'port_id': port_id}}, expected_code=exc.HTTPBadRequest.code) class ExtGwModeTestCase(test_ext_gw_mode.ExtGwModeIntTestCase, L3NatTest): def test_router_gateway_set_fail_after_port_create(self): self.skipTest("TBD") @common_v3.with_external_subnet def _test_router_update_ext_gwinfo(self, snat_input_value, snat_expected_value=False, expected_http_code=exc.HTTPOk.code): return super(ExtGwModeTestCase, 
self)._test_router_update_ext_gwinfo( snat_input_value, snat_expected_value=snat_expected_value, expected_http_code=expected_http_code) @common_v3.with_external_subnet def test_router_gateway_set_retry(self): super(ExtGwModeTestCase, self).test_router_gateway_set_retry() @common_v3.with_external_subnet def _test_router_create_show_ext_gwinfo(self, *args, **kwargs): return super(ExtGwModeTestCase, self)._test_router_create_show_ext_gwinfo(*args, **kwargs) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1622527 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsxlib/0000755000175000017500000000000000000000000023010 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.242255 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsxlib/mh/0000755000175000017500000000000000000000000023414 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsxlib/mh/__init__.py0000644000175000017500000000000000000000000025513 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsxlib/mh/base.py0000644000175000017500000000700600000000000024703 0ustar00coreycorey00000000000000# Copyright (c) 2014 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. # import mock from neutron.tests import base from neutron.tests.unit.api.v2 import test_base from vmware_nsx.api_client import client from vmware_nsx.api_client import exception from vmware_nsx.api_client import version from vmware_nsx.common import config # noqa from vmware_nsx import nsx_cluster as cluster from vmware_nsx.tests import unit as vmware from vmware_nsx.tests.unit.nsxlib import fake _uuid = test_base._uuid class NsxlibTestCase(base.BaseTestCase): def setUp(self): self.fc = fake.FakeClient(vmware.STUBS_PATH) self.mock_nsxapi = mock.patch(vmware.NSXAPI_NAME, autospec=True) instance = self.mock_nsxapi.start() instance.return_value.login.return_value = "the_cookie" fake_version = getattr(self, 'fake_version', "3.0") instance.return_value.get_version.return_value = ( version.Version(fake_version)) instance.return_value.request.side_effect = self.fc.fake_request self.fake_cluster = cluster.NSXCluster( name='fake-cluster', nsx_controllers=['1.1.1.1:999'], default_tz_uuid=_uuid(), nsx_user='foo', nsx_password='bar') self.fake_cluster.api_client = client.NsxApiClient( ('1.1.1.1', '999', True), self.fake_cluster.nsx_user, self.fake_cluster.nsx_password, self.fake_cluster.http_timeout, self.fake_cluster.retries, self.fake_cluster.redirects) super(NsxlibTestCase, self).setUp() self.addCleanup(self.fc.reset_all) def _build_tag_dict(self, tags): # This syntax is needed for python 2.6 compatibility return dict((t['scope'], t['tag']) for t in tags) class NsxlibNegativeBaseTestCase(base.BaseTestCase): def setUp(self): self.fc = fake.FakeClient(vmware.STUBS_PATH) self.mock_nsxapi = mock.patch(vmware.NSXAPI_NAME, autospec=True) instance = self.mock_nsxapi.start() instance.return_value.login.return_value = "the_cookie" # Choose 3.0, but the version is irrelevant for the aim of # these tests as calls are throwing up errors anyway fake_version = getattr(self, 
'fake_version', "3.0") instance.return_value.get_version.return_value = ( version.Version(fake_version)) def _faulty_request(*args, **kwargs): raise exception.NsxApiException() instance.return_value.request.side_effect = _faulty_request self.fake_cluster = cluster.NSXCluster( name='fake-cluster', nsx_controllers=['1.1.1.1:999'], default_tz_uuid=_uuid(), nsx_user='foo', nsx_password='bar') self.fake_cluster.api_client = client.NsxApiClient( ('1.1.1.1', '999', True), self.fake_cluster.nsx_user, self.fake_cluster.nsx_password, self.fake_cluster.http_timeout, self.fake_cluster.retries, self.fake_cluster.redirects) super(NsxlibNegativeBaseTestCase, self).setUp() self.addCleanup(self.fc.reset_all) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsxlib/mh/test_lsn.py0000644000175000017500000003453200000000000025630 0ustar00coreycorey00000000000000# Copyright 2013 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock from neutron.tests import base from neutron_lib import exceptions from oslo_serialization import jsonutils import six from vmware_nsx.api_client import exception as api_exc from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.common import utils from vmware_nsx.nsxlib.mh import lsn as lsnlib class LSNTestCase(base.BaseTestCase): def setUp(self): super(LSNTestCase, self).setUp() self.mock_request_p = mock.patch( 'vmware_nsx.nsxlib.mh.do_request') self.mock_request = self.mock_request_p.start() self.cluster = mock.Mock() self.cluster.default_service_cluster_uuid = 'foo' def test_service_cluster_None(self): self.mock_request.return_value = None expected = lsnlib.service_cluster_exists(None, None) self.assertFalse(expected) def test_service_cluster_found(self): self.mock_request.return_value = { "results": [ { "_href": "/ws.v1/service-cluster/foo_uuid", "display_name": "foo_name", "uuid": "foo_uuid", "tags": [], "_schema": "/ws.v1/schema/ServiceClusterConfig", "gateways": [] } ], "result_count": 1 } expected = lsnlib.service_cluster_exists(None, 'foo_uuid') self.assertTrue(expected) def test_service_cluster_not_found(self): self.mock_request.side_effect = exceptions.NotFound() expected = lsnlib.service_cluster_exists(None, 'foo_uuid') self.assertFalse(expected) def test_lsn_for_network_create(self): net_id = "foo_network_id" tags = utils.get_tags(n_network_id=net_id) obj = {"edge_cluster_uuid": "foo", "tags": tags} lsnlib.lsn_for_network_create(self.cluster, net_id) self.mock_request.assert_called_once_with( "POST", "/ws.v1/lservices-node", jsonutils.dumps(obj, sort_keys=True), cluster=self.cluster) def test_lsn_for_network_get(self): net_id = "foo_network_id" lsn_id = "foo_lsn_id" self.mock_request.return_value = { "results": [{"uuid": "foo_lsn_id"}], "result_count": 1 } result = lsnlib.lsn_for_network_get(self.cluster, net_id) self.assertEqual(lsn_id, result) self.mock_request.assert_called_once_with( "GET", 
("/ws.v1/lservices-node?fields=uuid&tag=%s&" "tag_scope=n_network_id" % net_id), cluster=self.cluster) def test_lsn_for_network_get_none(self): net_id = "foo_network_id" self.mock_request.return_value = { "results": [{"uuid": "foo_lsn_id1"}, {"uuid": "foo_lsn_id2"}], "result_count": 2 } result = lsnlib.lsn_for_network_get(self.cluster, net_id) self.assertIsNone(result) def test_lsn_for_network_get_raise_not_found(self): net_id = "foo_network_id" self.mock_request.return_value = { "results": [], "result_count": 0 } self.assertRaises(exceptions.NotFound, lsnlib.lsn_for_network_get, self.cluster, net_id) def test_lsn_delete(self): lsn_id = "foo_id" lsnlib.lsn_delete(self.cluster, lsn_id) self.mock_request.assert_called_once_with( "DELETE", "/ws.v1/lservices-node/%s" % lsn_id, cluster=self.cluster) def _test_lsn_port_host_entries_update(self, lsn_type, hosts_data): lsn_id = 'foo_lsn_id' lsn_port_id = 'foo_lsn_port_id' lsnlib.lsn_port_host_entries_update( self.cluster, lsn_id, lsn_port_id, lsn_type, hosts_data) self.mock_request.assert_called_once_with( 'PUT', '/ws.v1/lservices-node/%s/lport/%s/%s' % (lsn_id, lsn_port_id, lsn_type), jsonutils.dumps({'hosts': hosts_data}, sort_keys=True), cluster=self.cluster) def test_lsn_port_dhcp_entries_update(self): hosts_data = [{"ip_address": "11.22.33.44", "mac_address": "aa:bb:cc:dd:ee:ff"}, {"ip_address": "44.33.22.11", "mac_address": "ff:ee:dd:cc:bb:aa"}] self._test_lsn_port_host_entries_update("dhcp", hosts_data) def test_lsn_port_metadata_entries_update(self): hosts_data = [{"ip_address": "11.22.33.44", "device_id": "foo_vm_uuid"}] self._test_lsn_port_host_entries_update("metadata-proxy", hosts_data) def test_lsn_port_create(self): port_data = { "ip_address": "1.2.3.0/24", "mac_address": "aa:bb:cc:dd:ee:ff", "subnet_id": "foo_subnet_id" } port_id = "foo_port_id" self.mock_request.return_value = {"uuid": port_id} lsn_id = "foo_lsn_id" result = lsnlib.lsn_port_create(self.cluster, lsn_id, port_data) self.assertEqual(result, 
port_id) tags = utils.get_tags(n_subnet_id=port_data["subnet_id"], n_mac_address=port_data["mac_address"]) port_obj = { "ip_address": port_data["ip_address"], "mac_address": port_data["mac_address"], "type": "LogicalServicesNodePortConfig", "tags": tags } self.mock_request.assert_called_once_with( "POST", "/ws.v1/lservices-node/%s/lport" % lsn_id, jsonutils.dumps(port_obj, sort_keys=True), cluster=self.cluster) def test_lsn_port_delete(self): lsn_id = "foo_lsn_id" lsn_port_id = "foo_port_id" lsnlib.lsn_port_delete(self.cluster, lsn_id, lsn_port_id) self.mock_request.assert_called_once_with( "DELETE", "/ws.v1/lservices-node/%s/lport/%s" % (lsn_id, lsn_port_id), cluster=self.cluster) def test_lsn_port_get_with_filters(self): lsn_id = "foo_lsn_id" port_id = "foo_port_id" filters = {"tag": "foo_tag", "tag_scope": "foo_scope"} self.mock_request.return_value = { "results": [{"uuid": port_id}], "result_count": 1 } result = lsnlib._lsn_port_get(self.cluster, lsn_id, filters) self.assertEqual(result, port_id) self.mock_request.assert_called_once_with( "GET", ("/ws.v1/lservices-node/%s/lport?fields=uuid&tag=%s&" "tag_scope=%s" % (lsn_id, filters["tag"], filters["tag_scope"])), cluster=self.cluster) def test_lsn_port_get_with_filters_return_none(self): self.mock_request.return_value = { "results": [{"uuid": "foo1"}, {"uuid": "foo2"}], "result_count": 2 } result = lsnlib._lsn_port_get(self.cluster, "lsn_id", None) self.assertIsNone(result) def test_lsn_port_get_with_filters_raises_not_found(self): self.mock_request.return_value = {"results": [], "result_count": 0} self.assertRaises(exceptions.NotFound, lsnlib._lsn_port_get, self.cluster, "lsn_id", None) def test_lsn_port_info_get(self): self.mock_request.return_value = { "tags": [ {"scope": "n_mac_address", "tag": "fa:16:3e:27:fd:a0"}, {"scope": "n_subnet_id", "tag": "foo_subnet_id"}, ], "mac_address": "aa:bb:cc:dd:ee:ff", "ip_address": "0.0.0.0/0", "uuid": "foo_lsn_port_id" } result = lsnlib.lsn_port_info_get( self.cluster, 
'foo_lsn_id', 'foo_lsn_port_id') self.mock_request.assert_called_once_with( 'GET', '/ws.v1/lservices-node/foo_lsn_id/lport/foo_lsn_port_id', cluster=self.cluster) self.assertIn('subnet_id', result) self.assertIn('mac_address', result) def test_lsn_port_info_get_raise_not_found(self): self.mock_request.side_effect = exceptions.NotFound self.assertRaises(exceptions.NotFound, lsnlib.lsn_port_info_get, self.cluster, mock.ANY, mock.ANY) def test_lsn_port_plug_network(self): lsn_id = "foo_lsn_id" lsn_port_id = "foo_lsn_port_id" lswitch_port_id = "foo_lswitch_port_id" lsnlib.lsn_port_plug_network( self.cluster, lsn_id, lsn_port_id, lswitch_port_id) self.mock_request.assert_called_once_with( "PUT", ("/ws.v1/lservices-node/%s/lport/%s/" "attachment") % (lsn_id, lsn_port_id), jsonutils.dumps({"peer_port_uuid": lswitch_port_id, "type": "PatchAttachment"}, sort_keys=True), cluster=self.cluster) def test_lsn_port_plug_network_raise_conflict(self): lsn_id = "foo_lsn_id" lsn_port_id = "foo_lsn_port_id" lswitch_port_id = "foo_lswitch_port_id" self.mock_request.side_effect = api_exc.Conflict self.assertRaises( nsx_exc.LsnConfigurationConflict, lsnlib.lsn_port_plug_network, self.cluster, lsn_id, lsn_port_id, lswitch_port_id) def _test_lsn_port_dhcp_configure( self, lsn_id, lsn_port_id, is_enabled, opts): lsnlib.lsn_port_dhcp_configure( self.cluster, lsn_id, lsn_port_id, is_enabled, opts) opt_array = [ {"name": key, "value": val} for key, val in six.iteritems(opts) ] self.mock_request.assert_has_calls([ mock.call("PUT", "/ws.v1/lservices-node/%s/dhcp" % lsn_id, jsonutils.dumps({"enabled": is_enabled}, sort_keys=True), cluster=self.cluster), mock.call("PUT", ("/ws.v1/lservices-node/%s/" "lport/%s/dhcp") % (lsn_id, lsn_port_id), jsonutils.dumps({"options": opt_array}, sort_keys=True), cluster=self.cluster) ]) def test_lsn_port_dhcp_configure_empty_opts(self): lsn_id = "foo_lsn_id" lsn_port_id = "foo_lsn_port_id" is_enabled = False opts = {} self._test_lsn_port_dhcp_configure( lsn_id, 
lsn_port_id, is_enabled, opts) def test_lsn_port_dhcp_configure_with_opts(self): lsn_id = "foo_lsn_id" lsn_port_id = "foo_lsn_port_id" is_enabled = True opts = {"opt1": "val1", "opt2": "val2"} self._test_lsn_port_dhcp_configure( lsn_id, lsn_port_id, is_enabled, opts) def _test_lsn_metadata_configure( self, lsn_id, is_enabled, opts, expected_opts): lsnlib.lsn_metadata_configure( self.cluster, lsn_id, is_enabled, opts) lsn_obj = {"enabled": is_enabled} lsn_obj.update(expected_opts) self.mock_request.assert_has_calls([ mock.call("PUT", "/ws.v1/lservices-node/%s/metadata-proxy" % lsn_id, jsonutils.dumps(lsn_obj, sort_keys=True), cluster=self.cluster), ]) def test_lsn_port_metadata_configure_empty_secret(self): lsn_id = "foo_lsn_id" is_enabled = True opts = { "metadata_server_ip": "1.2.3.4", "metadata_server_port": "8775" } expected_opts = { "metadata_server_ip": "1.2.3.4", "metadata_server_port": "8775", } self._test_lsn_metadata_configure( lsn_id, is_enabled, opts, expected_opts) def test_lsn_metadata_configure_with_secret(self): lsn_id = "foo_lsn_id" is_enabled = True opts = { "metadata_server_ip": "1.2.3.4", "metadata_server_port": "8775", "metadata_proxy_shared_secret": "foo_secret" } expected_opts = { "metadata_server_ip": "1.2.3.4", "metadata_server_port": "8775", "options": [{ "name": "metadata_proxy_shared_secret", "value": "foo_secret" }] } self._test_lsn_metadata_configure( lsn_id, is_enabled, opts, expected_opts) def _test_lsn_port_host_action( self, lsn_port_action_func, extra_action, action, host): lsn_id = "foo_lsn_id" lsn_port_id = "foo_lsn_port_id" lsn_port_action_func(self.cluster, lsn_id, lsn_port_id, host) self.mock_request.assert_called_once_with( "POST", ("/ws.v1/lservices-node/%s/lport/" "%s/%s?action=%s") % (lsn_id, lsn_port_id, extra_action, action), jsonutils.dumps(host, sort_keys=True), cluster=self.cluster) def test_lsn_port_dhcp_host_add(self): host = { "ip_address": "1.2.3.4", "mac_address": "aa:bb:cc:dd:ee:ff" } 
self._test_lsn_port_host_action( lsnlib.lsn_port_dhcp_host_add, "dhcp", "add_host", host) def test_lsn_port_dhcp_host_remove(self): host = { "ip_address": "1.2.3.4", "mac_address": "aa:bb:cc:dd:ee:ff" } self._test_lsn_port_host_action( lsnlib.lsn_port_dhcp_host_remove, "dhcp", "remove_host", host) def test_lsn_port_metadata_host_add(self): host = { "ip_address": "1.2.3.4", "instance_id": "foo_instance_id" } self._test_lsn_port_host_action(lsnlib.lsn_port_metadata_host_add, "metadata-proxy", "add_host", host) def test_lsn_port_metadata_host_remove(self): host = { "ip_address": "1.2.3.4", "instance_id": "foo_instance_id" } self._test_lsn_port_host_action(lsnlib.lsn_port_metadata_host_remove, "metadata-proxy", "remove_host", host) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/nsxlib/mh/test_switch.py0000644000175000017500000003475300000000000026342 0ustar00coreycorey00000000000000# Copyright (c) 2014 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# import hashlib import mock from neutron.tests.unit.api.v2 import test_base from neutron_lib import constants from neutron_lib import exceptions from vmware_nsx.common import utils from vmware_nsx.nsxlib.mh import switch as switchlib from vmware_nsx.tests.unit.nsxlib.mh import base _uuid = test_base._uuid class LogicalSwitchesTestCase(base.NsxlibTestCase): def test_create_and_get_lswitches_single(self): tenant_id = 'pippo' transport_zones_config = [{'zone_uuid': _uuid(), 'transport_type': 'stt'}] lswitch = switchlib.create_lswitch(self.fake_cluster, _uuid(), tenant_id, 'fake-switch', transport_zones_config) res_lswitch = switchlib.get_lswitches(self.fake_cluster, lswitch['uuid']) self.assertEqual(len(res_lswitch), 1) self.assertEqual(res_lswitch[0]['uuid'], lswitch['uuid']) def test_create_and_get_lswitches_single_name_exceeds_40_chars(self): tenant_id = 'pippo' transport_zones_config = [{'zone_uuid': _uuid(), 'transport_type': 'stt'}] lswitch = switchlib.create_lswitch(self.fake_cluster, tenant_id, _uuid(), '*' * 50, transport_zones_config) res_lswitch = switchlib.get_lswitches(self.fake_cluster, lswitch['uuid']) self.assertEqual(len(res_lswitch), 1) self.assertEqual(res_lswitch[0]['uuid'], lswitch['uuid']) self.assertEqual(res_lswitch[0]['display_name'], '*' * 40) def test_create_and_get_lswitches_multiple(self): tenant_id = 'pippo' transport_zones_config = [{'zone_uuid': _uuid(), 'transport_type': 'stt'}] network_id = _uuid() main_lswitch = switchlib.create_lswitch( self.fake_cluster, network_id, tenant_id, 'fake-switch', transport_zones_config, tags=[{'scope': 'multi_lswitch', 'tag': 'True'}]) # Create secondary lswitch second_lswitch = switchlib.create_lswitch( self.fake_cluster, network_id, tenant_id, 'fake-switch-2', transport_zones_config) res_lswitch = switchlib.get_lswitches(self.fake_cluster, network_id) self.assertEqual(len(res_lswitch), 2) switch_uuids = [ls['uuid'] for ls in res_lswitch] self.assertIn(main_lswitch['uuid'], switch_uuids) 
self.assertIn(second_lswitch['uuid'], switch_uuids) for ls in res_lswitch: if ls['uuid'] == main_lswitch['uuid']: main_ls = ls else: second_ls = ls main_ls_tags = self._build_tag_dict(main_ls['tags']) second_ls_tags = self._build_tag_dict(second_ls['tags']) self.assertIn('multi_lswitch', main_ls_tags) self.assertNotIn('multi_lswitch', second_ls_tags) self.assertIn('quantum_net_id', main_ls_tags) self.assertIn('quantum_net_id', second_ls_tags) self.assertEqual(main_ls_tags['quantum_net_id'], network_id) self.assertEqual(second_ls_tags['quantum_net_id'], network_id) def _test_update_lswitch(self, tenant_id, name, tags): transport_zones_config = [{'zone_uuid': _uuid(), 'transport_type': 'stt'}] lswitch = switchlib.create_lswitch(self.fake_cluster, _uuid(), 'pippo', 'fake-switch', transport_zones_config) switchlib.update_lswitch(self.fake_cluster, lswitch['uuid'], name, tenant_id=tenant_id, tags=tags) res_lswitch = switchlib.get_lswitches(self.fake_cluster, lswitch['uuid']) self.assertEqual(len(res_lswitch), 1) self.assertEqual(res_lswitch[0]['display_name'], name) if not tags: # no need to validate tags return switch_tags = self._build_tag_dict(res_lswitch[0]['tags']) for tag in tags: self.assertIn(tag['scope'], switch_tags) self.assertEqual(tag['tag'], switch_tags[tag['scope']]) def test_update_lswitch(self): self._test_update_lswitch(None, 'new-name', [{'scope': 'new_tag', 'tag': 'xxx'}]) def test_update_lswitch_no_tags(self): self._test_update_lswitch(None, 'new-name', None) def test_update_lswitch_tenant_id(self): self._test_update_lswitch('whatever', 'new-name', None) def test_update_non_existing_lswitch_raises(self): self.assertRaises(exceptions.NetworkNotFound, switchlib.update_lswitch, self.fake_cluster, 'whatever', 'foo', 'bar') def test_delete_networks(self): transport_zones_config = [{'zone_uuid': _uuid(), 'transport_type': 'stt'}] lswitch = switchlib.create_lswitch(self.fake_cluster, _uuid(), 'pippo', 'fake-switch', transport_zones_config) 
switchlib.delete_networks(self.fake_cluster, lswitch['uuid'], [lswitch['uuid']]) self.assertRaises(exceptions.NotFound, switchlib.get_lswitches, self.fake_cluster, lswitch['uuid']) def test_delete_non_existing_lswitch_raises(self): self.assertRaises(exceptions.NetworkNotFound, switchlib.delete_networks, self.fake_cluster, 'whatever', ['whatever']) class LogicalPortsTestCase(base.NsxlibTestCase): def _create_switch_and_port(self, tenant_id='pippo', neutron_port_id='whatever', name='name', device_id='device_id'): transport_zones_config = [{'zone_uuid': _uuid(), 'transport_type': 'stt'}] lswitch = switchlib.create_lswitch(self.fake_cluster, _uuid(), tenant_id, 'fake-switch', transport_zones_config) lport = switchlib.create_lport(self.fake_cluster, lswitch['uuid'], tenant_id, neutron_port_id, name, device_id, True) return lswitch, lport def test_create_and_get_port(self): lswitch, lport = self._create_switch_and_port() lport_res = switchlib.get_port(self.fake_cluster, lswitch['uuid'], lport['uuid']) self.assertEqual(lport['uuid'], lport_res['uuid']) # Try again with relation lport_res = switchlib.get_port(self.fake_cluster, lswitch['uuid'], lport['uuid'], relations='LogicalPortStatus') self.assertEqual(lport['uuid'], lport_res['uuid']) def test_plug_interface(self): lswitch, lport = self._create_switch_and_port() switchlib.plug_vif_interface(self.fake_cluster, lswitch['uuid'], lport['uuid'], 'VifAttachment', 'fake') lport_res = switchlib.get_port(self.fake_cluster, lswitch['uuid'], lport['uuid']) self.assertEqual(lport['uuid'], lport_res['uuid']) def test_get_port_by_tag(self): lswitch, lport = self._create_switch_and_port() lport2 = switchlib.get_port_by_neutron_tag(self.fake_cluster, lswitch['uuid'], 'whatever') self.assertIsNotNone(lport2) self.assertEqual(lport['uuid'], lport2['uuid']) def test_get_port_by_tag_not_found_with_switch_id_raises_not_found(self): tenant_id = 'pippo' neutron_port_id = 'whatever' transport_zones_config = [{'zone_uuid': _uuid(), 
'transport_type': 'stt'}] lswitch = switchlib.create_lswitch( self.fake_cluster, tenant_id, _uuid(), 'fake-switch', transport_zones_config) self.assertRaises(exceptions.NotFound, switchlib.get_port_by_neutron_tag, self.fake_cluster, lswitch['uuid'], neutron_port_id) def test_get_port_by_tag_not_find_wildcard_lswitch_returns_none(self): tenant_id = 'pippo' neutron_port_id = 'whatever' transport_zones_config = [{'zone_uuid': _uuid(), 'transport_type': 'stt'}] switchlib.create_lswitch( self.fake_cluster, tenant_id, _uuid(), 'fake-switch', transport_zones_config) lport = switchlib.get_port_by_neutron_tag( self.fake_cluster, '*', neutron_port_id) self.assertIsNone(lport) def test_get_port_status(self): lswitch, lport = self._create_switch_and_port() status = switchlib.get_port_status( self.fake_cluster, lswitch['uuid'], lport['uuid']) self.assertEqual(constants.PORT_STATUS_ACTIVE, status) def test_get_port_status_non_existent_raises(self): self.assertRaises(exceptions.PortNotFoundOnNetwork, switchlib.get_port_status, self.fake_cluster, 'boo', 'boo') def test_update_port(self): lswitch, lport = self._create_switch_and_port() switchlib.update_port( self.fake_cluster, lswitch['uuid'], lport['uuid'], 'neutron_port_id', 'pippo2', 'new_name', 'device_id', False) lport_res = switchlib.get_port(self.fake_cluster, lswitch['uuid'], lport['uuid']) self.assertEqual(lport['uuid'], lport_res['uuid']) self.assertEqual('new_name', lport_res['display_name']) self.assertEqual('False', lport_res['admin_status_enabled']) port_tags = self._build_tag_dict(lport_res['tags']) self.assertIn('os_tid', port_tags) self.assertIn('q_port_id', port_tags) self.assertIn('vm_id', port_tags) def test_create_port_device_id_less_than_40_chars(self): lswitch, lport = self._create_switch_and_port() lport_res = switchlib.get_port(self.fake_cluster, lswitch['uuid'], lport['uuid']) port_tags = self._build_tag_dict(lport_res['tags']) self.assertEqual('device_id', port_tags['vm_id']) def 
test_create_port_device_id_more_than_40_chars(self): dev_id = "this_is_a_very_long_device_id_with_lots_of_characters" lswitch, lport = self._create_switch_and_port(device_id=dev_id) lport_res = switchlib.get_port(self.fake_cluster, lswitch['uuid'], lport['uuid']) port_tags = self._build_tag_dict(lport_res['tags']) self.assertNotEqual(len(dev_id), len(port_tags['vm_id'])) def test_get_ports_with_obsolete_and_new_vm_id_tag(self): def obsolete(device_id, obfuscate=False): return hashlib.sha1(device_id.encode()).hexdigest() with mock.patch.object(utils, 'device_id_to_vm_id', new=obsolete): dev_id1 = "short-dev-id-1" _, lport1 = self._create_switch_and_port(device_id=dev_id1) dev_id2 = "short-dev-id-2" _, lport2 = self._create_switch_and_port(device_id=dev_id2) lports = switchlib.get_ports(self.fake_cluster, None, [dev_id1]) port_tags = self._build_tag_dict(lports['whatever']['tags']) self.assertNotEqual(dev_id1, port_tags['vm_id']) lports = switchlib.get_ports(self.fake_cluster, None, [dev_id2]) port_tags = self._build_tag_dict(lports['whatever']['tags']) self.assertEqual(dev_id2, port_tags['vm_id']) def test_update_non_existent_port_raises(self): self.assertRaises(exceptions.PortNotFoundOnNetwork, switchlib.update_port, self.fake_cluster, 'boo', 'boo', 'boo', 'boo', 'boo', 'boo', False) def test_delete_port(self): lswitch, lport = self._create_switch_and_port() switchlib.delete_port(self.fake_cluster, lswitch['uuid'], lport['uuid']) self.assertRaises(exceptions.PortNotFoundOnNetwork, switchlib.get_port, self.fake_cluster, lswitch['uuid'], lport['uuid']) def test_delete_non_existent_port_raises(self): lswitch = self._create_switch_and_port()[0] self.assertRaises(exceptions.PortNotFoundOnNetwork, switchlib.delete_port, self.fake_cluster, lswitch['uuid'], 'bad_port_uuid') def test_query_lswitch_ports(self): lswitch, lport = self._create_switch_and_port() switch_port_uuids = [ switchlib.create_lport( self.fake_cluster, lswitch['uuid'], 'pippo', 'qportid-%s' % k, 'port-%s' 
% k, 'deviceid-%s' % k, True)['uuid'] for k in range(2)] switch_port_uuids.append(lport['uuid']) ports = switchlib.query_lswitch_lports( self.fake_cluster, lswitch['uuid']) self.assertEqual(len(ports), 3) for res_port in ports: self.assertIn(res_port['uuid'], switch_port_uuids) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.242255 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/osc/0000755000175000017500000000000000000000000022275 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/osc/__init__.py0000644000175000017500000000000000000000000024374 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.242255 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/osc/v2/0000755000175000017500000000000000000000000022624 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/osc/v2/__init__.py0000644000175000017500000000000000000000000024723 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/osc/v2/test_port.py0000644000175000017500000002666600000000000025241 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re import mock from openstackclient.tests.unit.network.v2 import fakes as network_fakes from openstackclient.tests.unit.network.v2 import test_port from openstackclient.tests.unit import utils as tests_utils from vmware_nsx.extensions import maclearning from vmware_nsx.extensions import providersecuritygroup from vmware_nsx.extensions import vnicindex from vmware_nsx.osc.v2 import port supported_extensions = (vnicindex.ALIAS, providersecuritygroup.ALIAS, maclearning.ALIAS) class TestCreatePort(test_port.TestCreatePort): def setUp(self): super(TestCreatePort, self).setUp() # Get the command object to test self.cmd = port.NsxCreatePort(self.app, self.namespace) # mock the relevant extensions get_ext = mock.patch('vmware_nsx.osc.v2.utils.get_extensions').start() get_ext.return_value = supported_extensions def _test_create_with_arg_and_val(self, arg_name, arg_val, is_valid=True): self.network.create_port.reset_mock() # add '--' to the arg name and change '_' to '-' conv_name = '--' + re.sub('_', '-', arg_name) arglist = [ self._port.name, '--network', self._port.network_id, conv_name, str(arg_val) ] verifylist = [ ('name', self._port.name), ('network', self._port.network_id,), (arg_name, arg_val), ('enable', True), ] if not is_valid: self.assertRaises(tests_utils.ParserException, self.check_parser, self.cmd, arglist, verifylist) return parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = (self.cmd.take_action(parsed_args)) self.network.create_port.assert_called_once_with(**{ 'admin_state_up': True, 'network_id': self._port.network_id, 
'name': self._port.name, arg_name: arg_val, }) ref_columns, ref_data = self._get_common_cols_data(self._port) self.assertEqual(ref_columns, columns) self.assertItemEqual(ref_data, data) def _test_create_with_vnix_index(self, val, is_valid=True): self._test_create_with_arg_and_val('vnic_index', val, is_valid) def test_create_with_vnic_index(self): self._test_create_with_vnix_index(1) def test_create_with_illegal_vnic_index(self): self._test_create_with_vnix_index('illegal', is_valid=False) def test_create_with_provider_security_group(self): # create a port with 1 provider security group secgroup = network_fakes.FakeSecurityGroup.create_one_security_group() self.network.find_security_group = mock.Mock(return_value=secgroup) arglist = [ '--network', self._port.network_id, '--provider-security-group', secgroup.id, 'test-port', ] verifylist = [ ('network', self._port.network_id,), ('enable', True), ('provider_security_groups', [secgroup.id]), ('name', 'test-port'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = (self.cmd.take_action(parsed_args)) self.network.create_port.assert_called_once_with(**{ 'admin_state_up': True, 'network_id': self._port.network_id, 'provider_security_groups': [secgroup.id], 'name': 'test-port', }) ref_columns, ref_data = self._get_common_cols_data(self._port) self.assertEqual(ref_columns, columns) self.assertItemEqual(ref_data, data) def test_create_with_provider_security_groups(self): # create a port with few provider security groups sg_1 = network_fakes.FakeSecurityGroup.create_one_security_group() sg_2 = network_fakes.FakeSecurityGroup.create_one_security_group() self.network.find_security_group = mock.Mock(side_effect=[sg_1, sg_2]) arglist = [ '--network', self._port.network_id, '--provider-security-group', sg_1.id, '--provider-security-group', sg_2.id, 'test-port', ] verifylist = [ ('network', self._port.network_id,), ('enable', True), ('provider_security_groups', [sg_1.id, sg_2.id]), ('name', 
'test-port'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = (self.cmd.take_action(parsed_args)) self.network.create_port.assert_called_once_with(**{ 'admin_state_up': True, 'network_id': self._port.network_id, 'provider_security_groups': [sg_1.id, sg_2.id], 'name': 'test-port', }) ref_columns, ref_data = self._get_common_cols_data(self._port) self.assertEqual(ref_columns, columns) self.assertItemEqual(ref_data, data) def test_create_with_provider_security_group_by_name(self): # create a port with 1 provider security group secgroup = network_fakes.FakeSecurityGroup.create_one_security_group() self.network.find_security_group = mock.Mock(return_value=secgroup) arglist = [ '--network', self._port.network_id, '--provider-security-group', secgroup.name, 'test-port', ] verifylist = [ ('network', self._port.network_id,), ('enable', True), ('provider_security_groups', [secgroup.name]), ('name', 'test-port'), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = (self.cmd.take_action(parsed_args)) self.network.create_port.assert_called_once_with(**{ 'admin_state_up': True, 'network_id': self._port.network_id, 'provider_security_groups': [secgroup.id], 'name': 'test-port', }) ref_columns, ref_data = self._get_common_cols_data(self._port) self.assertEqual(ref_columns, columns) self.assertItemEqual(ref_data, data) def _test_create_with_flag_arg( self, arg_name, validate_name, validate_val): self.network.create_port.reset_mock() # add '--' to the arg name and change '_' to '-' conv_name = '--' + re.sub('_', '-', arg_name) arglist = [ self._port.name, '--network', self._port.network_id, conv_name ] verifylist = [ ('name', self._port.name), ('network', self._port.network_id,), (arg_name, True), ('enable', True), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = (self.cmd.take_action(parsed_args)) self.network.create_port.assert_called_once_with(**{ 'admin_state_up': True, 'network_id': 
self._port.network_id, 'name': self._port.name, validate_name: validate_val, }) ref_columns, ref_data = self._get_common_cols_data(self._port) self.assertEqual(ref_columns, columns) self.assertItemEqual(ref_data, data) def test_create_with_mac_learning(self): self._test_create_with_flag_arg( 'enable_mac_learning', 'mac_learning_enabled', True) def test_create_with_no_mac_learning(self): self._test_create_with_flag_arg( 'disable_mac_learning', 'mac_learning_enabled', False) class TestSetPort(test_port.TestSetPort): def setUp(self): super(TestSetPort, self).setUp() # Get the command object to test self.cmd = port.NsxSetPort(self.app, self.namespace) # mock the relevant extensions get_ext = mock.patch('vmware_nsx.osc.v2.utils.get_extensions').start() get_ext.return_value = supported_extensions def _test_set_with_arg_and_val(self, arg_name, arg_val, is_valid=True): self.network.update_port.reset_mock() # add '--' to the arg name and change '_' to '-' conv_name = '--' + re.sub('_', '-', arg_name) arglist = [ self._port.name, conv_name, str(arg_val) ] verifylist = [ ('port', self._port.name), (arg_name, arg_val) ] if not is_valid: self.assertRaises(tests_utils.ParserException, self.check_parser, self.cmd, arglist, verifylist) return parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) attrs = {arg_name: arg_val} self.network.update_port.assert_called_once_with( self._port, **attrs) self.assertIsNone(result) def _test_set_Vnic_index(self, val, is_valid=True): self._test_set_with_arg_and_val('vnic_index', val, is_valid) def test_set_vnic_index(self): self._test_set_Vnic_index(1) def test_set_illegal_vnic_index(self): # check illegal index self._test_set_Vnic_index('illegal', is_valid=False) def test_set_provider_security_group(self): # It is not allowed to change the provider security groups sg = network_fakes.FakeSecurityGroup.create_one_security_group() self.network.find_security_group = mock.Mock(return_value=sg) 
arglist = [ '--provider-security-group', sg.id, self._port.name, ] verifylist = [ ('provider_security_groups', [sg.id]), ('port', self._port.name), ] self.assertRaises(tests_utils.ParserException, self.check_parser, self.cmd, arglist, verifylist) def _test_set_with_flag_arg(self, arg_name, validate_name, validate_val, is_valid=True): self.network.update_port.reset_mock() # add '--' to the arg name and change '_' to '-' conv_name = '--' + re.sub('_', '-', arg_name) arglist = [ self._port.name, conv_name ] verifylist = [ ('port', self._port.name), (arg_name, True) ] if not is_valid: self.assertRaises(tests_utils.ParserException, self.check_parser, self.cmd, arglist, verifylist) return parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) attrs = {validate_name: validate_val} self.network.update_port.assert_called_once_with( self._port, **attrs) self.assertIsNone(result) def test_set_with_mac_learning(self): self._test_set_with_flag_arg( 'enable_mac_learning', 'mac_learning_enabled', True) def test_set_with_no_mac_learning(self): self._test_set_with_flag_arg( 'disable_mac_learning', 'mac_learning_enabled', False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/osc/v2/test_router.py0000644000175000017500000001241600000000000025561 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import re import mock from openstackclient.tests.unit.network.v2 import test_router from openstackclient.tests.unit import utils as tests_utils from vmware_nsx.extensions import routersize from vmware_nsx.extensions import routertype from vmware_nsx.osc.v2 import router supported_extensions = (routersize.ALIAS, routertype.ALIAS) class TestCreateRouter(test_router.TestCreateRouter): def setUp(self): super(TestCreateRouter, self).setUp() # Get the command object to test self.cmd = router.NsxCreateRouter(self.app, self.namespace) # mock the relevant extensions get_ext = mock.patch('vmware_nsx.osc.v2.utils.get_extensions').start() get_ext.return_value = supported_extensions def _test_create_with_arg_and_val(self, arg_name, arg_val, is_valid=True): self.network.create_router.reset_mock() # add '--' to the arg name and change '_' to '-' conv_name = '--' + re.sub('_', '-', arg_name) arglist = [ self.new_router.name, conv_name, arg_val ] verifylist = [ ('name', self.new_router.name), (arg_name, arg_val), ('enable', True), ('distributed', False), ] if not is_valid: self.assertRaises(tests_utils.ParserException, self.check_parser, self.cmd, arglist, verifylist) return parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = (self.cmd.take_action(parsed_args)) self.network.create_router.assert_called_once_with(**{ 'admin_state_up': True, 'name': self.new_router.name, arg_name: arg_val, }) self.assertEqual(self.columns, columns) self.assertItemEqual(self.data, data) def _test_create_with_size(self, size, is_valid=True): self._test_create_with_arg_and_val('router_size', size, is_valid) def test_create_with_sizes(self): # check all router types for rtr_size in routersize.VALID_EDGE_SIZES: self._test_create_with_size(rtr_size) def test_create_with_illegal_size(self): self._test_create_with_size('illegal', is_valid=False) def _test_create_with_type(self, 
rtr_type, is_valid=True): self._test_create_with_arg_and_val('router_type', rtr_type, is_valid) def test_create_with_types(self): # check all router types for rtr_type in routertype.VALID_TYPES: self._test_create_with_type(rtr_type) def test_create_with_illegal_type(self): self._test_create_with_type('illegal', is_valid=False) class TestSetRouter(test_router.TestSetRouter): def setUp(self): super(TestSetRouter, self).setUp() # Get the command object to test self.cmd = router.NsxSetRouter(self.app, self.namespace) # mock the relevant extensions get_ext = mock.patch('vmware_nsx.osc.v2.utils.get_extensions').start() get_ext.return_value = supported_extensions def _test_set_with_arg_and_val(self, arg_name, arg_val, is_valid=True): self.network.update_router.reset_mock() # add '--' to the arg name and change '_' to '-' conv_name = '--' + re.sub('_', '-', arg_name) arglist = [ self._router.name, conv_name, arg_val ] verifylist = [ ('router', self._router.name), (arg_name, arg_val) ] if not is_valid: self.assertRaises(tests_utils.ParserException, self.check_parser, self.cmd, arglist, verifylist) return parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) attrs = {arg_name: arg_val} self.network.update_router.assert_called_once_with( self._router, **attrs) self.assertIsNone(result) def _test_set_size(self, size, is_valid=True): self._test_set_with_arg_and_val('router_size', size, is_valid) def test_set_sizes(self): # check all router types for rtr_size in routersize.VALID_EDGE_SIZES: self._test_set_size(rtr_size) def test_set_illegal_size(self): # check illegal size self._test_set_size('illegal', is_valid=False) def _test_set_type(self, rtr_type, is_valid=True): self._test_set_with_arg_and_val('router_type', rtr_type, is_valid) def test_set_types(self): # check all router types for rtr_type in routertype.VALID_TYPES: self._test_set_type(rtr_type) def test_set_illegal_type(self): self._test_set_type('illegal', 
is_valid=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/osc/v2/test_security_group.py0000644000175000017500000001562200000000000027326 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re import mock from openstackclient.tests.unit.network.v2 import ( test_security_group_network as test_security_group) from openstackclient.tests.unit import utils as tests_utils from vmware_nsx.extensions import providersecuritygroup from vmware_nsx.extensions import securitygrouplogging from vmware_nsx.extensions import securitygrouppolicy from vmware_nsx.osc.v2 import security_group supported_extensions = (securitygrouplogging.ALIAS, providersecuritygroup.ALIAS, securitygrouppolicy.ALIAS) class TestCreateSecurityGroup( test_security_group.TestCreateSecurityGroupNetwork): def setUp(self): super(TestCreateSecurityGroup, self).setUp() # Get the command object to test self.cmd = security_group.NsxCreateSecurityGroup( self.app, self.namespace) # mock the relevant extensions get_ext = mock.patch('vmware_nsx.osc.v2.utils.get_extensions').start() get_ext.return_value = supported_extensions def _test_create_with_flag_arg( self, arg_name, validate_name, validate_val): self.network.create_security_group = mock.Mock( return_value=self._security_group) # add '--' to the arg name and change '_' to '-' 
conv_name = '--' + re.sub('_', '-', arg_name) arglist = [ '--description', self._security_group.description, conv_name, self._security_group.name ] verifylist = [ ('description', self._security_group.description), ('name', self._security_group.name), (arg_name, True), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) self.network.create_security_group.assert_called_once_with(**{ 'description': self._security_group.description, 'name': self._security_group.name, validate_name: validate_val, }) self.assertEqual(self.columns, columns) self.assertItemEqual(self.data, data) def test_create_with_logging(self): self._test_create_with_flag_arg('logging', 'logging', True) def test_create_with_no_logging(self): self._test_create_with_flag_arg('no_logging', 'logging', False) def test_create_with_provider(self): self._test_create_with_flag_arg('provider', 'provider', True) def _test_create_with_arg_val(self, arg_name, arg_val): self.network.create_security_group = mock.Mock( return_value=self._security_group) # add '--' to the arg name and change '_' to '-' conv_name = '--' + re.sub('_', '-', arg_name) arglist = [ '--description', self._security_group.description, conv_name, str(arg_val), self._security_group.name ] verifylist = [ ('description', self._security_group.description), ('name', self._security_group.name), (arg_name, arg_val), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) self.network.create_security_group.assert_called_once_with(**{ 'description': self._security_group.description, 'name': self._security_group.name, arg_name: arg_val, }) self.assertEqual(self.columns, columns) self.assertItemEqual(self.data, data) def test_create_with_policy(self): self._test_create_with_arg_val('policy', 'policy-1') class TestSetSecurityGroup( test_security_group.TestSetSecurityGroupNetwork): def setUp(self): super(TestSetSecurityGroup, self).setUp() 
# Get the command object to test self.cmd = security_group.NsxSetSecurityGroup( self.app, self.namespace) # mock the relevant extensions get_ext = mock.patch('vmware_nsx.osc.v2.utils.get_extensions').start() get_ext.return_value = supported_extensions def _test_set_with_flag_arg(self, arg_name, validate_name, validate_val, is_valid=True): self.network.create_security_group = mock.Mock( return_value=self._security_group) # add '--' to the arg name and change '_' to '-' conv_name = '--' + re.sub('_', '-', arg_name) arglist = [ conv_name, self._security_group.name ] verifylist = [ (arg_name, True), ('group', self._security_group.name), ] if not is_valid: self.assertRaises(tests_utils.ParserException, self.check_parser, self.cmd, arglist, verifylist) return parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) self.network.update_security_group.assert_called_once_with( self._security_group, **{validate_name: validate_val}) self.assertIsNone(result) def test_set_with_logging(self): self._test_set_with_flag_arg('logging', 'logging', True) def test_set_with_no_logging(self): self._test_set_with_flag_arg('no_logging', 'logging', False) def test_set_with_provider(self): # modifying the provider flag should fail self._test_set_with_flag_arg('provider', 'provider', True, is_valid=False) def _test_set_with_arg_val(self, arg_name, arg_val): self.network.create_security_group = mock.Mock( return_value=self._security_group) # add '--' to the arg name and change '_' to '-' conv_name = '--' + re.sub('_', '-', arg_name) arglist = [ conv_name, str(arg_val), self._security_group.name ] verifylist = [ (arg_name, arg_val), ('group', self._security_group.name), ] parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) self.network.update_security_group.assert_called_once_with( self._security_group, **{arg_name: arg_val}) self.assertIsNone(result) def test_set_with_policyr(self): 
self._test_set_with_arg_val('policy', 'policy-1') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/osc/v2/test_subnet.py0000644000175000017500000001244400000000000025542 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re import mock from openstackclient.tests.unit.network.v2 import test_subnet from openstackclient.tests.unit import utils as tests_utils from vmware_nsx.extensions import dhcp_mtu from vmware_nsx.extensions import dns_search_domain from vmware_nsx.osc.v2 import subnet supported_extensions = (dhcp_mtu.ALIAS, dns_search_domain.ALIAS) class TestCreateSubnet(test_subnet.TestCreateSubnet): def setUp(self): super(TestCreateSubnet, self).setUp() # Get the command object to test self.cmd = subnet.NsxCreateSubnet(self.app, self.namespace) # mock the relevant extensions get_ext = mock.patch('vmware_nsx.osc.v2.utils.get_extensions').start() get_ext.return_value = supported_extensions def _test_create_with_arg_and_val(self, arg_name, arg_val, is_valid=True): self.network.create_subnet = mock.Mock(return_value=self._subnet) # add '--' to the arg name and change '_' to '-' conv_name = '--' + re.sub('_', '-', arg_name) arglist = [ "--subnet-range", self._subnet.cidr, "--network", self._subnet.network_id, conv_name, str(arg_val), self._subnet.name ] verifylist = [ ('name', 
self._subnet.name), ('subnet_range', self._subnet.cidr), ('network', self._subnet.network_id), ('ip_version', self._subnet.ip_version), ('gateway', 'auto'), (arg_name, arg_val), ] if not is_valid: self.assertRaises(tests_utils.ParserException, self.check_parser, self.cmd, arglist, verifylist) return parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) self.network.create_subnet.assert_called_once_with(**{ 'cidr': mock.ANY, 'ip_version': mock.ANY, 'network_id': mock.ANY, 'name': self._subnet.name, arg_name: arg_val, }) self.assertEqual(self.columns, columns) self.assertItemEqual(self.data, data) def _test_create_with_tag(self, add_tags=True): self.skipTest('Unblock gate') def _test_create_with_mtu(self, mtu, is_valid=True): self._test_create_with_arg_and_val('dhcp_mtu', mtu, is_valid) def test_create_with_mtu(self): # check a valid value self._test_create_with_mtu(1500) def test_create_with_illegal_mtu(self): self._test_create_with_mtu('illegal', is_valid=False) def _test_create_with_search_domain(self, val, is_valid=True): self._test_create_with_arg_and_val('dns_search_domain', val, is_valid) def test_create_with_search_domain(self): # check a valid value self._test_create_with_search_domain('www.aaa.com') # Cannot check illegal search domain - validation is on the server side class TestSetSubnet(test_subnet.TestSetSubnet): def setUp(self): super(TestSetSubnet, self).setUp() # Get the command object to test self.cmd = subnet.NsxSetSubnet(self.app, self.namespace) # mock the relevant extensions get_ext = mock.patch('vmware_nsx.osc.v2.utils.get_extensions').start() get_ext.return_value = supported_extensions def _test_set_with_arg_and_val(self, arg_name, arg_val, is_valid=True): # add '--' to the arg name and change '_' to '-' conv_name = '--' + re.sub('_', '-', arg_name) arglist = [ conv_name, str(arg_val), self._subnet.name, ] verifylist = [ (arg_name, arg_val), ('subnet', self._subnet.name), ] if not 
is_valid: self.assertRaises(tests_utils.ParserException, self.check_parser, self.cmd, arglist, verifylist) return parsed_args = self.check_parser(self.cmd, arglist, verifylist) result = self.cmd.take_action(parsed_args) attrs = { arg_name: arg_val } self.network.update_subnet.assert_called_with(self._subnet, **attrs) self.assertIsNone(result) def _test_set_mtu(self, mtu, is_valid=True): self._test_set_with_arg_and_val('dhcp_mtu', mtu, is_valid) def test_set_mtu(self): # check a valid value self._test_set_mtu(1500) def test_set_illegal_mtu(self): self._test_set_mtu('illegal', is_valid=False) def _test_set_with_search_domain(self, val, is_valid=True): self._test_set_with_arg_and_val('dns_search_domain', val, is_valid) def test_set_with_search_domain(self): # check a valid value self._test_set_with_search_domain('www.aaa.com') ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.242255 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/0000755000175000017500000000000000000000000023334 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/__init__.py0000644000175000017500000000000000000000000025433 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.242255 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/dynamic_routing/0000755000175000017500000000000000000000000026527 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/dynamic_routing/__init__.py0000644000175000017500000000000000000000000030626 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 
vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/dynamic_routing/test_nsxv_bgp_driver.py0000644000175000017500000003224200000000000033344 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import mock from neutron.api import extensions from neutron_dynamic_routing.db import bgp_db # noqa from neutron_dynamic_routing import extensions as dr_extensions from neutron_dynamic_routing.extensions import bgp as ext_bgp from neutron_dynamic_routing.tests.unit.db import test_bgp_db from neutron_lib.api.definitions import address_scope from neutron_lib import context from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from vmware_nsx.common import exceptions as exc from vmware_nsx.db import nsxv_db from vmware_nsx.plugins.nsx_v.drivers import ( shared_router_driver as router_driver) from vmware_nsx.services.dynamic_routing import bgp_plugin from vmware_nsx.services.dynamic_routing.nsx_v import driver as bgp_driver from vmware_nsx.tests.unit.nsx_v import test_plugin BGP_PLUGIN = 'vmware_nsx.services.dynamic_routing.bgp_plugin.NSXvBgpPlugin' class TestNSXvBgpPlugin(test_plugin.NsxVPluginV2TestCase, test_bgp_db.BgpTests): def setUp(self): extensions.append_api_extensions_path(dr_extensions.__path__) service_plugins = {ext_bgp.BGP_EXT_ALIAS: BGP_PLUGIN} super(TestNSXvBgpPlugin, self).setUp(service_plugins=service_plugins) self.bgp_plugin = 
bgp_plugin.NSXvBgpPlugin() self.nsxv_driver = self.bgp_plugin.drivers['nsx-v'] self.nsxv_driver._validate_gateway_network = mock.Mock() self.nsxv_driver._validate_bgp_configuration_on_peer_esg = ( mock.Mock()) self.plugin = directory.get_plugin() self.l3plugin = self.plugin self.plugin.init_is_complete = True self.context = context.get_admin_context() self.project_id = 'dummy_project' @contextlib.contextmanager def gw_network(self, external=True, **kwargs): with super(TestNSXvBgpPlugin, self).gw_network(external=external, **kwargs) as gw_net: if external: gw_net['network']['router:external'] = True gw_net['network'][address_scope.IPV4_ADDRESS_SCOPE] = True yield gw_net @contextlib.contextmanager def subnet(self, network=None, **kwargs): if network and network['network'].get('router:external'): kwargs['gateway_ip'] = None kwargs['enable_dhcp'] = False with super(TestNSXvBgpPlugin, self).subnet(network=network, **kwargs) as sub: yield sub @contextlib.contextmanager def router(self, **kwargs): if 'external_gateway_info' in kwargs: kwargs['external_gateway_info']['enable_snat'] = False with super(TestNSXvBgpPlugin, self).router(**kwargs) as r: yield r @contextlib.contextmanager def esg_bgp_peer(self, esg_id): data = {'name': '', 'peer_ip': '192.168.1.10', 'remote_as': '65000', 'esg_id': esg_id, 'auth_type': 'none', 'password': '', 'tenant_id': self.project_id} bgp_peer = self.bgp_plugin.create_bgp_peer(self.context, {'bgp_peer': data}) yield bgp_peer self.bgp_plugin.delete_bgp_peer(self.context, bgp_peer['id']) @contextlib.contextmanager def bgp_speaker(self, ip_version, local_as, name='my-speaker', advertise_fip_host_routes=True, advertise_tenant_networks=True, networks=None, peers=None): data = {'ip_version': ip_version, test_bgp_db.ADVERTISE_FIPS_KEY: advertise_fip_host_routes, 'advertise_tenant_networks': advertise_tenant_networks, 'local_as': local_as, 'name': name, 'tenant_id': self.project_id} bgp_speaker = self.bgp_plugin.create_bgp_speaker(self.context, 
{'bgp_speaker': data}) bgp_speaker_id = bgp_speaker['id'] if networks: for network_id in networks: self.bgp_plugin.add_gateway_network( self.context, bgp_speaker_id, {'network_id': network_id}) if peers: for peer_id in peers: self.bgp_plugin.add_bgp_peer(self.context, bgp_speaker_id, {'bgp_peer_id': peer_id}) yield self.bgp_plugin.get_bgp_speaker(self.context, bgp_speaker_id) def test_get_external_networks_for_port_same_address_scope_v6(self): self.skipTest("IPv6 not supported by this plugin.") def test_get_external_networks_for_port_different_address_scope_v6(self): self.skipTest("IPv6 not supported by this plugin.") def test__get_dvr_fixed_ip_routes_by_bgp_speaker_same_scope(self): self.skipTest("DVR specific.") def test_get_external_networks_for_port_different_address_scope_v4(self): self.skipTest("DVR specific.") def test__get_dvr_fixed_ip_routes_by_bgp_speaker_different_scope(self): self.skipTest("DVR specific.") def test__get_dvr_fixed_ip_routes_by_bgp_speaker_no_scope(self): self.skipTest("DVR specific.") def test_create_v6_bgp_speaker(self): fake_bgp_speaker = { "bgp_speaker": { "ip_version": 6, "local_as": "1000", "name": "bgp-speaker", "tenant_id": self.project_id } } self.assertRaises(n_exc.InvalidInput, self.bgp_plugin.create_bgp_speaker, self.context, fake_bgp_speaker) def test_create_v6_bgp_peer(self): fake_bgp_peer = { "bgp_peer": { "auth_type": "none", "remote_as": "1000", "name": "bgp-peer", "peer_ip": "fc00::/7", "tenant_id": self.project_id } } self.assertRaises(n_exc.InvalidInput, self.bgp_plugin.create_bgp_peer, self.context, fake_bgp_peer) def test_bgp_peer_esg_id(self): edge_id = 'edge-123' with self.esg_bgp_peer(esg_id='edge-123') as esg_peer: self.assertEqual(edge_id, esg_peer['esg_id']) peer_id = esg_peer['id'] bgp_peer = self.bgp_plugin.get_bgp_peer(self.context, peer_id) self.assertEqual(edge_id, bgp_peer['esg_id']) def test_create_bgp_peer_md5_auth_no_password(self): bgp_peer = {'bgp_peer': {'auth_type': 'md5', 'password': None, 
'peer_ip': '10.0.0.3', 'tenant_id': self.project_id}} self.assertRaises(ext_bgp.InvalidBgpPeerMd5Authentication, self.bgp_plugin.create_bgp_peer, self.context, bgp_peer) def test_add_non_external_gateway_network(self): self.nsxv_driver._validate_gateway_network = ( bgp_driver.NSXvBgpDriver( self.bgp_plugin)._validate_gateway_network) with self.gw_network(external=False) as net,\ self.subnetpool_with_address_scope(4, prefixes=['8.0.0.0/8']) as sp: network_id = net['network']['id'] with self.bgp_speaker(sp['ip_version'], 1234) as speaker: self.assertRaises(exc.NsxBgpNetworkNotExternal, self.bgp_plugin.add_gateway_network, self.context, speaker['id'], {'network_id': network_id}) @mock.patch.object(nsxv_db, 'get_nsxv_bgp_speaker_binding', return_value={'bgp_identifier': '10.0.0.11'}) def test_shared_router_on_gateway_clear(self, m1): with self.gw_network(external=True) as net,\ self.subnetpool_with_address_scope(4, prefixes=['10.0.0.0/24']) as sp: with self.subnet(network=net, subnetpool_id=sp['id']) as s1,\ self.bgp_speaker(sp['ip_version'], 1234, networks=[net['network']['id']]): subnet_id = s1['subnet']['id'] gw_info1 = {'network_id': net['network']['id'], 'external_fixed_ips': [{'ip_address': '10.0.0.11', 'subnet_id': subnet_id}]} gw_info2 = {'network_id': net['network']['id'], 'external_fixed_ips': [{'ip_address': '10.0.0.12', 'subnet_id': subnet_id}]} router_obj = router_driver.RouterSharedDriver(self.plugin) with mock.patch.object(self.plugin, '_find_router_driver', return_value=router_obj): with self.router(external_gateway_info=gw_info1) as rtr1,\ self.router(external_gateway_info=gw_info2) as rtr2,\ mock.patch.object( self.nsxv_driver, '_get_router_edge_info', return_value=('edge-1', False)),\ mock.patch.object( self.plugin.edge_manager, 'get_routers_on_same_edge', return_value=[rtr1['id'], rtr2['id']]),\ mock.patch.object( self.nsxv_driver, '_update_edge_bgp_identifier') as up_bgp: gw_clear = {u'router': {u'external_gateway_info': {}}} 
self.plugin.update_router(self.context, rtr1['id'], gw_clear) up_bgp.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, '10.0.0.12') def test__bgp_speakers_for_gateway_network_by_ip_version(self): # REVISIT(roeyc): Base class test use ipv6 which is not supported. pass def test__bgp_speakers_for_gateway_network_by_ip_version_no_binding(self): # REVISIT(roeyc): Base class test use ipv6 which is not supported. pass def test__tenant_prefixes_by_router_no_gateway_port(self): # REVISIT(roeyc): Base class test use ipv6 which is not supported. pass def test_all_routes_by_bgp_speaker_different_tenant_address_scope(self): # REVISIT(roeyc): Base class test use ipv6 which is not supported. pass def test__get_address_scope_ids_for_bgp_speaker(self): pass def test__get_dvr_fip_host_routes_by_binding(self): pass def test__get_dvr_fip_host_routes_by_router(self): pass def test__get_fip_next_hop_dvr(self): pass def test__get_fip_next_hop_legacy(self): pass def test_get_routes_by_bgp_speaker_id_with_fip_dvr(self): pass def test_ha_router_fips_has_no_next_hop_to_fip_agent_gateway(self): pass def test_legacy_router_fips_has_no_next_hop_to_fip_agent_gateway(self): pass def test_floatingip_update_callback(self): pass def test_get_ipv6_tenant_subnet_routes_by_bgp_speaker_ipv6(self): pass def test_get_routes_by_bgp_speaker_id_with_fip(self): # base class tests uses no-snat router with floating ips self.skipTest('No SNAT with floating ips not supported') def test_get_routes_by_bgp_speaker_binding_with_fip(self): # base class tests uses no-snat router with floating ips self.skipTest('No SNAT with floating ips not supported') def test__get_routes_by_router_with_fip(self): # base class tests uses no-snat router with floating ips self.skipTest('No SNAT with floating ips not supported') def test_add_bgp_peer_with_bad_id(self): with self.subnetpool_with_address_scope( 4, prefixes=['8.0.0.0/8']) as sp: with self.bgp_speaker(sp['ip_version'], 1234) as speaker: 
self.assertRaises(ext_bgp.BgpPeerNotFound, self.bgp_plugin.add_bgp_peer, self.context, speaker['id'], {'bgp_peer_id': 'aaa'}) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.242255 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/flowclassifier/0000755000175000017500000000000000000000000026350 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/flowclassifier/__init__.py0000644000175000017500000000000000000000000030447 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/flowclassifier/test_nsxv_driver.py0000644000175000017500000002624300000000000032341 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from oslo_config import cfg from oslo_utils import importutils from vmware_nsx.services.flowclassifier.nsx_v import driver as nsx_v_driver from vmware_nsx.tests import unit as vmware from vmware_nsx.tests.unit.nsx_v.vshield import fake_vcns from neutron.api import extensions as api_ext from neutron.common import config from neutron_lib.api.definitions import portbindings from neutron_lib import context from neutron_lib.plugins import directory from networking_sfc.db import flowclassifier_db as fdb from networking_sfc.extensions import flowclassifier from networking_sfc.services.flowclassifier.common import context as fc_ctx from networking_sfc.services.flowclassifier.common import exceptions as fc_exc from networking_sfc.tests import base from networking_sfc.tests.unit.db import test_flowclassifier_db class TestNsxvFlowClassifierDriver( test_flowclassifier_db.FlowClassifierDbPluginTestCaseBase, base.NeutronDbPluginV2TestCase): resource_prefix_map = dict([ (k, flowclassifier.FLOW_CLASSIFIER_PREFIX) for k in flowclassifier.RESOURCE_ATTRIBUTE_MAP.keys() ]) def setUp(self): # init the flow classifier plugin flowclassifier_plugin = ( test_flowclassifier_db.DB_FLOWCLASSIFIER_PLUGIN_CLASS) service_plugins = { flowclassifier.FLOW_CLASSIFIER_EXT: flowclassifier_plugin } fdb.FlowClassifierDbPlugin.supported_extension_aliases = [ flowclassifier.FLOW_CLASSIFIER_EXT] fdb.FlowClassifierDbPlugin.path_prefix = ( flowclassifier.FLOW_CLASSIFIER_PREFIX ) super(TestNsxvFlowClassifierDriver, self).setUp( ext_mgr=None, plugin=None, service_plugins=service_plugins ) self.flowclassifier_plugin = importutils.import_object( flowclassifier_plugin) ext_mgr = api_ext.PluginAwareExtensionManager( test_flowclassifier_db.extensions_path, { flowclassifier.FLOW_CLASSIFIER_EXT: self.flowclassifier_plugin } ) app = config.load_paste_app('extensions_test_app') self.ext_api = api_ext.ExtensionMiddleware(app, ext_mgr=ext_mgr) self.ctx = context.get_admin_context() # use the fake vcns 
mock_vcns = mock.patch(vmware.VCNS_NAME, autospec=True) mock_vcns_instance = mock_vcns.start() self.fc2 = fake_vcns.FakeVcns() mock_vcns_instance.return_value = self.fc2 # use the nsxv flow classifier driver self._profile_id = 'serviceprofile-1' cfg.CONF.set_override('service_insertion_profile_id', self._profile_id, 'nsxv') cfg.CONF.set_override('service_insertion_redirect_all', True, 'nsxv') self.driver = nsx_v_driver.NsxvFlowClassifierDriver() self.driver.initialize() self._fc_name = 'test1' self._fc_description = 'test 1' self._fc_source = '10.10.0.0/24' self._fc_dest = '20.10.0.0/24' self._fc_prot = 'TCP' self._fc_source_ports = range(100, 115) self._fc_dest_ports = range(80, 81) self._fc = {'name': self._fc_name, 'description': self._fc_description, 'logical_source_port': None, 'logical_destination_port': None, 'source_ip_prefix': self._fc_source, 'destination_ip_prefix': self._fc_dest, 'protocol': self._fc_prot, 'source_port_range_min': self._fc_source_ports[0], 'source_port_range_max': self._fc_source_ports[-1], 'destination_port_range_min': self._fc_dest_ports[0], 'destination_port_range_max': self._fc_dest_ports[-1]} def tearDown(self): super(TestNsxvFlowClassifierDriver, self).tearDown() def test_driver_init(self): self.assertEqual(self._profile_id, self.driver._profile_id) self.assertEqual(self.driver._security_group_id, '0') orig_get_plugin = directory.get_plugin def mocked_get_plugin(plugin=None): # mock only the core plugin if plugin: return orig_get_plugin(plugin) return mock_nsxv_plugin mock_nsxv_plugin = mock.Mock() fc_plugin = directory.get_plugin(flowclassifier.FLOW_CLASSIFIER_EXT) with mock.patch.object(directory, 'get_plugin', new=mocked_get_plugin): with mock.patch.object( mock_nsxv_plugin, 'add_vms_to_service_insertion') as fake_add: with mock.patch.object( fc_plugin, 'create_flow_classifier') as fake_create: self.driver.init_complete(None, None, {}) # check that the plugin was called to add vms to the # security group 
self.assertTrue(fake_add.called) # check that redirect_all flow classifier entry # was created self.assertTrue(fake_create.called) def test_create_flow_classifier_precommit(self): with self.flow_classifier(flow_classifier=self._fc) as fc: fc_context = fc_ctx.FlowClassifierContext( self.flowclassifier_plugin, self.ctx, fc['flow_classifier'] ) # just make sure it does not raise an exception self.driver.create_flow_classifier_precommit(fc_context) def test_create_flow_classifier_precommit_logical_source_port(self): with self.port( name='port1', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as src_port: with self.flow_classifier(flow_classifier={ 'name': 'test1', 'logical_source_port': src_port['port']['id'] }) as fc: fc_context = fc_ctx.FlowClassifierContext( self.flowclassifier_plugin, self.ctx, fc['flow_classifier'] ) self.assertRaises( fc_exc.FlowClassifierBadRequest, self.driver.create_flow_classifier_precommit, fc_context) def test_create_flow_classifier_precommit_logical_dest_port(self): with self.port( name='port1', device_owner='compute', device_id='test', arg_list=( portbindings.HOST_ID, ), **{portbindings.HOST_ID: 'test'} ) as dst_port: with self.flow_classifier(flow_classifier={ 'name': 'test1', 'logical_destination_port': dst_port['port']['id'] }) as fc: fc_context = fc_ctx.FlowClassifierContext( self.flowclassifier_plugin, self.ctx, fc['flow_classifier'] ) self.assertRaises( fc_exc.FlowClassifierBadRequest, self.driver.create_flow_classifier_precommit, fc_context) def _validate_rule_structure(self, rule): self.assertEqual(self._fc_description, rule.find('notes').text) self.assertEqual('ipv4', rule.find('packetType').text) self.assertEqual( self._fc_source, rule.find('sources').find('source').find('value').text) self.assertEqual( self._fc_dest, rule.find('destinations').find('destination').find('value').text) ports = "%s-%s" % (self._fc_source_ports[0], self._fc_source_ports[-1]) if 
self._fc_source_ports[0] == self._fc_source_ports[-1]: ports = str(self._fc_source_ports[0]) self.assertEqual( ports, rule.find('services').find('service').find('sourcePort').text) ports = "%s-%s" % (self._fc_dest_ports[0], self._fc_dest_ports[-1]) if self._fc_dest_ports[0] == self._fc_dest_ports[-1]: ports = str(self._fc_dest_ports[0]) self.assertEqual( ports, rule.find('services').find('service').find('destinationPort').text) self.assertEqual( self._fc_prot, rule.find('services').find('service').find('protocolName').text) self.assertTrue(rule.find('name').text.startswith(self._fc_name)) def test_create_flow_classifier(self): with self.flow_classifier(flow_classifier=self._fc) as fc: fc_context = fc_ctx.FlowClassifierContext( self.flowclassifier_plugin, self.ctx, fc['flow_classifier'] ) with mock.patch.object( self.driver, 'update_redirect_section_in_backed') as mock_update_section: self.driver.create_flow_classifier(fc_context) self.assertTrue(mock_update_section.called) section = mock_update_section.call_args[0][0] self._validate_rule_structure(section.find('rule')) def test_update_flow_classifier(self): with self.flow_classifier(flow_classifier=self._fc) as fc: fc_context = fc_ctx.FlowClassifierContext( self.flowclassifier_plugin, self.ctx, fc['flow_classifier'] ) self.driver.create_flow_classifier(fc_context) with mock.patch.object( self.driver, 'update_redirect_section_in_backed') as mock_update_section: self.driver.update_flow_classifier(fc_context) self.assertTrue(mock_update_section.called) section = mock_update_section.call_args[0][0] self._validate_rule_structure(section.find('rule')) def test_delete_flow_classifier(self): with self.flow_classifier(flow_classifier=self._fc) as fc: fc_context = fc_ctx.FlowClassifierContext( self.flowclassifier_plugin, self.ctx, fc['flow_classifier'] ) self.driver.create_flow_classifier(fc_context) with mock.patch.object( self.driver, 'update_redirect_section_in_backed') as mock_update_section: 
self.driver.delete_flow_classifier(fc_context) self.assertTrue(mock_update_section.called) section = mock_update_section.call_args[0][0] # make sure the rule is not there self.assertIsNone(section.find('rule')) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.242255 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/ipam/0000755000175000017500000000000000000000000024262 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/ipam/__init__.py0000644000175000017500000000000000000000000026361 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/ipam/test_nsxv3_driver.py0000644000175000017500000001434300000000000030334 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock import netaddr from oslo_config import cfg from oslo_utils import uuidutils from vmware_nsx.tests.unit.nsx_v3 import test_plugin from vmware_nsxlib.v3 import exceptions as nsx_lib_exc from vmware_nsxlib.v3 import nsx_constants as error class MockIPPools(object): def patch_nsxlib_ipam(self): self.nsx_pools = {} def _create_pool(*args, **kwargs): pool_id = uuidutils.generate_uuid() gateway_ip = None if kwargs.get('gateway_ip'): gateway_ip = str(kwargs['gateway_ip']) subnet = {"allocation_ranges": kwargs.get('allocation_ranges'), "gateway_ip": gateway_ip, "cidr": args[0]} pool = {'id': pool_id, 'subnets': [subnet]} self.nsx_pools[pool_id] = {'pool': pool, 'allocated': []} return {'id': pool_id} def _update_pool(pool_id, **kwargs): pool = self.nsx_pools[pool_id]['pool'] subnet = pool['subnets'][0] if 'gateway_ip' in kwargs: if kwargs['gateway_ip']: subnet["gateway_ip"] = str(kwargs['gateway_ip']) else: del subnet["gateway_ip"] if 'allocation_ranges' in kwargs: if kwargs['allocation_ranges']: subnet["allocation_ranges"] = kwargs['allocation_ranges'] else: del subnet["allocation_ranges"] def _delete_pool(pool_id): del self.nsx_pools[pool_id] def _get_pool(pool_id): return self.nsx_pools[pool_id]['pool'] def _allocate_ip(*args, **kwargs): nsx_pool = self.nsx_pools[args[0]] if kwargs.get('ip_addr'): ip_addr = netaddr.IPAddress(kwargs['ip_addr']) # verify that this ip was not yet allocated if ip_addr in nsx_pool['allocated']: raise nsx_lib_exc.ManagerError( manager='dummy', operation='allocate', details='IP already allocated', error_code=error.ERR_CODE_IPAM_IP_ALLOCATED) # skip ip validation for this mock. 
nsx_pool['allocated'].append(ip_addr) return {'allocation_id': str(ip_addr)} # get an unused ip from the pool ranges = nsx_pool['pool']['subnets'][0]['allocation_ranges'] for ip_range in ranges: r = netaddr.IPRange(ip_range['start'], ip_range['end']) for ip_addr in r: if ip_addr not in nsx_pool['allocated']: nsx_pool['allocated'].append(ip_addr) return {'allocation_id': str(ip_addr)} # no IP was found raise nsx_lib_exc.ManagerError( manager='dummy', operation='allocate', details='All IPs in the pool are allocated', error_code=error.ERR_CODE_IPAM_POOL_EXHAUSTED) def _release_ip(*args, **kwargs): nsx_pool = self.nsx_pools[args[0]] ip_addr = netaddr.IPAddress(args[1]) nsx_pool['allocated'].remove(ip_addr) mock.patch( "vmware_nsxlib.v3.resources.IpPool.get", side_effect=_get_pool).start() mock.patch( "vmware_nsxlib.v3.resources.IpPool.create", side_effect=_create_pool).start() mock.patch( "vmware_nsxlib.v3.resources.IpPool.update", side_effect=_update_pool).start() mock.patch( "vmware_nsxlib.v3.resources.IpPool.delete", side_effect=_delete_pool).start() mock.patch( "vmware_nsxlib.v3.resources.IpPool.allocate", side_effect=_allocate_ip).start() mock.patch( "vmware_nsxlib.v3.resources.IpPool.release", side_effect=_release_ip).start() class TestNsxv3IpamSubnets(test_plugin.TestSubnetsV2, MockIPPools): """Run the nsxv3 plugin subnets tests with the ipam driver.""" def setUp(self): cfg.CONF.set_override( "ipam_driver", "vmware_nsx.services.ipam.nsx_v3.driver.Nsxv3IpamDriver") super(TestNsxv3IpamSubnets, self).setUp() self.patch_nsxlib_ipam() def test_subnet_update_ipv4_and_ipv6_pd_slaac_subnets(self): self.skipTest('Update ipam subnet is not supported') def test_subnet_update_ipv4_and_ipv6_pd_v6stateless_subnets(self): self.skipTest('Update ipam subnet is not supported') class TestNsxv3IpamPorts(test_plugin.TestPortsV2, MockIPPools): """Run the nsxv3 plugin ports tests with the ipam driver.""" def setUp(self): cfg.CONF.set_override( "ipam_driver", 
"vmware_nsx.services.ipam.nsx_v3.driver.Nsxv3IpamDriver") super(TestNsxv3IpamPorts, self).setUp() self.patch_nsxlib_ipam() def test_create_port_invalid_fixed_ip_address_v6_pd_slaac(self): self.skipTest('Update ipam subnet is not supported') def test_update_port_invalid_subnet_v6_pd_slaac(self): self.skipTest('Update ipam subnet is not supported') def test_update_port_update_ip_address_only(self): self.skipTest('Update ipam subnet is not supported') def test_update_port_invalid_fixed_ip_address_v6_pd_slaac(self): self.skipTest('Update ipam subnet is not supported') def test_ip_allocation_for_ipv6_2_subnet_slaac_mode(self): self.skipTest('Only one ipv6 subnet per network is supported') def test_create_port_with_multiple_ipv4_and_ipv6_subnets(self): self.skipTest('Only one ipv6 subnet per network is supported') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/ipam/test_nsxv_driver.py0000644000175000017500000001221100000000000030241 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from vmware_nsx.tests.unit.nsx_v import test_plugin from neutron_lib.api.definitions import provider_net as pnet class TestNsxvIpamSubnets(test_plugin.TestSubnetsV2): """Run the nsxv plugin subnets tests with the ipam driver""" def setUp(self): cfg.CONF.set_override( "ipam_driver", "vmware_nsx.services.ipam.nsx_v.driver.NsxvIpamDriver") super(TestNsxvIpamSubnets, self).setUp() def provider_net(self): name = 'dvs-provider-net' providernet_args = {pnet.NETWORK_TYPE: 'vlan', pnet.SEGMENTATION_ID: 43, pnet.PHYSICAL_NETWORK: 'dvs-uuid'} return self.network(name=name, do_delete=False, providernet_args=providernet_args, arg_list=(pnet.NETWORK_TYPE, pnet.SEGMENTATION_ID, pnet.PHYSICAL_NETWORK)) def test_provider_net_use_driver(self): with self.provider_net() as net: before = len(self.fc2._ipam_pools) with self.subnet(network=net, cidr='10.10.10.0/29', enable_dhcp=False): self.assertEqual(before + 1, len(self.fc2._ipam_pools)) def test_ext_net_use_driver(self): with self.network(router__external=True) as net: before = len(self.fc2._ipam_pools) with self.subnet(network=net, cidr='10.10.10.0/29', enable_dhcp=False): self.assertEqual(before + 1, len(self.fc2._ipam_pools)) def test_regular_net_dont_use_driver(self): with self.network() as net: before = len(self.fc2._ipam_pools) with self.subnet(network=net, cidr='10.10.10.0/29', enable_dhcp=False): self.assertEqual(before, len(self.fc2._ipam_pools)) def test_no_more_ips(self): # create a small provider network, and use all the IPs with self.provider_net() as net: with self.subnet(network=net, cidr='10.10.10.0/29', enable_dhcp=False) as subnet: # create ports on this subnet until there are no more free ips # legal ips are 10.10.10.2 - 10.10.10.6 fixed_ips = [{'subnet_id': subnet['subnet']['id']}] for counter in range(5): port_res = self._create_port( self.fmt, net['network']['id'], fixed_ips=fixed_ips) port = self.deserialize('json', port_res) self.assertIn('port', port) # try to create another one - 
should fail port_res = self._create_port( self.fmt, net['network']['id'], fixed_ips=fixed_ips) port = self.deserialize('json', port_res) self.assertIn('NeutronError', port) self.assertIn('message', port['NeutronError']) self.assertTrue(('No more IP addresses available' in port['NeutronError']['message'])) def test_use_same_ips(self): # create a provider network and try to allocate the same ip twice with self.provider_net() as net: with self.subnet(network=net, cidr='10.10.10.0/24', enable_dhcp=False) as subnet: fixed_ips = [{'ip_address': '10.10.10.2', 'subnet_id': subnet['subnet']['id']}] # First port should succeed port_res = self._create_port( self.fmt, net['network']['id'], fixed_ips=fixed_ips) port = self.deserialize('json', port_res) self.assertIn('port', port) # try to create another one - should fail port_res = self._create_port( self.fmt, net['network']['id'], fixed_ips=fixed_ips) port = self.deserialize('json', port_res) self.assertIn('NeutronError', port) self.assertIn('message', port['NeutronError']) self.assertTrue(('already allocated in subnet' in port['NeutronError']['message'])) class TestNsxvIpamPorts(test_plugin.TestPortsV2): """Run the nsxv plugin ports tests with the ipam driver""" def setUp(self): cfg.CONF.set_override( "ipam_driver", "vmware_nsx.services.ipam.nsx_v.driver.NsxvIpamDriver") super(TestNsxvIpamPorts, self).setUp() ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1586542531.242255 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/l2gateway/0000755000175000017500000000000000000000000025233 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/l2gateway/__init__.py0000644000175000017500000000000000000000000027332 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/l2gateway/test_nsxv3_driver.py0000644000175000017500000004315600000000000031311 0ustar00coreycorey00000000000000# Copyright (c) 2015 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from networking_l2gw.db.l2gateway import l2gateway_db from networking_l2gw.services.l2gateway.common import config from networking_l2gw.services.l2gateway.common import constants from networking_l2gw.services.l2gateway import exceptions as l2gw_exc from networking_l2gw.services.l2gateway import plugin as core_l2gw_plugin from networking_l2gw.tests.unit.db import test_l2gw_db from oslo_config import cfg from oslo_utils import importutils from oslo_utils import uuidutils from neutron.tests import base from neutron_lib.api.definitions import provider_net as providernet from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import context from neutron_lib import exceptions as n_exc from vmware_nsx.common import utils as nsx_utils from vmware_nsx.services.l2gateway.nsx_v3 import driver as nsx_v3_driver from vmware_nsx.tests.unit.nsx_v3 import test_plugin as test_nsx_v3_plugin from vmware_nsxlib.v3 import nsx_constants NSX_V3_PLUGIN_CLASS = ('vmware_nsx.plugins.nsx_v3.plugin.NsxV3Plugin') NSX_V3_L2GW_DRIVER_CLASS_PATH = ('vmware_nsx.services.l2gateway.' 
'nsx_v3.driver.NsxV3Driver') NSX_DEFAULT_BEP_NAME = "default-bridge-endpoint-profile" class TestNsxV3L2GatewayDriver(test_l2gw_db.L2GWTestCase, test_nsx_v3_plugin.NsxV3PluginTestCaseMixin, base.BaseTestCase): def setUp(self): super(TestNsxV3L2GatewayDriver, self).setUp() self.core_plugin = importutils.import_object(NSX_V3_PLUGIN_CLASS) self.driver = nsx_v3_driver.NsxV3Driver(mock.MagicMock()) mock.patch.object(config, 'register_l2gw_opts_helper') mock.patch('neutron.services.service_base.load_drivers', return_value=({'dummyprovider': self.driver}, 'dummyprovider')).start() mock.patch.object(l2gateway_db.L2GatewayMixin, '__init__'), mock.patch.object(l2gateway_db, 'subscribe') mock.patch('neutron.db.servicetype_db.ServiceTypeManager.get_instance', return_value=mock.MagicMock()).start() mock_default_bep_uuid = uuidutils.generate_uuid() mock.patch('vmware_nsxlib.v3.core_resources.' 'NsxLibBridgeEndpointProfile.get_id_by_name_or_id', return_value=mock_default_bep_uuid).start() mock.patch('vmware_nsxlib.v3.core_resources.' 'NsxLibBridgeEndpointProfile.get', return_value={'id': mock_default_bep_uuid, 'edge_cluster_id': 'meh'}).start() mock.patch('vmware_nsxlib.v3.core_resources.' 
'NsxLibTransportZone.get_transport_type', return_value="VLAN").start() self.l2gw_plugin = core_l2gw_plugin.L2GatewayPlugin() self.context = context.get_admin_context() def _get_nw_data(self, provider=False): net_data = super(TestNsxV3L2GatewayDriver, self)._get_nw_data() net_spec = net_data['network'] net_spec['port_security_enabled'] = True if provider: net_spec[providernet.NETWORK_TYPE] = ( nsx_utils.NsxV3NetworkTypes.VLAN) net_spec[providernet.SEGMENTATION_ID] = 666 return net_data def test_nsxl2gw_driver_init(self): with mock.patch.object(nsx_v3_driver.NsxV3Driver, 'subscribe_callback_notifications') as sub: with mock.patch.object(nsx_v3_driver.LOG, 'debug') as debug: nsx_v3_driver.NsxV3Driver(mock.MagicMock()) self.assertTrue(sub.called) self.assertTrue(debug.called) def test_create_default_l2_gateway(self): def_bep_name = NSX_DEFAULT_BEP_NAME cfg.CONF.set_override("default_bridge_endpoint_profile", def_bep_name, "nsx_v3") with mock.patch.object(nsx_v3_driver.NsxV3Driver, '_get_bridge_vlan_tz_id', return_value=['some_tz_id']) as mock_get_tz: nsx_v3_driver.NsxV3Driver(mock.MagicMock()) def_bep_id = ( self.nsxlib.bridge_endpoint_profile.get_id_by_name_or_id( def_bep_name)) # fake the callback invoked after init registry.publish(resources.PROCESS, events.BEFORE_SPAWN, mock.MagicMock()) l2gws = self.driver._get_l2_gateways(self.context) def_l2gw = None for l2gw in l2gws: for device in l2gw['devices']: if device['device_name'] == def_bep_id: def_l2gw = l2gw self.assertIsNotNone(def_l2gw) self.assertTrue(def_l2gw.devices[0].device_name, def_bep_id) self.assertTrue(def_l2gw.devices[0].interfaces[0].interface_name, 'some_tz_id') mock_get_tz.assert_called_once_with({'id': def_bep_id, 'edge_cluster_id': 'meh'}) def test_create_duplicate_default_l2_gateway_noop(self): def_bep_name = NSX_DEFAULT_BEP_NAME cfg.CONF.set_override("default_bridge_endpoint_profile", def_bep_name, "nsx_v3") with mock.patch.object(nsx_v3_driver.NsxV3Driver, '_get_bridge_vlan_tz_id', 
return_value=['some_tz_id']): for i in range(0, 2): nsx_v3_driver.NsxV3Driver(mock.MagicMock()) # fake the callback invoked after init registry.publish(resources.PROCESS, events.BEFORE_SPAWN, mock.MagicMock()) l2gws = self.driver._get_l2_gateways(self.context) # Verify whether only one default L2 gateway is created self.assertEqual(1, len(l2gws)) def test_create_default_l2_gateway_no_bc_uuid_noop(self): with mock.patch.object(nsx_v3_driver.NsxV3Driver, 'subscribe_callback_notifications'): nsx_v3_driver.NsxV3Driver(mock.MagicMock()) l2gws = self.driver._get_l2_gateways(self.context) # Verify no default L2 gateway is created if bridge endpoint # profile id is not configured in nsx.ini self.assertEqual([], l2gws) def test_create_l2_gateway_multiple_devices_fail(self): invalid_l2gw_dict = { "l2_gateway": { "tenant_id": "fake_tenant_id", "name": "invalid_l2gw", "devices": [{"interfaces": [{"name": "interface1"}], "device_name": "device1"}, {"interfaces": [{"name": "interface_2"}], "device_name": "device2"}]}} self.assertRaises(n_exc.InvalidInput, self.l2gw_plugin.create_l2_gateway, self.context, invalid_l2gw_dict) def test_create_l2_gateway_multiple_interfaces_fail(self): invalid_l2gw_dict = { "l2_gateway": { "tenant_id": "fake_tenant_id", "name": "invalid_l2gw", "devices": [{"interfaces": [{"name": "interface1"}, {"name": "interface2"}], "device_name": "device1"}]}} self.assertRaises(n_exc.InvalidInput, self.l2gw_plugin.create_l2_gateway, self.context, invalid_l2gw_dict) def test_create_l2_gateway_invalid_device_name_fail(self): invalid_l2gw_dict = { "l2_gateway": { "tenant_id": "fake_tenant_id", "name": "invalid_l2gw", "devices": [{"interfaces": [{"name": "interface_1"}], "device_name": "device-1"}]}} self.assertRaises(n_exc.InvalidInput, self.l2gw_plugin.create_l2_gateway, self.context, invalid_l2gw_dict) def test_create_l2_gateway_valid(self): bc_uuid = uuidutils.generate_uuid() l2gw_data = self._get_l2_gateway_data(name='gw1', device_name=bc_uuid) l2gw = 
self.l2gw_plugin.create_l2_gateway(self.context, l2gw_data) self.assertIsNotNone(l2gw) self.assertEqual("gw1", l2gw["name"]) self.assertEqual("port1", l2gw["devices"][0]["interfaces"][0]["name"]) self.assertEqual(bc_uuid, l2gw["devices"][0]["device_name"]) def test_create_l2_gateway_connection(self): type(self.driver)._core_plugin = self.core_plugin bc_uuid = uuidutils.generate_uuid() l2gw_data = self._get_l2_gateway_data(name='def-l2gw', device_name=bc_uuid) l2gw = self._create_l2gateway(l2gw_data) net_data = self._get_nw_data() net = self.core_plugin.create_network(self.context, net_data) l2gw_conn_data = {constants.CONNECTION_RESOURCE_NAME: { 'l2_gateway_id': l2gw['id'], 'tenant_id': 'fake_tenant_id', 'network_id': net['id']}} l2gw_conn = self.l2gw_plugin.create_l2_gateway_connection( self.context, l2gw_conn_data) self.assertIsNotNone(l2gw_conn) self.assertEqual(net['id'], l2gw_conn['network_id']) self.assertEqual(l2gw['id'], l2gw_conn['l2_gateway_id']) def test_create_l2_gateway_connections_same_params(self): type(self.driver)._core_plugin = self.core_plugin be_uuid = uuidutils.generate_uuid() bep_uuid = uuidutils.generate_uuid() l2gw_data1 = self._get_l2_gateway_data_without_seg_id( name='def-l2gw1', device_name=bep_uuid) l2gw1 = self._create_l2gateway(l2gw_data1) l2gw_data2 = self._get_l2_gateway_data_without_seg_id( name='def-l2gw2', device_name=bep_uuid) l2gw2 = self._create_l2gateway(l2gw_data2) net_data = self._get_nw_data() net = self.core_plugin.create_network(self.context, net_data) l2gw_conn_data1 = {constants.CONNECTION_RESOURCE_NAME: { 'l2_gateway_id': l2gw1['id'], 'tenant_id': 'fake_tenant_id', 'segmentation_id': '666', 'network_id': net['id']}} # Override "global" mock to return a known id with mock.patch('vmware_nsxlib.v3.core_resources.' 
'NsxLibBridgeEndpoint.create', return_value={'id': be_uuid}): self.l2gw_plugin.create_l2_gateway_connection( self.context, l2gw_conn_data1) fake_be = {'id': be_uuid, 'vlan': 666, 'bridge_endpoint_profile_id': bep_uuid} with mock.patch('vmware_nsxlib.v3.NsxLib.' 'search_all_resource_by_attributes', return_value=[fake_be]): l2gw_conn_data2 = {constants.CONNECTION_RESOURCE_NAME: { 'l2_gateway_id': l2gw2['id'], 'tenant_id': 'fake_tenant_id', 'segmentation_id': 666, 'network_id': net['id']}} self.assertRaises(n_exc.InvalidInput, self.l2gw_plugin.create_l2_gateway_connection, self.context, l2gw_conn_data2) def test_create_l2_gateway_connections_different_bridge(self): type(self.driver)._core_plugin = self.core_plugin bc_uuid1 = uuidutils.generate_uuid() bc_uuid2 = uuidutils.generate_uuid() l2gw_data1 = self._get_l2_gateway_data(name='def-l2gw1', device_name=bc_uuid1) l2gw1 = self._create_l2gateway(l2gw_data1) l2gw_data2 = self._get_l2_gateway_data(name='def-l2gw2', device_name=bc_uuid2) l2gw2 = self._create_l2gateway(l2gw_data2) net_data = self._get_nw_data() net = self.core_plugin.create_network(self.context, net_data) l2gw_conn_data1 = {constants.CONNECTION_RESOURCE_NAME: { 'l2_gateway_id': l2gw1['id'], 'tenant_id': 'fake_tenant_id', 'network_id': net['id']}} self.l2gw_plugin.create_l2_gateway_connection( self.context, l2gw_conn_data1) l2gw_conn_data2 = {constants.CONNECTION_RESOURCE_NAME: { 'l2_gateway_id': l2gw2['id'], 'tenant_id': 'fake_tenant_id', 'network_id': net['id']}} self.l2gw_plugin.create_l2_gateway_connection( self.context, l2gw_conn_data2) def test_create_l2_gateway_connection_invalid_network_type_fails(self): type(self.driver)._core_plugin = self.core_plugin bep_uuid = uuidutils.generate_uuid() l2gw_data = self._get_l2_gateway_data(name='def-l2gw', device_name=bep_uuid) l2gw = self._create_l2gateway(l2gw_data) net_data = self._get_nw_data(provider=True) net = self.core_plugin.create_network(self.context, net_data) l2gw_conn_data = 
{constants.CONNECTION_RESOURCE_NAME: { 'l2_gateway_id': l2gw['id'], 'tenant_id': 'fake_tenant_id', 'network_id': net['id']}} self.assertRaises(n_exc.InvalidInput, self.l2gw_plugin.create_l2_gateway_connection, self.context, l2gw_conn_data) def test_delete_l2_gateway_connection(self): type(self.driver)._core_plugin = self.core_plugin bc_uuid = uuidutils.generate_uuid() l2gw_data = self._get_l2_gateway_data(name='def-l2gw', device_name=bc_uuid) l2gw = self._create_l2gateway(l2gw_data) net_data = self._get_nw_data() net = self.core_plugin.create_network(self.context, net_data) l2gw_conn_data = {constants.CONNECTION_RESOURCE_NAME: { 'l2_gateway_id': l2gw['id'], 'tenant_id': 'fake_tenant_id', 'project_id': 'fake_tenant_id', 'network_id': net['id']}} l2gw_conn = self.l2gw_plugin.create_l2_gateway_connection( self.context, l2gw_conn_data) self.l2gw_plugin.delete_l2_gateway_connection(self.context, l2gw_conn['id']) # Verify that the L2 gateway connection was deleted self.assertRaises(l2gw_exc.L2GatewayConnectionNotFound, self.l2gw_plugin.get_l2_gateway_connection, self.context, l2gw_conn['id']) ports = self.core_plugin.get_ports(self.context) # Verify that the L2 gateway connection port was cleaned up self.assertEqual(0, len(ports)) def test_create_l2_gateway_connection_creates_port(self): type(self.driver)._core_plugin = self.core_plugin bc_uuid = uuidutils.generate_uuid() l2gw_data = self._get_l2_gateway_data(name='def-l2gw', device_name=bc_uuid) l2gw = self._create_l2gateway(l2gw_data) net_data = self._get_nw_data() net = self.core_plugin.create_network(self.context, net_data) l2gw_conn_data = { 'id': uuidutils.generate_uuid(), 'l2_gateway_id': l2gw['id'], 'tenant_id': 'fake_tenant_id', 'network_id': net['id']} self.driver.create_l2_gateway_connection_postcommit(self.context, l2gw_conn_data) ports = self.core_plugin.get_ports(self.context) # Verify that the L2 gateway connection port was created with device # owner BRIDGEENDPOINT self.assertEqual(1, len(ports)) port = 
ports[0] self.assertEqual(nsx_constants.BRIDGE_ENDPOINT, port['device_owner']) # Verify that the L2 gateway connection port was created with no # fixed ips self.assertEqual(0, len(port.get('fixed_ips'))) def test_core_plugin_delete_l2_gateway_connection_port_fail(self): type(self.driver)._core_plugin = self.core_plugin bc_uuid = uuidutils.generate_uuid() l2gw_data = self._get_l2_gateway_data(name='def-l2gw', device_name=bc_uuid) l2gw = self._create_l2gateway(l2gw_data) net_data = self._get_nw_data() net = self.core_plugin.create_network(self.context, net_data) l2gw_conn_data = { 'id': uuidutils.generate_uuid(), 'l2_gateway_id': l2gw['id'], 'tenant_id': 'fake_tenant_id', 'network_id': net['id']} self.driver.create_l2_gateway_connection_postcommit(self.context, l2gw_conn_data) port = self.core_plugin.get_ports(self.context)[0] self.assertRaises(n_exc.ServicePortInUse, self.core_plugin.delete_port, self.context, port['id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/l2gateway/test_nsxv_driver.py0000644000175000017500000002462600000000000031227 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from neutron.tests import base from networking_l2gw.db.l2gateway import l2gateway_db from neutron_lib import context from neutron_lib import exceptions as n_exc from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.db import nsxv_db from vmware_nsx.dvs import dvs_utils from vmware_nsx.services.l2gateway.nsx_v import driver as nsx_v_driver from vmware_nsx.tests.unit.nsx_v import test_plugin CORE_PLUGIN = "vmware_nsx.plugins.nsx_v.plugin.NsxVPluginV2" class TestL2gatewayDriver(base.BaseTestCase): def setUp(self): super(TestL2gatewayDriver, self).setUp() self.context = context.get_admin_context() self.plugin = nsx_v_driver.NsxvL2GatewayDriver(mock.MagicMock()) def test_validate_device_with_multi_devices(self): fake_l2gw_dict = {"l2_gateway": {"tenant_id": "fake__tenant_id", "name": "fake_l2gw", "devices": [{"interfaces": [{"name": "fake_inter"}], "device_name": "fake_dev"}, {"interfaces": [{"name": "fake_inter_1"}], "device_name": "fake_dev_1"}]}} with mock.patch.object(l2gateway_db.L2GatewayMixin, '_admin_check'): self.assertRaises(n_exc.InvalidInput, self.plugin.create_l2_gateway, self.context, fake_l2gw_dict) def test_validate_interface_with_multi_interfaces(self): fake_l2gw_dict = {"l2_gateway": {"tenant_id": "fake_tenant_id", "name": "fake_l2gw", "devices": [{"interfaces": [{"name": "fake_inter_1"}, {"name": "fake_inter_2"}], "device_name": "fake_dev"}]}} with mock.patch.object(l2gateway_db.L2GatewayMixin, '_admin_check'): self.assertRaises(n_exc.InvalidInput, self.plugin.create_l2_gateway, self.context, fake_l2gw_dict) @mock.patch('vmware_nsx.services.l2gateway.' 'nsx_v.driver.NsxvL2GatewayDriver._nsxv') def test_validate_interface_with_invalid_interfaces(self, _nsxv): fake_interfaces = [{"name": "fake_inter"}] _nsxv.vcns.validate_network.return_value = False self.assertRaises(n_exc.InvalidInput, self.plugin._validate_interface_list, self.context, fake_interfaces) @mock.patch('vmware_nsx.services.l2gateway.' 
'nsx_v.driver.NsxvL2GatewayDriver._edge_manager') def test_create_gw_edge_failure(self, edge_manager): with mock.patch.object(nsxv_db, 'get_nsxv_router_binding', return_value=None): self.assertRaises(nsx_exc.NsxL2GWDeviceNotFound, self.plugin._create_l2_gateway_edge, self.context) @mock.patch('networking_l2gw.db.l2gateway.l2gateway_db.' 'L2GatewayMixin._admin_check') @mock.patch('vmware_nsx.services.l2gateway.' 'nsx_v.driver.NsxvL2GatewayDriver._validate_device_list') @mock.patch('vmware_nsx.services.l2gateway.' 'nsx_v.driver.NsxvL2GatewayDriver._validate_interface_list') @mock.patch('vmware_nsx.services.l2gateway.' 'nsx_v.driver.NsxvL2GatewayDriver._create_l2_gateway_edge') @mock.patch('networking_l2gw.db.l2gateway.l2gateway_db.' 'L2GatewayMixin.create_l2_gateway') def test_create_l2_gateway_failure(self, create_l2gw, _create_l2gw_edge, val_inter, val_dev, _admin_check): fake_l2gw_dict = {"l2_gateway": {"tenant_id": "fake_teannt_id", "name": "fake_l2gw", "devices": [{"interfaces": [{"name": "fake_inter"}], "device_name": "fake_dev"}]}} _create_l2gw_edge.side_effect = nsx_exc.NsxL2GWDeviceNotFound self.assertRaises(nsx_exc.NsxL2GWDeviceNotFound, self.plugin.create_l2_gateway, self.context, fake_l2gw_dict) @mock.patch('networking_l2gw.db.l2gateway.l2gateway_db.' 'L2GatewayMixin._admin_check') @mock.patch('vmware_nsx.services.l2gateway.' 'nsx_v.driver.NsxvL2GatewayDriver._validate_device_list') @mock.patch('vmware_nsx.services.l2gateway.' 'nsx_v.driver.NsxvL2GatewayDriver._validate_interface_list') @mock.patch('vmware_nsx.services.l2gateway.' 'nsx_v.driver.NsxvL2GatewayDriver._create_l2_gateway_edge') @mock.patch('vmware_nsx.services.l2gateway.' 
'nsx_v.driver.NsxvL2GatewayDriver._edge_manager') def test_create_l2_gateway(self, edge_manager, _create_l2gw_edge, val_inter, val_dev, _admin_check): fake_l2gw_dict = {"l2_gateway": {"tenant_id": "fake_teannt_id", "name": "fake_l2gw", "devices": [{"interfaces": [{"name": "fake_inter"}], "device_name": "fake_dev"}]}} fake_devices = [{"interfaces": [{"name": "fake_inter"}], "device_name": "fake_dev"}] fake_interfaces = [{"name": "fake_inter"}] _create_l2gw_edge.return_value = 'fake_dev' self.plugin.create_l2_gateway(self.context, fake_l2gw_dict) _admin_check.assert_called_with(self.context, 'CREATE') val_dev.assert_called_with(fake_devices) val_inter.assert_called_with(self.context, fake_interfaces) @mock.patch('networking_l2gw.db.l2gateway.l2gateway_db.' 'L2GatewayMixin._admin_check') @mock.patch('networking_l2gw.db.l2gateway.l2gateway_db.' 'L2GatewayMixin.get_l2_gateway_connection') @mock.patch('vmware_nsx.services.l2gateway.' 'nsx_v.driver.NsxvL2GatewayDriver._get_device') @mock.patch('vmware_nsx.services.l2gateway.' 'nsx_v.driver.NsxvL2GatewayDriver._nsxv') def test_delete_l2_gateway_connection(self, nsxv, get_devices, get_conn, admin_check): fake_conn_dict = {'l2_gateway_id': 'fake_l2gw_id'} fake_device_dict = {'id': 'fake_dev_id', 'device_name': 'fake_dev_name'} get_conn.return_value = fake_conn_dict get_devices.return_value = fake_device_dict self.plugin.delete_l2_gateway_connection(self.context, fake_conn_dict) admin_check.assert_called_with(self.context, 'DELETE') get_conn.assert_called_with(self.context, fake_conn_dict) get_devices.assert_called_with(self.context, 'fake_l2gw_id') self.plugin._nsxv().del_bridge.asert_called_with('fake_dev_name') @mock.patch('networking_l2gw.db.l2gateway.l2gateway_db.' 'L2GatewayMixin._admin_check') @mock.patch('vmware_nsx.services.l2gateway.' 'nsx_v.driver.NsxvL2GatewayDriver._get_device') @mock.patch('vmware_nsx.db.' 'nsxv_db.get_nsxv_router_binding_by_edge') @mock.patch('vmware_nsx.services.l2gateway.' 
'nsx_v.driver.NsxvL2GatewayDriver._edge_manager') def test_delete_l2_gateway(self, edge_manager, get_nsxv_router, get_devices, admin_check): fake_device_dict = {"id": "fake_dev_id", "device_name": "fake_edge_name", "l2_gateway_id": "fake_l2gw_id"} fake_rtr_binding = {"router_id": 'fake_router_id'} get_devices.return_value = fake_device_dict get_nsxv_router.return_value = fake_rtr_binding self.plugin.delete_l2_gateway(self.context, 'fake_l2gw_id') admin_check.assert_called_with(self.context, 'DELETE') get_devices.assert_called_with(self.context, 'fake_l2gw_id') get_nsxv_router.assert_called_with(self.context.session, "fake_edge_name") class TestL2GatewayDriverRouter(test_plugin.NsxVPluginV2TestCase): @mock.patch.object(dvs_utils, 'dvs_create_session') def setUp(self, *mocks): # init the nsxv plugin, edge manager and fake vcns super(TestL2GatewayDriverRouter, self).setUp(plugin=CORE_PLUGIN, ext_mgr=None) self.context = context.get_admin_context() # init the L2 gateway driver self.driver = nsx_v_driver.NsxvL2GatewayDriver(mock.MagicMock()) @mock.patch('vmware_nsx.services.l2gateway.' 'nsx_v.driver.NsxvL2GatewayDriver._validate_device_list') @mock.patch('vmware_nsx.services.l2gateway.' 
'nsx_v.driver.NsxvL2GatewayDriver._validate_interface_list') def test_create_l2_gateway_router(self, val_inter, val_dev): # Verify that creating the router doesn't fail fake_l2gw_dict = {"l2_gateway": {"tenant_id": "fake_teannt_id", "name": "fake_l2gw", "devices": [{"interfaces": [{"name": "fake_inter"}], "device_name": "fake_dev"}]}} self.driver.create_l2_gateway(self.context, fake_l2gw_dict) def test_create_l2_gateway_router_edge(self): # Verify that the router edge is really created edge_id = self.driver._create_l2_gateway_edge(self.context) self.assertEqual('edge-1', edge_id) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2462552 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/lbaas/0000755000175000017500000000000000000000000024416 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/lbaas/__init__.py0000644000175000017500000000000000000000000026515 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/lbaas/lb_constants.py0000644000175000017500000001475300000000000027473 0ustar00coreycorey00000000000000# Copyright 2019 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
LB_METHOD_ROUND_ROBIN = 'ROUND_ROBIN' LB_METHOD_LEAST_CONNECTIONS = 'LEAST_CONNECTIONS' LB_METHOD_SOURCE_IP = 'SOURCE_IP' SUPPORTED_LB_ALGORITHMS = (LB_METHOD_LEAST_CONNECTIONS, LB_METHOD_ROUND_ROBIN, LB_METHOD_SOURCE_IP) PROTOCOL_TCP = 'TCP' PROTOCOL_HTTP = 'HTTP' PROTOCOL_HTTPS = 'HTTPS' PROTOCOL_TERMINATED_HTTPS = 'TERMINATED_HTTPS' POOL_SUPPORTED_PROTOCOLS = (PROTOCOL_TCP, PROTOCOL_HTTPS, PROTOCOL_HTTP) LISTENER_SUPPORTED_PROTOCOLS = (PROTOCOL_TCP, PROTOCOL_HTTPS, PROTOCOL_HTTP, PROTOCOL_TERMINATED_HTTPS) LISTENER_POOL_COMPATIBLE_PROTOCOLS = ( (PROTOCOL_TCP, PROTOCOL_TCP), (PROTOCOL_HTTP, PROTOCOL_HTTP), (PROTOCOL_HTTPS, PROTOCOL_HTTPS), (PROTOCOL_HTTP, PROTOCOL_TERMINATED_HTTPS)) HEALTH_MONITOR_PING = 'PING' HEALTH_MONITOR_TCP = 'TCP' HEALTH_MONITOR_HTTP = 'HTTP' HEALTH_MONITOR_HTTPS = 'HTTPS' SUPPORTED_HEALTH_MONITOR_TYPES = (HEALTH_MONITOR_HTTP, HEALTH_MONITOR_HTTPS, HEALTH_MONITOR_PING, HEALTH_MONITOR_TCP) HTTP_METHOD_GET = 'GET' HTTP_METHOD_HEAD = 'HEAD' HTTP_METHOD_POST = 'POST' HTTP_METHOD_PUT = 'PUT' HTTP_METHOD_DELETE = 'DELETE' HTTP_METHOD_TRACE = 'TRACE' HTTP_METHOD_OPTIONS = 'OPTIONS' HTTP_METHOD_CONNECT = 'CONNECT' HTTP_METHOD_PATCH = 'PATCH' SUPPORTED_HTTP_METHODS = (HTTP_METHOD_GET, HTTP_METHOD_HEAD, HTTP_METHOD_POST, HTTP_METHOD_PUT, HTTP_METHOD_DELETE, HTTP_METHOD_TRACE, HTTP_METHOD_OPTIONS, HTTP_METHOD_CONNECT, HTTP_METHOD_PATCH) # URL path regex according to RFC 3986 # Format: path = "/" *( "/" segment ) # segment = *pchar # pchar = unreserved / pct-encoded / sub-delims / ":" / "@" # unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~" # pct-encoded = "%" HEXDIG HEXDIG # sub-delims = "!" 
/ "$" / "&" / "'" / "(" / ")" # / "*" / "+" / "," / ";" / "=" SUPPORTED_URL_PATH = ( "^(/([a-zA-Z0-9-._~!$&\'()*+,;=:@]|(%[a-fA-F0-9]{2}))*)+$") SESSION_PERSISTENCE_SOURCE_IP = 'SOURCE_IP' SESSION_PERSISTENCE_HTTP_COOKIE = 'HTTP_COOKIE' SESSION_PERSISTENCE_APP_COOKIE = 'APP_COOKIE' SUPPORTED_SP_TYPES = (SESSION_PERSISTENCE_SOURCE_IP, SESSION_PERSISTENCE_HTTP_COOKIE, SESSION_PERSISTENCE_APP_COOKIE) L7_RULE_TYPE_HOST_NAME = 'HOST_NAME' L7_RULE_TYPE_PATH = 'PATH' L7_RULE_TYPE_FILE_TYPE = 'FILE_TYPE' L7_RULE_TYPE_HEADER = 'HEADER' L7_RULE_TYPE_COOKIE = 'COOKIE' SUPPORTED_L7_RULE_TYPES = (L7_RULE_TYPE_HOST_NAME, L7_RULE_TYPE_PATH, L7_RULE_TYPE_FILE_TYPE, L7_RULE_TYPE_HEADER, L7_RULE_TYPE_COOKIE) L7_RULE_COMPARE_TYPE_REGEX = 'REGEX' L7_RULE_COMPARE_TYPE_STARTS_WITH = 'STARTS_WITH' L7_RULE_COMPARE_TYPE_ENDS_WITH = 'ENDS_WITH' L7_RULE_COMPARE_TYPE_CONTAINS = 'CONTAINS' L7_RULE_COMPARE_TYPE_EQUAL_TO = 'EQUAL_TO' SUPPORTED_L7_RULE_COMPARE_TYPES = (L7_RULE_COMPARE_TYPE_REGEX, L7_RULE_COMPARE_TYPE_STARTS_WITH, L7_RULE_COMPARE_TYPE_ENDS_WITH, L7_RULE_COMPARE_TYPE_CONTAINS, L7_RULE_COMPARE_TYPE_EQUAL_TO) L7_POLICY_ACTION_REJECT = 'REJECT' L7_POLICY_ACTION_REDIRECT_TO_POOL = 'REDIRECT_TO_POOL' L7_POLICY_ACTION_REDIRECT_TO_URL = 'REDIRECT_TO_URL' SUPPORTED_L7_POLICY_ACTIONS = (L7_POLICY_ACTION_REJECT, L7_POLICY_ACTION_REDIRECT_TO_POOL, L7_POLICY_ACTION_REDIRECT_TO_URL) URL_REGEX = "http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*(),]|\ (?:%[0-9a-fA-F][0-9a-fA-F]))+" # See RFCs 2616, 2965, 6265, 7230: Should match characters valid in a # http header or cookie name. HTTP_HEADER_COOKIE_NAME_REGEX = r'\A[a-zA-Z0-9!#$%&\'*+-.^_`|~]+\Z' # See RFCs 2616, 2965, 6265: Should match characters valid in a cookie value. HTTP_COOKIE_VALUE_REGEX = r'\A[a-zA-Z0-9!#$%&\'()*+-./:<=>?@[\]^_`{|}~]+\Z' # See RFC 7230: Should match characters valid in a header value. 
HTTP_HEADER_VALUE_REGEX = (r'\A[a-zA-Z0-9' r'!"#$%&\'()*+,-./:;<=>?@[\]^_`{|}~\\]+\Z') # Also in RFC 7230: Should match characters valid in a header value # when quoted with double quotes. HTTP_QUOTED_HEADER_VALUE_REGEX = (r'\A"[a-zA-Z0-9 \t' r'!"#$%&\'()*+,-./:;<=>?@[\]^_`{|}~\\]*"\Z') STATS_ACTIVE_CONNECTIONS = 'active_connections' STATS_MAX_CONNECTIONS = 'max_connections' STATS_TOTAL_CONNECTIONS = 'total_connections' STATS_CURRENT_SESSIONS = 'current_sessions' STATS_MAX_SESSIONS = 'max_sessions' STATS_TOTAL_SESSIONS = 'total_sessions' STATS_IN_BYTES = 'bytes_in' STATS_OUT_BYTES = 'bytes_out' STATS_CONNECTION_ERRORS = 'connection_errors' STATS_RESPONSE_ERRORS = 'response_errors' STATS_STATUS = 'status' STATS_HEALTH = 'health' STATS_FAILED_CHECKS = 'failed_checks' # Constants to extend status strings in neutron.plugins.common.constants ONLINE = 'ONLINE' OFFLINE = 'OFFLINE' DEGRADED = 'DEGRADED' DISABLED = 'DISABLED' NO_MONITOR = 'NO_MONITOR' OPERATING_STATUSES = (ONLINE, OFFLINE, DEGRADED, DISABLED, NO_MONITOR) NO_CHECK = 'no check' # LBaaS V2 Agent Constants LBAAS_AGENT_SCHEDULER_V2_EXT_ALIAS = 'lbaas_agent_schedulerv2' AGENT_TYPE_LOADBALANCERV2 = 'Loadbalancerv2 agent' LOADBALANCER_PLUGINV2 = 'n-lbaasv2-plugin' LOADBALANCER_AGENTV2 = 'n-lbaasv2_agent' LOADBALANCER = "LOADBALANCER" LOADBALANCERV2 = "LOADBALANCERV2" # Used to check number of connections per second allowed # for the LBaaS V1 vip and LBaaS V2 listeners. -1 indicates # no limit, the value cannot be less than -1. 
MIN_CONNECT_VALUE = -1 # LBaas V2 Table entities LISTENER_EVENT = 'listener' LISTENER_STATS_EVENT = 'listener_stats' LOADBALANCER_EVENT = 'loadbalancer' LOADBALANCER_STATS_EVENT = 'loadbalancer_stats' MEMBER_EVENT = 'member' OPERATING_STATUS = 'operating_status' POOL_EVENT = 'pool' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/lbaas/lb_data_models.py0000644000175000017500000010613000000000000027722 0ustar00coreycorey00000000000000# Copyright 2019 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ This module holds the data models for the load balancer service plugin. These are meant simply as replacement data structures for dictionaries and SQLAlchemy models. Using dictionaries as data containers for many components causes readability issues and does not intuitively give the benefits of what classes and OO give. Using SQLAlchemy models as data containers for many components can become an issue if you do not want to give certain components access to the database. These data models do provide methods for instantiation from SQLAlchemy models and also converting to dictionaries. 
""" from neutron.db.models import servicetype as servicetype_db from neutron.db import models_v2 from neutron_lib.db import model_base import six from sqlalchemy.ext import orderinglist from sqlalchemy.orm import collections from vmware_nsx.tests.unit.services.lbaas import lb_db_models as models L7_POLICY_ACTION_REDIRECT_TO_POOL = 'REDIRECT_TO_POOL' HEALTH_MONITOR_PING = 'PING' HEALTH_MONITOR_TCP = 'TCP' class BaseDataModel(object): # NOTE(ihrachys): we could reuse the list to provide a default __init__ # implementation. That would require handling custom default values though. fields = [] def to_dict(self, **kwargs): ret = {} for attr in self.__dict__: if attr.startswith('_') or not kwargs.get(attr, True): continue value = self.__dict__[attr] if isinstance(getattr(self, attr), list): ret[attr] = [] for item in value: if isinstance(item, BaseDataModel): ret[attr].append(item.to_dict()) else: ret[attr] = item elif isinstance(getattr(self, attr), BaseDataModel): ret[attr] = value.to_dict() elif six.PY2 and isinstance(value, six.text_type): ret[attr.encode('utf8')] = value.encode('utf8') else: ret[attr] = value return ret def to_api_dict(self, **kwargs): return {} @classmethod def from_dict(cls, model_dict): fields = {k: v for k, v in model_dict.items() if k in cls.fields} return cls(**fields) @classmethod def from_sqlalchemy_model(cls, sa_model, calling_classes=None): calling_classes = calling_classes or [] attr_mapping = vars(cls).get("attr_mapping") instance = cls() for attr_name in cls.fields: if attr_name.startswith('_'): continue if attr_mapping and attr_name in attr_mapping.keys(): attr = getattr(sa_model, attr_mapping[attr_name]) elif hasattr(sa_model, attr_name): attr = getattr(sa_model, attr_name) else: continue # Handles M:1 or 1:1 relationships if isinstance(attr, model_base.BASEV2): if hasattr(instance, attr_name): data_class = SA_MODEL_TO_DATA_MODEL_MAP[attr.__class__] # Don't recurse down object classes too far. 
If we have # seen the same object class more than twice, we are # probably in a loop. if data_class and calling_classes.count(data_class) < 2: setattr(instance, attr_name, data_class.from_sqlalchemy_model( attr, calling_classes=calling_classes + [cls])) # Handles 1:M or N:M relationships elif (isinstance(attr, collections.InstrumentedList) or isinstance(attr, orderinglist.OrderingList)): for item in attr: if hasattr(instance, attr_name): data_class = SA_MODEL_TO_DATA_MODEL_MAP[item.__class__] # Don't recurse down object classes too far. If we have # seen the same object class more than twice, we are # probably in a loop. if (data_class and calling_classes.count(data_class) < 2): attr_list = getattr(instance, attr_name) or [] attr_list.append(data_class.from_sqlalchemy_model( item, calling_classes=calling_classes + [cls])) setattr(instance, attr_name, attr_list) # This isn't a relationship so it must be a "primitive" else: setattr(instance, attr_name, attr) return instance @property def root_loadbalancer(self): """Returns the loadbalancer this instance is attached to.""" if isinstance(self, LoadBalancer): lb = self elif isinstance(self, Listener): lb = self.loadbalancer elif isinstance(self, L7Policy): lb = self.listener.loadbalancer elif isinstance(self, L7Rule): lb = self.policy.listener.loadbalancer elif isinstance(self, Pool): lb = self.loadbalancer elif isinstance(self, SNI): lb = self.listener.loadbalancer else: # Pool Member or Health Monitor lb = self.pool.loadbalancer return lb # NOTE(brandon-logan) AllocationPool, HostRoute, Subnet, IPAllocation, Port, # and ProviderResourceAssociation are defined here because there aren't any # data_models defined in core neutron or neutron services. Instead of jumping # through the hoops to create those I've just defined them here. If ever # data_models or similar are defined in those packages, those should be used # instead of these. 
class AllocationPool(BaseDataModel): fields = ['start', 'end'] def __init__(self, start=None, end=None): self.start = start self.end = end class HostRoute(BaseDataModel): fields = ['destination', 'nexthop'] def __init__(self, destination=None, nexthop=None): self.destination = destination self.nexthop = nexthop class Network(BaseDataModel): fields = ['id', 'name', 'description', 'mtu'] def __init__(self, id=None, name=None, description=None, mtu=None): self.id = id self.name = name self.description = description self.mtu = mtu class Subnet(BaseDataModel): fields = ['id', 'name', 'tenant_id', 'network_id', 'ip_version', 'cidr', 'gateway_ip', 'enable_dhcp', 'ipv6_ra_mode', 'ipv6_address_mode', 'shared', 'dns_nameservers', 'host_routes', 'allocation_pools', 'subnetpool_id'] def __init__(self, id=None, name=None, tenant_id=None, network_id=None, ip_version=None, cidr=None, gateway_ip=None, enable_dhcp=None, ipv6_ra_mode=None, ipv6_address_mode=None, shared=None, dns_nameservers=None, host_routes=None, allocation_pools=None, subnetpool_id=None): self.id = id self.name = name self.tenant_id = tenant_id self.network_id = network_id self.ip_version = ip_version self.cidr = cidr self.gateway_ip = gateway_ip self.enable_dhcp = enable_dhcp self.ipv6_ra_mode = ipv6_ra_mode self.ipv6_address_mode = ipv6_address_mode self.shared = shared self.dns_nameservers = dns_nameservers self.host_routes = host_routes self.allocation_pools = allocation_pools self.subnetpool_id = subnetpool_id @classmethod def from_dict(cls, model_dict): host_routes = model_dict.pop('host_routes', []) allocation_pools = model_dict.pop('allocation_pools', []) model_dict['host_routes'] = [HostRoute.from_dict(route) for route in host_routes] model_dict['allocation_pools'] = [AllocationPool.from_dict(ap) for ap in allocation_pools] return super(Subnet, cls).from_dict(model_dict) class IPAllocation(BaseDataModel): fields = ['port_id', 'ip_address', 'subnet_id', 'network_id'] def __init__(self, port_id=None, 
ip_address=None, subnet_id=None, network_id=None): self.port_id = port_id self.ip_address = ip_address self.subnet_id = subnet_id self.network_id = network_id @classmethod def from_dict(cls, model_dict): subnet = model_dict.pop('subnet', None) # TODO(blogan): add subnet to __init__. Can't do it yet because it # causes issues with converting SA models into data models. instance = super(IPAllocation, cls).from_dict(model_dict) setattr(instance, 'subnet', None) if subnet: setattr(instance, 'subnet', Subnet.from_dict(subnet)) return instance class Port(BaseDataModel): fields = ['id', 'tenant_id', 'name', 'network_id', 'mac_address', 'admin_state_up', 'status', 'device_id', 'device_owner', 'fixed_ips', 'network'] def __init__(self, id=None, tenant_id=None, name=None, network_id=None, mac_address=None, admin_state_up=None, status=None, device_id=None, device_owner=None, fixed_ips=None, network=None): self.id = id self.tenant_id = tenant_id self.name = name self.network_id = network_id self.mac_address = mac_address self.admin_state_up = admin_state_up self.status = status self.device_id = device_id self.device_owner = device_owner self.fixed_ips = fixed_ips or [] self.network = network @classmethod def from_dict(cls, model_dict): fixed_ips = model_dict.pop('fixed_ips', []) model_dict['fixed_ips'] = [IPAllocation.from_dict(fixed_ip) for fixed_ip in fixed_ips] if model_dict.get('network'): network_dict = model_dict.pop('network') model_dict['network'] = Network.from_dict(network_dict) return super(Port, cls).from_dict(model_dict) class ProviderResourceAssociation(BaseDataModel): fields = ['provider_name', 'resource_id'] def __init__(self, provider_name=None, resource_id=None): self.provider_name = provider_name self.resource_id = resource_id @classmethod def from_dict(cls, model_dict): device_driver = model_dict.pop('device_driver', None) instance = super(ProviderResourceAssociation, cls).from_dict( model_dict) setattr(instance, 'device_driver', device_driver) return 
instance class SessionPersistence(BaseDataModel): fields = ['pool_id', 'type', 'cookie_name', 'pool'] def __init__(self, pool_id=None, type=None, cookie_name=None, pool=None): self.pool_id = pool_id self.type = type self.cookie_name = cookie_name self.pool = pool def to_api_dict(self): return super(SessionPersistence, self).to_dict(pool=False, pool_id=False) @classmethod def from_dict(cls, model_dict): pool = model_dict.pop('pool', None) if pool: model_dict['pool'] = Pool.from_dict( pool) return super(SessionPersistence, cls).from_dict(model_dict) class LoadBalancerStatistics(BaseDataModel): fields = ['loadbalancer_id', 'bytes_in', 'bytes_out', 'active_connections', 'total_connections', 'loadbalancer'] def __init__(self, loadbalancer_id=None, bytes_in=None, bytes_out=None, active_connections=None, total_connections=None, loadbalancer=None): self.loadbalancer_id = loadbalancer_id self.bytes_in = bytes_in self.bytes_out = bytes_out self.active_connections = active_connections self.total_connections = total_connections self.loadbalancer = loadbalancer def to_api_dict(self): return super(LoadBalancerStatistics, self).to_dict( loadbalancer_id=False, loadbalancer=False) class HealthMonitor(BaseDataModel): fields = ['id', 'tenant_id', 'type', 'delay', 'timeout', 'max_retries', 'http_method', 'url_path', 'expected_codes', 'provisioning_status', 'admin_state_up', 'pool', 'name', 'max_retries_down'] def __init__(self, id=None, tenant_id=None, type=None, delay=None, timeout=None, max_retries=None, http_method=None, url_path=None, expected_codes=None, provisioning_status=None, admin_state_up=None, pool=None, name=None, max_retries_down=None): self.id = id self.tenant_id = tenant_id self.type = type self.delay = delay self.timeout = timeout self.max_retries = max_retries self.http_method = http_method self.url_path = url_path self.expected_codes = expected_codes self.provisioning_status = provisioning_status self.admin_state_up = admin_state_up self.pool = pool self.name = name 
self.max_retries_down = max_retries_down def attached_to_loadbalancer(self): return bool(self.pool and self.pool.loadbalancer) def to_api_dict(self): ret_dict = super(HealthMonitor, self).to_dict( provisioning_status=False, pool=False) ret_dict['pools'] = [] if self.pool: ret_dict['pools'].append({'id': self.pool.id}) if self.type in [HEALTH_MONITOR_TCP, HEALTH_MONITOR_PING]: ret_dict.pop('http_method') ret_dict.pop('url_path') ret_dict.pop('expected_codes') return ret_dict @classmethod def from_dict(cls, model_dict): pool = model_dict.pop('pool', None) if pool: model_dict['pool'] = Pool.from_dict( pool) return super(HealthMonitor, cls).from_dict(model_dict) class Pool(BaseDataModel): fields = ['id', 'tenant_id', 'name', 'description', 'healthmonitor_id', 'protocol', 'lb_algorithm', 'admin_state_up', 'operating_status', 'provisioning_status', 'members', 'healthmonitor', 'session_persistence', 'loadbalancer_id', 'loadbalancer', 'listener', 'listeners', 'l7_policies'] # Map deprecated attribute names to new ones. attr_mapping = {'sessionpersistence': 'session_persistence'} def __init__(self, id=None, tenant_id=None, name=None, description=None, healthmonitor_id=None, protocol=None, lb_algorithm=None, admin_state_up=None, operating_status=None, provisioning_status=None, members=None, healthmonitor=None, session_persistence=None, loadbalancer_id=None, loadbalancer=None, listener=None, listeners=None, l7_policies=None): self.id = id self.tenant_id = tenant_id self.name = name self.description = description self.healthmonitor_id = healthmonitor_id self.protocol = protocol self.lb_algorithm = lb_algorithm self.admin_state_up = admin_state_up self.operating_status = operating_status self.provisioning_status = provisioning_status self.members = members or [] self.healthmonitor = healthmonitor self.session_persistence = session_persistence # NOTE(eezhova): Old attribute name is kept for backwards # compatibility with out-of-tree drivers. 
self.sessionpersistence = self.session_persistence self.loadbalancer_id = loadbalancer_id self.loadbalancer = loadbalancer self.listener = listener self.listeners = listeners or [] self.l7_policies = l7_policies or [] def attached_to_loadbalancer(self): return bool(self.loadbalancer) def to_api_dict(self): ret_dict = super(Pool, self).to_dict( provisioning_status=False, operating_status=False, healthmonitor=False, session_persistence=False, loadbalancer_id=False, loadbalancer=False, listener_id=False) ret_dict['loadbalancers'] = [] if self.loadbalancer: ret_dict['loadbalancers'].append({'id': self.loadbalancer.id}) ret_dict['session_persistence'] = None if self.session_persistence: ret_dict['session_persistence'] = ( self.session_persistence.to_api_dict()) ret_dict['members'] = [{'id': member.id} for member in self.members] ret_dict['listeners'] = [{'id': listener.id} for listener in self.listeners] if self.listener: ret_dict['listener_id'] = self.listener.id else: ret_dict['listener_id'] = None ret_dict['l7_policies'] = [{'id': l7_policy.id} for l7_policy in self.l7_policies] return ret_dict @classmethod def from_dict(cls, model_dict): healthmonitor = model_dict.pop('healthmonitor', None) session_persistence = model_dict.pop('session_persistence', None) model_dict.pop('sessionpersistence', None) loadbalancer = model_dict.pop('loadbalancer', None) members = model_dict.pop('members', []) model_dict['members'] = [Member.from_dict(member) for member in members] listeners = model_dict.pop('listeners', []) model_dict['listeners'] = [Listener.from_dict(listener) for listener in listeners] l7_policies = model_dict.pop('l7_policies', []) model_dict['l7_policies'] = [L7Policy.from_dict(policy) for policy in l7_policies] if healthmonitor: model_dict['healthmonitor'] = HealthMonitor.from_dict( healthmonitor) if session_persistence: model_dict['session_persistence'] = SessionPersistence.from_dict( session_persistence) if loadbalancer: model_dict['loadbalancer'] = 
LoadBalancer.from_dict(loadbalancer) return super(Pool, cls).from_dict(model_dict) class Member(BaseDataModel): fields = ['id', 'tenant_id', 'pool_id', 'address', 'protocol_port', 'weight', 'admin_state_up', 'subnet_id', 'operating_status', 'provisioning_status', 'pool', 'name'] def __init__(self, id=None, tenant_id=None, pool_id=None, address=None, protocol_port=None, weight=None, admin_state_up=None, subnet_id=None, operating_status=None, provisioning_status=None, pool=None, name=None): self.id = id self.tenant_id = tenant_id self.pool_id = pool_id self.address = address self.protocol_port = protocol_port self.weight = weight self.admin_state_up = admin_state_up self.subnet_id = subnet_id self.operating_status = operating_status self.provisioning_status = provisioning_status self.pool = pool self.name = name def attached_to_loadbalancer(self): return bool(self.pool and self.pool.loadbalancer) def to_api_dict(self): return super(Member, self).to_dict( provisioning_status=False, operating_status=False, pool=False) @classmethod def from_dict(cls, model_dict): pool = model_dict.pop('pool', None) if pool: model_dict['pool'] = Pool.from_dict( pool) return super(Member, cls).from_dict(model_dict) class SNI(BaseDataModel): fields = ['listener_id', 'tls_container_id', 'position', 'listener'] def __init__(self, listener_id=None, tls_container_id=None, position=None, listener=None): self.listener_id = listener_id self.tls_container_id = tls_container_id self.position = position self.listener = listener def attached_to_loadbalancer(self): return bool(self.listener and self.listener.loadbalancer) def to_api_dict(self): return super(SNI, self).to_dict(listener=False) class TLSContainer(BaseDataModel): fields = ['id', 'certificate', 'private_key', 'passphrase', 'intermediates', 'primary_cn'] def __init__(self, id=None, certificate=None, private_key=None, passphrase=None, intermediates=None, primary_cn=None): self.id = id self.certificate = certificate self.private_key = 
private_key self.passphrase = passphrase self.intermediates = intermediates self.primary_cn = primary_cn class L7Rule(BaseDataModel): fields = ['id', 'tenant_id', 'l7policy_id', 'type', 'compare_type', 'invert', 'key', 'value', 'provisioning_status', 'admin_state_up', 'policy'] def __init__(self, id=None, tenant_id=None, l7policy_id=None, type=None, compare_type=None, invert=None, key=None, value=None, provisioning_status=None, admin_state_up=None, policy=None): self.id = id self.tenant_id = tenant_id self.l7policy_id = l7policy_id self.type = type self.compare_type = compare_type self.invert = invert self.key = key self.value = value self.provisioning_status = provisioning_status self.admin_state_up = admin_state_up self.policy = policy def attached_to_loadbalancer(self): return bool(self.policy.listener.loadbalancer) def to_api_dict(self): ret_dict = super(L7Rule, self).to_dict( provisioning_status=False, policy=False, l7policy_id=False) ret_dict['policies'] = [] if self.policy: ret_dict['policies'].append({'id': self.policy.id}) return ret_dict @classmethod def from_dict(cls, model_dict): policy = model_dict.pop('policy', None) if policy: model_dict['policy'] = L7Policy.from_dict(policy) return super(L7Rule, cls).from_dict(model_dict) class L7Policy(BaseDataModel): fields = ['id', 'tenant_id', 'name', 'description', 'listener_id', 'action', 'redirect_pool_id', 'redirect_url', 'position', 'admin_state_up', 'provisioning_status', 'listener', 'rules', 'redirect_pool'] def __init__(self, id=None, tenant_id=None, name=None, description=None, listener_id=None, action=None, redirect_pool_id=None, redirect_url=None, position=None, admin_state_up=None, provisioning_status=None, listener=None, rules=None, redirect_pool=None): self.id = id self.tenant_id = tenant_id self.name = name self.description = description self.listener_id = listener_id self.action = action self.redirect_pool_id = redirect_pool_id self.redirect_pool = redirect_pool self.redirect_url = redirect_url 
self.position = position self.admin_state_up = admin_state_up self.provisioning_status = provisioning_status self.listener = listener self.rules = rules or [] def attached_to_loadbalancer(self): return bool(self.listener.loadbalancer) def to_api_dict(self): ret_dict = super(L7Policy, self).to_dict( listener=False, listener_id=True, provisioning_status=False, redirect_pool=False) ret_dict['listeners'] = [] if self.listener: ret_dict['listeners'].append({'id': self.listener.id}) ret_dict['rules'] = [{'id': rule.id} for rule in self.rules] if ret_dict.get('action') == L7_POLICY_ACTION_REDIRECT_TO_POOL: del ret_dict['redirect_url'] return ret_dict @classmethod def from_dict(cls, model_dict): listener = model_dict.pop('listener', None) redirect_pool = model_dict.pop('redirect_pool', None) rules = model_dict.pop('rules', []) if listener: model_dict['listener'] = Listener.from_dict(listener) if redirect_pool: model_dict['redirect_pool'] = Pool.from_dict(redirect_pool) model_dict['rules'] = [L7Rule.from_dict(rule) for rule in rules] return super(L7Policy, cls).from_dict(model_dict) class Listener(BaseDataModel): fields = ['id', 'tenant_id', 'name', 'description', 'default_pool_id', 'loadbalancer_id', 'protocol', 'default_tls_container_id', 'sni_containers', 'protocol_port', 'connection_limit', 'admin_state_up', 'provisioning_status', 'operating_status', 'default_pool', 'loadbalancer', 'l7_policies'] def __init__(self, id=None, tenant_id=None, name=None, description=None, default_pool_id=None, loadbalancer_id=None, protocol=None, default_tls_container_id=None, sni_containers=None, protocol_port=None, connection_limit=None, admin_state_up=None, provisioning_status=None, operating_status=None, default_pool=None, loadbalancer=None, l7_policies=None): self.id = id self.tenant_id = tenant_id self.name = name self.description = description self.default_pool_id = default_pool_id self.loadbalancer_id = loadbalancer_id self.protocol = protocol self.default_tls_container_id = 
default_tls_container_id self.sni_containers = sni_containers or [] self.protocol_port = protocol_port self.connection_limit = connection_limit self.admin_state_up = admin_state_up self.operating_status = operating_status self.provisioning_status = provisioning_status self.default_pool = default_pool self.loadbalancer = loadbalancer self.l7_policies = l7_policies or [] def attached_to_loadbalancer(self): return bool(self.loadbalancer) def to_api_dict(self): ret_dict = super(Listener, self).to_dict( loadbalancer=False, loadbalancer_id=False, default_pool=False, operating_status=False, provisioning_status=False, sni_containers=False, default_tls_container=False) # NOTE(blogan): Returning a list to future proof for M:N objects # that are not yet implemented. ret_dict['loadbalancers'] = [] if self.loadbalancer: ret_dict['loadbalancers'].append({'id': self.loadbalancer.id}) ret_dict['sni_container_refs'] = [container.tls_container_id for container in self.sni_containers] ret_dict['default_tls_container_ref'] = self.default_tls_container_id del ret_dict['l7_policies'] ret_dict['l7policies'] = [{'id': l7_policy.id} for l7_policy in self.l7_policies] return ret_dict @classmethod def from_dict(cls, model_dict): default_pool = model_dict.pop('default_pool', None) loadbalancer = model_dict.pop('loadbalancer', None) sni_containers = model_dict.pop('sni_containers', []) model_dict['sni_containers'] = [SNI.from_dict(sni) for sni in sni_containers] l7_policies = model_dict.pop('l7_policies', []) if default_pool: model_dict['default_pool'] = Pool.from_dict(default_pool) if loadbalancer: model_dict['loadbalancer'] = LoadBalancer.from_dict(loadbalancer) model_dict['l7_policies'] = [L7Policy.from_dict(policy) for policy in l7_policies] return super(Listener, cls).from_dict(model_dict) class LoadBalancer(BaseDataModel): fields = ['id', 'tenant_id', 'name', 'description', 'vip_subnet_id', 'vip_port_id', 'vip_address', 'provisioning_status', 'operating_status', 'admin_state_up', 
'vip_port', 'stats', 'provider', 'listeners', 'pools', 'flavor_id'] def __init__(self, id=None, tenant_id=None, name=None, description=None, vip_subnet_id=None, vip_port_id=None, vip_address=None, provisioning_status=None, operating_status=None, admin_state_up=None, vip_port=None, stats=None, provider=None, listeners=None, pools=None, flavor_id=None): self.id = id self.tenant_id = tenant_id self.name = name self.description = description self.vip_subnet_id = vip_subnet_id self.vip_port_id = vip_port_id self.vip_address = vip_address self.operating_status = operating_status self.provisioning_status = provisioning_status self.admin_state_up = admin_state_up self.vip_port = vip_port self.stats = stats self.provider = provider self.listeners = listeners or [] self.flavor_id = flavor_id self.pools = pools or [] def attached_to_loadbalancer(self): return True def _construct_full_graph_api_dict(self): api_listeners = [] for listener in self.listeners: api_listener = listener.to_api_dict() del api_listener['loadbalancers'] del api_listener['default_pool_id'] if listener.default_pool: api_pool = listener.default_pool.to_api_dict() del api_pool['listeners'] del api_pool['listener'] del api_pool['listener_id'] del api_pool['healthmonitor_id'] del api_pool['loadbalancers'] del api_pool['l7_policies'] del api_pool['sessionpersistence'] if listener.default_pool.healthmonitor: api_hm = listener.default_pool.healthmonitor.to_api_dict() del api_hm['pools'] api_pool['healthmonitor'] = api_hm api_pool['members'] = [] for member in listener.default_pool.members: api_member = member.to_api_dict() del api_member['pool_id'] api_pool['members'].append(api_member) api_listener['default_pool'] = api_pool if listener.l7_policies and len(listener.l7_policies) > 0: api_l7policies = [] for l7policy in listener.l7_policies: api_l7policy = l7policy.to_api_dict() del api_l7policy['redirect_pool_id'] del api_l7policy['listeners'] if l7policy.rules and len(l7policy.rules) > 0: api_l7rules = [] for 
l7rule in l7policy.rules: api_l7rule = l7rule.to_api_dict() del api_l7rule['policies'] api_l7rules.append(api_l7rule) api_l7policy['rules'] = api_l7rules if l7policy.redirect_pool: api_r_pool = l7policy.redirect_pool.to_api_dict() if l7policy.redirect_pool.healthmonitor: api_r_hm = (l7policy.redirect_pool.healthmonitor. to_api_dict()) del api_r_hm['pools'] api_r_pool['healthmonitor'] = api_r_hm api_r_pool['members'] = [] for r_member in l7policy.redirect_pool.members: api_r_member = r_member.to_api_dict() del api_r_member['pool_id'] api_r_pool['members'].append(api_r_member) del api_r_pool['listeners'] del api_r_pool['listener'] del api_r_pool['listener_id'] del api_r_pool['healthmonitor_id'] del api_r_pool['loadbalancers'] del api_r_pool['l7_policies'] del api_r_pool['sessionpersistence'] api_l7policy['redirect_pool'] = api_r_pool api_l7policies.append(api_l7policy) api_listener['l7policies'] = api_l7policies api_listeners.append(api_listener) return api_listeners def to_api_dict(self, full_graph=False): ret_dict = super(LoadBalancer, self).to_dict( vip_port=False, stats=False, listeners=False) if full_graph: ret_dict['listeners'] = self._construct_full_graph_api_dict() del ret_dict['pools'] else: ret_dict['listeners'] = [{'id': listener.id} for listener in self.listeners] ret_dict['pools'] = [{'id': pool.id} for pool in self.pools] if self.provider: ret_dict['provider'] = self.provider.provider_name if not self.flavor_id: del ret_dict['flavor_id'] return ret_dict @classmethod def from_dict(cls, model_dict): listeners = model_dict.pop('listeners', []) pools = model_dict.pop('pools', []) vip_port = model_dict.pop('vip_port', None) provider = model_dict.pop('provider', None) model_dict.pop('stats', None) model_dict['listeners'] = [Listener.from_dict(listener) for listener in listeners] model_dict['pools'] = [Pool.from_dict(pool) for pool in pools] if vip_port: model_dict['vip_port'] = Port.from_dict(vip_port) if provider: model_dict['provider'] = 
ProviderResourceAssociation.from_dict( provider) return super(LoadBalancer, cls).from_dict(model_dict) SA_MODEL_TO_DATA_MODEL_MAP = { models.LoadBalancer: LoadBalancer, models.HealthMonitorV2: HealthMonitor, models.Listener: Listener, models.SNI: SNI, models.L7Rule: L7Rule, models.L7Policy: L7Policy, models.PoolV2: Pool, models.MemberV2: Member, models.LoadBalancerStatistics: LoadBalancerStatistics, models.SessionPersistenceV2: SessionPersistence, models_v2.IPAllocation: IPAllocation, models_v2.Port: Port, servicetype_db.ProviderResourceAssociation: ProviderResourceAssociation } DATA_MODEL_TO_SA_MODEL_MAP = { LoadBalancer: models.LoadBalancer, HealthMonitor: models.HealthMonitorV2, Listener: models.Listener, SNI: models.SNI, L7Rule: models.L7Rule, L7Policy: models.L7Policy, Pool: models.PoolV2, Member: models.MemberV2, LoadBalancerStatistics: models.LoadBalancerStatistics, SessionPersistence: models.SessionPersistenceV2, IPAllocation: models_v2.IPAllocation, Port: models_v2.Port, ProviderResourceAssociation: servicetype_db.ProviderResourceAssociation } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/lbaas/lb_db_models.py0000644000175000017500000005163500000000000027407 0ustar00coreycorey00000000000000# Copyright 2019 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import six from neutron.db.models import servicetype as st_db from neutron.db import models_v2 from neutron_lib.db import constants as db_const from neutron_lib.db import model_base import sqlalchemy as sa from sqlalchemy.ext import orderinglist from sqlalchemy import orm from vmware_nsx.tests.unit.services.lbaas import lb_constants as lb_const class SessionPersistenceV2(model_base.BASEV2): NAME = 'session_persistence' __tablename__ = "lbaas_sessionpersistences" pool_id = sa.Column(sa.String(36), sa.ForeignKey("lbaas_pools.id"), primary_key=True, nullable=False) type = sa.Column(sa.Enum(*lb_const.SUPPORTED_SP_TYPES, name="lbaas_sesssionpersistences_typev2"), nullable=False) cookie_name = sa.Column(sa.String(1024), nullable=True) class LoadBalancerStatistics(model_base.BASEV2): """Represents load balancer statistics.""" NAME = 'loadbalancer_stats' __tablename__ = "lbaas_loadbalancer_statistics" loadbalancer_id = sa.Column(sa.String(36), sa.ForeignKey("lbaas_loadbalancers.id"), primary_key=True, nullable=False) bytes_in = sa.Column(sa.BigInteger, nullable=False) bytes_out = sa.Column(sa.BigInteger, nullable=False) active_connections = sa.Column(sa.BigInteger, nullable=False) total_connections = sa.Column(sa.BigInteger, nullable=False) @orm.validates('bytes_in', 'bytes_out', 'active_connections', 'total_connections') def validate_non_negative_int(self, key, value): if value < 0: data = {'key': key, 'value': value} raise ValueError('The %(key)s field can not have ' 'negative value. ' 'Current value is %(value)d.' 
% data) return value class MemberV2(model_base.BASEV2, model_base.HasId, model_base.HasProject): """Represents a v2 neutron load balancer member.""" NAME = 'member' __tablename__ = "lbaas_members" __table_args__ = ( sa.schema.UniqueConstraint('pool_id', 'address', 'protocol_port', name='uniq_pool_address_port_v2'), ) pool_id = sa.Column(sa.String(36), sa.ForeignKey("lbaas_pools.id"), nullable=False) address = sa.Column(sa.String(64), nullable=False) protocol_port = sa.Column(sa.Integer, nullable=False) weight = sa.Column(sa.Integer, nullable=True) admin_state_up = sa.Column(sa.Boolean(), nullable=False) subnet_id = sa.Column(sa.String(36), nullable=True) provisioning_status = sa.Column(sa.String(16), nullable=False) operating_status = sa.Column(sa.String(16), nullable=False) name = sa.Column(sa.String(db_const.NAME_FIELD_SIZE), nullable=True) @property def root_loadbalancer(self): return self.pool.loadbalancer @property def to_api_dict(self): def to_dict(sa_model, attributes): ret = {} for attr in attributes: value = getattr(sa_model, attr) if six.PY2 and isinstance(value, six.text_type): ret[attr.encode('utf8')] = value.encode('utf8') else: ret[attr] = value return ret ret_dict = to_dict(self, [ 'id', 'tenant_id', 'pool_id', 'address', 'protocol_port', 'weight', 'admin_state_up', 'subnet_id', 'name']) return ret_dict class HealthMonitorV2(model_base.BASEV2, model_base.HasId, model_base.HasProject): """Represents a v2 neutron load balancer healthmonitor.""" NAME = 'healthmonitor' __tablename__ = "lbaas_healthmonitors" type = sa.Column(sa.Enum(*lb_const.SUPPORTED_HEALTH_MONITOR_TYPES, name="healthmonitors_typev2"), nullable=False) delay = sa.Column(sa.Integer, nullable=False) timeout = sa.Column(sa.Integer, nullable=False) max_retries = sa.Column(sa.Integer, nullable=False) http_method = sa.Column(sa.String(16), nullable=True) url_path = sa.Column(sa.String(255), nullable=True) expected_codes = sa.Column(sa.String(64), nullable=True) provisioning_status = 
sa.Column(sa.String(16), nullable=False) admin_state_up = sa.Column(sa.Boolean(), nullable=False) name = sa.Column(sa.String(db_const.NAME_FIELD_SIZE), nullable=True) max_retries_down = sa.Column(sa.Integer, nullable=True) @property def root_loadbalancer(self): return self.pool.loadbalancer @property def to_api_dict(self): def to_dict(sa_model, attributes): ret = {} for attr in attributes: value = getattr(sa_model, attr) if six.PY2 and isinstance(value, six.text_type): ret[attr.encode('utf8')] = value.encode('utf8') else: ret[attr] = value return ret ret_dict = to_dict(self, [ 'id', 'tenant_id', 'type', 'delay', 'timeout', 'max_retries', 'http_method', 'url_path', 'expected_codes', 'admin_state_up', 'name', 'max_retries_down']) ret_dict['pools'] = [] if self.pool: ret_dict['pools'].append({'id': self.pool.id}) if self.type in [lb_const.HEALTH_MONITOR_TCP, lb_const.HEALTH_MONITOR_PING]: ret_dict.pop('http_method') ret_dict.pop('url_path') ret_dict.pop('expected_codes') return ret_dict class LoadBalancer(model_base.BASEV2, model_base.HasId, model_base.HasProject): """Represents a v2 neutron load balancer.""" NAME = 'loadbalancer' __tablename__ = "lbaas_loadbalancers" name = sa.Column(sa.String(255)) description = sa.Column(sa.String(255)) vip_subnet_id = sa.Column(sa.String(36), nullable=False) vip_port_id = sa.Column(sa.String(36), sa.ForeignKey( 'ports.id', name='fk_lbaas_loadbalancers_ports_id')) vip_address = sa.Column(sa.String(36)) provisioning_status = sa.Column(sa.String(16), nullable=False) operating_status = sa.Column(sa.String(16), nullable=False) admin_state_up = sa.Column(sa.Boolean(), nullable=False) vip_port = orm.relationship(models_v2.Port) stats = orm.relationship( LoadBalancerStatistics, uselist=False, backref=orm.backref("loadbalancer", uselist=False), cascade="all, delete-orphan") provider = orm.relationship( st_db.ProviderResourceAssociation, uselist=False, primaryjoin="LoadBalancer.id==ProviderResourceAssociation.resource_id", 
foreign_keys=[st_db.ProviderResourceAssociation.resource_id], # NOTE(ihrachys) it's not exactly clear why we would need to have the # backref created (and not e.g. just back_populates= link) since we # don't use the reverse property anywhere, but it helps with # accommodating to the new neutron code that automatically detects # obsolete foreign key state and expires affected relationships. The # code is located in neutron/db/api.py and assumes all relationships # should have backrefs. backref='loadbalancer', # this is only for old API backwards compatibility because when a load # balancer is deleted the pool ID should be the same as the load # balancer ID and should not be cleared out in this table viewonly=True) flavor_id = sa.Column(sa.String(36), sa.ForeignKey( 'flavors.id', name='fk_lbaas_loadbalancers_flavors_id')) @property def root_loadbalancer(self): return self @property def to_api_dict(self): def to_dict(sa_model, attributes): ret = {} for attr in attributes: value = getattr(sa_model, attr) if six.PY2 and isinstance(value, six.text_type): ret[attr.encode('utf8')] = value.encode('utf8') else: ret[attr] = value return ret ret_dict = to_dict(self, [ 'id', 'tenant_id', 'name', 'description', 'vip_subnet_id', 'vip_port_id', 'vip_address', 'operating_status', 'provisioning_status', 'admin_state_up', 'flavor_id']) ret_dict['listeners'] = [{'id': listener.id} for listener in self.listeners] ret_dict['pools'] = [{'id': pool.id} for pool in self.pools] if self.provider: ret_dict['provider'] = self.provider.provider_name if not self.flavor_id: del ret_dict['flavor_id'] return ret_dict class PoolV2(model_base.BASEV2, model_base.HasId, model_base.HasProject): """Represents a v2 neutron load balancer pool.""" NAME = 'pool' __tablename__ = "lbaas_pools" name = sa.Column(sa.String(255), nullable=True) description = sa.Column(sa.String(255), nullable=True) loadbalancer_id = sa.Column(sa.String(36), sa.ForeignKey( "lbaas_loadbalancers.id")) healthmonitor_id = 
sa.Column(sa.String(36), sa.ForeignKey("lbaas_healthmonitors.id"), unique=True, nullable=True) protocol = sa.Column(sa.Enum(*lb_const.POOL_SUPPORTED_PROTOCOLS, name="pool_protocolsv2"), nullable=False) lb_algorithm = sa.Column(sa.Enum(*lb_const.SUPPORTED_LB_ALGORITHMS, name="lb_algorithmsv2"), nullable=False) admin_state_up = sa.Column(sa.Boolean(), nullable=False) provisioning_status = sa.Column(sa.String(16), nullable=False) operating_status = sa.Column(sa.String(16), nullable=False) members = orm.relationship(MemberV2, backref=orm.backref("pool", uselist=False), cascade="all, delete-orphan") healthmonitor = orm.relationship( HealthMonitorV2, backref=orm.backref("pool", uselist=False)) session_persistence = orm.relationship( SessionPersistenceV2, uselist=False, backref=orm.backref("pool", uselist=False), cascade="all, delete-orphan") loadbalancer = orm.relationship( LoadBalancer, uselist=False, backref=orm.backref("pools", uselist=True)) @property def root_loadbalancer(self): return self.loadbalancer # No real relationship here. But we want to fake a pool having a # 'listener_id' sometimes for API back-ward compatibility purposes. 
@property def listener(self): if self.listeners: return self.listeners[0] else: return None @property def to_api_dict(self): def to_dict(sa_model, attributes): ret = {} for attr in attributes: value = getattr(sa_model, attr) if six.PY2 and isinstance(value, six.text_type): ret[attr.encode('utf8')] = value.encode('utf8') else: ret[attr] = value return ret ret_dict = to_dict(self, [ 'id', 'tenant_id', 'name', 'description', 'healthmonitor_id', 'protocol', 'lb_algorithm', 'admin_state_up']) ret_dict['loadbalancers'] = [] if self.loadbalancer: ret_dict['loadbalancers'].append({'id': self.loadbalancer.id}) ret_dict['session_persistence'] = None if self.session_persistence: ret_dict['session_persistence'] = ( to_dict(self.session_persistence, [ 'type', 'cookie_name'])) ret_dict['members'] = [{'id': member.id} for member in self.members] ret_dict['listeners'] = [{'id': listener.id} for listener in self.listeners] if self.listener: ret_dict['listener_id'] = self.listener.id else: ret_dict['listener_id'] = None ret_dict['l7_policies'] = [{'id': l7_policy.id} for l7_policy in self.l7_policies] return ret_dict class SNI(model_base.BASEV2): """Many-to-many association between Listener and TLS container ids Making the SNI certificates list, ordered using the position """ NAME = 'sni' __tablename__ = "lbaas_sni" listener_id = sa.Column(sa.String(36), sa.ForeignKey("lbaas_listeners.id"), primary_key=True, nullable=False) tls_container_id = sa.Column(sa.String(128), primary_key=True, nullable=False) position = sa.Column(sa.Integer) @property def root_loadbalancer(self): return self.listener.loadbalancer class L7Rule(model_base.BASEV2, model_base.HasId, model_base.HasProject): """Represents L7 Rule.""" NAME = 'l7rule' __tablename__ = "lbaas_l7rules" l7policy_id = sa.Column(sa.String(36), sa.ForeignKey("lbaas_l7policies.id"), nullable=False) type = sa.Column(sa.Enum(*lb_const.SUPPORTED_L7_RULE_TYPES, name="l7rule_typesv2"), nullable=False) compare_type = 
sa.Column(sa.Enum(*lb_const.SUPPORTED_L7_RULE_COMPARE_TYPES, name="l7rule_compare_typev2"), nullable=False) invert = sa.Column(sa.Boolean(), nullable=False) key = sa.Column(sa.String(255), nullable=True) value = sa.Column(sa.String(255), nullable=False) provisioning_status = sa.Column(sa.String(16), nullable=False) admin_state_up = sa.Column(sa.Boolean(), nullable=False) @property def root_loadbalancer(self): return self.policy.listener.loadbalancer @property def to_api_dict(self): def to_dict(sa_model, attributes): ret = {} for attr in attributes: value = getattr(sa_model, attr) if six.PY2 and isinstance(value, six.text_type): ret[attr.encode('utf8')] = value.encode('utf8') else: ret[attr] = value return ret ret_dict = to_dict(self, [ 'id', 'tenant_id', 'type', 'compare_type', 'invert', 'key', 'value', 'admin_state_up']) ret_dict['policies'] = [] if self.policy: ret_dict['policies'].append({'id': self.policy.id}) return ret_dict class L7Policy(model_base.BASEV2, model_base.HasId, model_base.HasProject): """Represents L7 Policy.""" NAME = 'l7policy' __tablename__ = "lbaas_l7policies" name = sa.Column(sa.String(255), nullable=True) description = sa.Column(sa.String(255), nullable=True) listener_id = sa.Column(sa.String(36), sa.ForeignKey("lbaas_listeners.id"), nullable=False) action = sa.Column(sa.Enum(*lb_const.SUPPORTED_L7_POLICY_ACTIONS, name="l7policy_action_typesv2"), nullable=False) redirect_pool_id = sa.Column(sa.String(36), sa.ForeignKey("lbaas_pools.id"), nullable=True) redirect_url = sa.Column(sa.String(255), nullable=True) position = sa.Column(sa.Integer, nullable=False) provisioning_status = sa.Column(sa.String(16), nullable=False) admin_state_up = sa.Column(sa.Boolean(), nullable=False) rules = orm.relationship( L7Rule, uselist=True, primaryjoin="L7Policy.id==L7Rule.l7policy_id", foreign_keys=[L7Rule.l7policy_id], cascade="all, delete-orphan", backref=orm.backref("policy") ) redirect_pool = orm.relationship( PoolV2, backref=orm.backref("l7_policies", 
uselist=True)) @property def root_loadbalancer(self): return self.listener.loadbalancer @property def to_api_dict(self): def to_dict(sa_model, attributes): ret = {} for attr in attributes: value = getattr(sa_model, attr) if six.PY2 and isinstance(value, six.text_type): ret[attr.encode('utf8')] = value.encode('utf8') else: ret[attr] = value return ret ret_dict = to_dict(self, [ 'id', 'tenant_id', 'name', 'description', 'listener_id', 'action', 'redirect_pool_id', 'redirect_url', 'position', 'admin_state_up']) ret_dict['listeners'] = [{'id': self.listener_id}] ret_dict['rules'] = [{'id': rule.id} for rule in self.rules] if (ret_dict.get('action') == lb_const.L7_POLICY_ACTION_REDIRECT_TO_POOL): del ret_dict['redirect_url'] return ret_dict class Listener(model_base.BASEV2, model_base.HasId, model_base.HasProject): """Represents a v2 neutron listener.""" NAME = 'listener' __tablename__ = "lbaas_listeners" __table_args__ = ( sa.schema.UniqueConstraint('loadbalancer_id', 'protocol_port', name='uniq_loadbalancer_listener_port'), ) name = sa.Column(sa.String(255)) description = sa.Column(sa.String(255)) default_pool_id = sa.Column(sa.String(36), sa.ForeignKey("lbaas_pools.id"), nullable=True) loadbalancer_id = sa.Column(sa.String(36), sa.ForeignKey( "lbaas_loadbalancers.id")) protocol = sa.Column(sa.Enum(*lb_const.LISTENER_SUPPORTED_PROTOCOLS, name="listener_protocolsv2"), nullable=False) default_tls_container_id = sa.Column(sa.String(128), default=None, nullable=True) sni_containers = orm.relationship( SNI, backref=orm.backref("listener", uselist=False), uselist=True, primaryjoin="Listener.id==SNI.listener_id", order_by='SNI.position', collection_class=orderinglist.ordering_list( 'position'), foreign_keys=[SNI.listener_id], cascade="all, delete-orphan" ) protocol_port = sa.Column(sa.Integer, nullable=False) connection_limit = sa.Column(sa.Integer) admin_state_up = sa.Column(sa.Boolean(), nullable=False) provisioning_status = sa.Column(sa.String(16), nullable=False) 
operating_status = sa.Column(sa.String(16), nullable=False) default_pool = orm.relationship( PoolV2, backref=orm.backref("listeners")) loadbalancer = orm.relationship( LoadBalancer, backref=orm.backref("listeners", uselist=True)) l7_policies = orm.relationship( L7Policy, uselist=True, primaryjoin="Listener.id==L7Policy.listener_id", order_by="L7Policy.position", collection_class=orderinglist.ordering_list('position', count_from=1), foreign_keys=[L7Policy.listener_id], cascade="all, delete-orphan", backref=orm.backref("listener")) @property def root_loadbalancer(self): return self.loadbalancer @property def to_api_dict(self): def to_dict(sa_model, attributes): ret = {} for attr in attributes: value = getattr(sa_model, attr) if six.PY2 and isinstance(value, six.text_type): ret[attr.encode('utf8')] = value.encode('utf8') else: ret[attr] = value return ret ret_dict = to_dict(self, [ 'id', 'tenant_id', 'name', 'description', 'default_pool_id', 'protocol', 'default_tls_container_id', 'protocol_port', 'connection_limit', 'admin_state_up']) # NOTE(blogan): Returning a list to future proof for M:N objects # that are not yet implemented. ret_dict['loadbalancers'] = [] if self.loadbalancer: ret_dict['loadbalancers'].append({'id': self.loadbalancer.id}) ret_dict['sni_container_refs'] = [container.tls_container_id for container in self.sni_containers] ret_dict['default_tls_container_ref'] = self.default_tls_container_id ret_dict['l7policies'] = [{'id': l7_policy.id} for l7_policy in self.l7_policies] return ret_dict ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/lbaas/lb_translators.py0000644000175000017500000000761300000000000030030 0ustar00coreycorey00000000000000# Copyright 2019 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log as logging LOG = logging.getLogger(__name__) def lb_hm_obj_to_dict(hm): # Translate the LBaaS HM to a dictionary skipping the pool object to avoid # recursions hm_dict = hm.to_dict(pool=False) # Translate the pool separately without it's internal objects if hm.pool: hm_dict['pool'] = lb_pool_obj_to_dict(hm.pool, with_listeners=False) return hm_dict def lb_listener_obj_to_dict(listener): # Translate the LBaaS listener to a dictionary skipping the some objects # to avoid recursions listener_dict = listener.to_dict(loadbalancer=False, default_pool=False) # Translate the default pool separately without it's internal objects if listener.default_pool: listener_dict['default_pool'] = lb_pool_obj_to_dict( listener.default_pool, with_listeners=False) else: listener_dict['default_pool'] = None if listener.loadbalancer: listener_dict['loadbalancer'] = lb_loadbalancer_obj_to_dict( listener.loadbalancer) else: listener_dict['loadbalancer'] = None return listener_dict def lb_pool_obj_to_dict(pool, with_listeners=True): # Translate the LBaaS pool to a dictionary skipping the some objects # to avoid recursions pool_dict = pool.to_dict(listeners=False, listener=False) if with_listeners: # Translate the listener/s separately without it's internal objects if pool.listener: pool_dict['listener'] = lb_listener_obj_to_dict(pool.listener) else: pool_dict['listener'] = None pool_dict['listeners'] = [] if pool.listeners: for listener in pool.listeners: pool_dict['listeners'].append( lb_listener_obj_to_dict(listener)) return pool_dict def 
lb_loadbalancer_obj_to_dict(loadbalancer): return loadbalancer.to_dict() def lb_member_obj_to_dict(member): # Translate the LBaaS member to a dictionary skipping the some objects # to avoid recursions member_dict = member.to_dict(pool=False) # Add the pool dictionary (with its listeners and loadbalancer) if member.pool: member_dict['pool'] = lb_pool_obj_to_dict(member.pool) else: member_dict['pool'] = None return member_dict def lb_l7policy_obj_to_dict(l7policy): # Translate the LBaaS L7 policy to a dictionary skipping the some objects # to avoid recursions l7policy_dict = l7policy.to_dict(listener=False, rules=False) # Add the listener dictionary if l7policy.listener: l7policy_dict['listener'] = lb_listener_obj_to_dict(l7policy.listener) else: l7policy_dict['listener'] = None # Add the rules l7policy_dict['rules'] = [] if l7policy.rules: for rule in l7policy.rules: l7policy_dict['rules'].append( lb_l7rule_obj_to_dict(rule, with_policy=False)) return l7policy_dict def lb_l7rule_obj_to_dict(l7rule, with_policy=True): # Translate the LBaaS L7 rule to a dictionary skipping the some objects # to avoid recursions l7rule_dict = l7rule.to_dict(policy=False) # Add the policy dictionary if with_policy: l7rule_dict['policy'] = lb_l7policy_obj_to_dict(l7rule.policy) else: l7rule_dict['policy'] = None return l7rule_dict ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/lbaas/test_nsxp_driver.py0000644000175000017500000021004300000000000030372 0ustar00coreycorey00000000000000# Copyright (c) 2019 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import mock

from neutron.tests import base
from neutron_lib import context
from neutron_lib import exceptions as n_exc

from vmware_nsx.services.lbaas import base_mgr
from vmware_nsx.services.lbaas.nsx_p.implementation import healthmonitor_mgr
from vmware_nsx.services.lbaas.nsx_p.implementation import l7policy_mgr
from vmware_nsx.services.lbaas.nsx_p.implementation import l7rule_mgr
from vmware_nsx.services.lbaas.nsx_p.implementation import lb_utils as p_utils
from vmware_nsx.services.lbaas.nsx_p.implementation import listener_mgr
from vmware_nsx.services.lbaas.nsx_p.implementation import loadbalancer_mgr
from vmware_nsx.services.lbaas.nsx_p.implementation import member_mgr
from vmware_nsx.services.lbaas.nsx_p.implementation import pool_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import lb_utils
from vmware_nsx.services.lbaas.octavia import octavia_listener
from vmware_nsx.tests.unit.services.lbaas import lb_data_models as lb_models
from vmware_nsx.tests.unit.services.lbaas import lb_translators

# TODO(asarfaty): Use octavia models for those tests

# --- Shared fixture identifiers and fake NSX/neutron payloads ------------
# These constants are referenced throughout the test classes below; many
# tests assert that the mocked NSX policy clients were called with exactly
# these ids/values.
LB_VIP = '10.0.0.10'
LB_ROUTER_ID = 'router-x'
ROUTER_ID = 'neutron-router-x'
LB_ID = 'xxx-xxx'
LB_TENANT_ID = 'yyy-yyy'
# The NSX LB service id mirrors the loadbalancer id.
LB_SERVICE_ID = LB_ID
LB_NETWORK = {'router:external': False,
              'id': 'xxxxx',
              'name': 'network-1'}
EXT_LB_NETWORK = {'router:external': True,
                  'id': 'public',
                  'name': 'network-2'}
LISTENER_ID = 'listener-x'
HTTP_LISTENER_ID = 'listener-http'
HTTPS_LISTENER_ID = 'listener-https'
APP_PROFILE_ID = 'appp-x'
# The NSX virtual server id mirrors the listener id.
LB_VS_ID = LISTENER_ID
LB_APP_PROFILE = {
    "resource_type": "LbHttpProfile",
    "description": "my http profile",
    "id": APP_PROFILE_ID,
    "display_name": "httpprofile1",
    "ntlm": False,
    "request_header_size": 1024,
    "http_redirect_to_https": False,
    "idle_timeout": 1800,
    "x_forwarded_for": "INSERT",
}
POOL_ID = 'ppp-qqq'
LB_POOL_ID = POOL_ID
LB_POOL = {
    "display_name": "httppool1",
    "description": "my http pool",
    "id": LB_POOL_ID,
    "algorithm": "ROUND_ROBIN",
}
MEMBER_ID = 'mmm-mmm'
MEMBER_ADDRESS = '10.0.0.200'
LB_MEMBER = {'display_name': 'member1_' + MEMBER_ID,
             'weight': 1,
             'ip_address': MEMBER_ADDRESS,
             'port': 80,
             'backup_member': False,
             'admin_state_up': True}
LB_POOL_WITH_MEMBER = {
    "display_name": "httppool1",
    "description": "my http pool",
    "id": LB_POOL_ID,
    "algorithm": "ROUND_ROBIN",
    "members": [
        {
            "display_name": "http-member1",
            "ip_address": MEMBER_ADDRESS,
            "port": "80",
            "weight": "1",
            "admin_state": "ENABLED"
        }
    ]
}
HM_ID = 'hhh-mmm'
LB_MONITOR_ID = HM_ID

L7POLICY_ID = 'l7policy-xxx'
LB_RULE_ID = 'lb-rule-xx'
L7RULE_ID = 'l7rule-111'
# Persistence-profile ids are derived from the pool id.
LB_PP_ID = POOL_ID
FAKE_CERT = {'id': 'cert-xyz'}

# Fake NSX status payload returned by lb_service.get_status().
SERVICE_STATUSES = {
    "virtual_servers": [{
        "virtual_server_id": LB_VS_ID,
        "status": "UP"
    }],
    "service_id": LB_SERVICE_ID,
    "service_status": "UP",
    "pools": [{
        "members": [{
            "port": "80",
            "ip_address": MEMBER_ADDRESS,
            "status": "DOWN"
        }],
        "pool_id": LB_POOL_ID,
        "status": "DOWN"
    }]
}

# Fake NSX status payload returned by lb_service.get_virtual_servers_status().
VS_STATUSES = {
    "results": [{
        "virtual_server_id": LB_VS_ID,
        "status": "UP"
    }]
}


class BaseTestEdgeLbaasV2(base.BaseTestCase):
    """Common scaffolding for the NSX-P octavia endpoint tests.

    Builds an NSXOctaviaListenerEndpoint wired to the real nsx_p manager
    implementations, while mocking out the lbv2 driver, the core plugin and
    all NSX policy API clients.  Also prepares LBaaS model objects and their
    dict translations used as inputs by the subclasses.
    """

    def _tested_entity(self):
        # Overridden (as a property) by each subclass to name the lbv2
        # driver manager that _patch_lb_plugin() replaces with a mock.
        return None

    def completor(self, success=True):
        # Completion callback handed to the edge driver; records that it was
        # invoked and with what status so tests can assert on it.
        # NOTE: 'succees' misspelling is kept as-is; it is referenced
        # throughout the test classes.
        self.last_completor_succees = success
        self.last_completor_called = True

    def setUp(self):
        super(BaseTestEdgeLbaasV2, self).setUp()

        self.last_completor_succees = False
        self.last_completor_called = False
        self.context = context.get_admin_context()
        # Real nsx_p manager implementations; only their backend clients are
        # mocked (see _patch_policy_lb_clients).
        octavia_objects = {
            'loadbalancer':
                loadbalancer_mgr.EdgeLoadBalancerManagerFromDict(),
            'listener': listener_mgr.EdgeListenerManagerFromDict(),
            'pool': pool_mgr.EdgePoolManagerFromDict(),
            'member': member_mgr.EdgeMemberManagerFromDict(),
            'healthmonitor':
                healthmonitor_mgr.EdgeHealthMonitorManagerFromDict(),
            'l7policy': l7policy_mgr.EdgeL7PolicyManagerFromDict(),
            'l7rule': l7rule_mgr.EdgeL7RuleManagerFromDict()}
        self.edge_driver = octavia_listener.NSXOctaviaListenerEndpoint(
            **octavia_objects)

        self.lbv2_driver = mock.Mock()
        self.core_plugin = mock.Mock()
        # Inject the mocks at the class level so every manager sees them.
        base_mgr.LoadbalancerBaseManager._lbv2_driver = self.lbv2_driver
        base_mgr.LoadbalancerBaseManager._core_plugin = self.core_plugin
        self._patch_lb_plugin(self.lbv2_driver, self._tested_entity)
        self._patch_policy_lb_clients(self.core_plugin)

        # LBaaS model fixtures shared by the subclasses.
        self.lb = lb_models.LoadBalancer(LB_ID, LB_TENANT_ID, 'lb1', '',
                                         'some-subnet', 'port-id', LB_VIP)
        self.listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID,
                                           'listener1', 'Dummy', None, LB_ID,
                                           'HTTP', protocol_port=80,
                                           loadbalancer=self.lb)
        self.https_listener = lb_models.Listener(
            HTTP_LISTENER_ID, LB_TENANT_ID, 'listener2', '', None,
            LB_ID, 'HTTPS', protocol_port=443, loadbalancer=self.lb)
        self.terminated_https_listener = lb_models.Listener(
            HTTPS_LISTENER_ID, LB_TENANT_ID, 'listener3', '', None,
            LB_ID, 'TERMINATED_HTTPS', protocol_port=443,
            loadbalancer=self.lb)
        self.pool = lb_models.Pool(POOL_ID, LB_TENANT_ID, 'pool1', '',
                                   None, 'HTTP', 'ROUND_ROBIN',
                                   loadbalancer_id=LB_ID,
                                   listener=self.listener,
                                   listeners=[self.listener],
                                   loadbalancer=self.lb)
        self.sess_persistence = lb_models.SessionPersistence(
            POOL_ID, 'HTTP_COOKIE', 'meh_cookie')
        # Same pool as above, but with HTTP-cookie session persistence.
        self.pool_persistency = lb_models.Pool(
            POOL_ID, LB_TENANT_ID, 'pool1', '',
            None, 'HTTP', 'ROUND_ROBIN',
            loadbalancer_id=LB_ID,
            listener=self.listener,
            listeners=[self.listener],
            loadbalancer=self.lb,
            session_persistence=self.sess_persistence)
        self.member = lb_models.Member(MEMBER_ID, LB_TENANT_ID, POOL_ID,
                                       MEMBER_ADDRESS, 80, 1, pool=self.pool,
                                       name='member1', admin_state_up=True)
        self.hm = lb_models.HealthMonitor(HM_ID, LB_TENANT_ID, 'PING', 3, 3,
                                          1, pool=self.pool, name='hm1')
        self.hm_http = lb_models.HealthMonitor(HM_ID, LB_TENANT_ID, 'HTTP',
                                               3, 3, 1, pool=self.pool,
                                               http_method='GET',
                                               url_path="/meh", name='hm2')
        self.l7policy = lb_models.L7Policy(L7POLICY_ID, LB_TENANT_ID,
                                           name='policy-test',
                                           description='policy-desc',
                                           listener_id=LISTENER_ID,
                                           action='REDIRECT_TO_POOL',
                                           redirect_pool_id=POOL_ID,
                                           listener=self.listener,
                                           position=1)
        self.l7rule = lb_models.L7Rule(L7RULE_ID, LB_TENANT_ID,
                                       l7policy_id=L7POLICY_ID,
                                       compare_type='EQUAL_TO',
                                       invert=False,
                                       type='HEADER',
                                       key='key1',
                                       value='val1',
                                       policy=self.l7policy)

        # Translate LBaaS objects to dictionaries
        self.lb_dict = lb_translators.lb_loadbalancer_obj_to_dict(
            self.lb)
        self.listener_dict = lb_translators.lb_listener_obj_to_dict(
            self.listener)
        self.https_listener_dict = lb_translators.lb_listener_obj_to_dict(
            self.https_listener)
        self.terminated_https_listener_dict = lb_translators.\
            lb_listener_obj_to_dict(self.terminated_https_listener)
        self.pool_dict = lb_translators.lb_pool_obj_to_dict(
            self.pool)
        self.pool_persistency_dict = lb_translators.lb_pool_obj_to_dict(
            self.pool_persistency)
        self.member_dict = lb_translators.lb_member_obj_to_dict(
            self.member)
        self.hm_dict = lb_translators.lb_hm_obj_to_dict(
            self.hm)
        self.hm_http_dict = lb_translators.lb_hm_obj_to_dict(
            self.hm_http)
        self.l7policy_dict = lb_translators.lb_l7policy_obj_to_dict(
            self.l7policy)
        self.l7rule_dict = lb_translators.lb_l7rule_obj_to_dict(
            self.l7rule)

    def tearDown(self):
        # Restore the manager patched in setUp() before base teardown.
        self._unpatch_lb_plugin(self.lbv2_driver, self._tested_entity)
        super(BaseTestEdgeLbaasV2, self).tearDown()

    def _patch_lb_plugin(self, lb_plugin, manager):
        """Replace one lbv2 driver manager with a mock, keeping the original.

        The original is saved in self.real_manager so _unpatch_lb_plugin()
        can restore it in tearDown().
        """
        self.real_manager = getattr(lb_plugin, manager)
        lb_manager = mock.patch.object(lb_plugin, manager).start()
        mock.patch.object(lb_manager, 'create').start()
        mock.patch.object(lb_manager, 'update').start()
        mock.patch.object(lb_manager, 'delete').start()
        mock.patch.object(lb_manager, 'successful_completion').start()

    def _patch_policy_lb_clients(self, core_plugin):
        """Mock every NSX policy load-balancer client used by the managers."""
        nsxpolicy = mock.patch.object(core_plugin, 'nsxpolicy').start()
        load_balancer = mock.patch.object(nsxpolicy, 'load_balancer').start()
        self.service_client = mock.patch.object(load_balancer,
                                                'lb_service').start()
        self.app_client = mock.patch.object(load_balancer,
                                            'lb_http_profile').start()
        self.vs_client = mock.patch.object(load_balancer,
                                           'virtual_server').start()
        self.pool_client = mock.patch.object(load_balancer,
                                             'lb_pool').start()
        self.monitor_client = mock.patch.object(
            load_balancer, 'lb_monitor_profile_icmp').start()
        self.http_monitor_client = mock.patch.object(
            load_balancer, 'lb_monitor_profile_http').start()
        self.rule_client = mock.patch.object(load_balancer,
                                             'rule').start()
        self.pp_client = mock.patch.object(
            load_balancer, 'lb_source_ip_persistence_profile').start()
        self.pp_cookie_client = mock.patch.object(
            load_balancer, 'lb_cookie_persistence_profile').start()
        self.pp_generic_client = mock.patch.object(
            load_balancer, 'lb_persistence_profile').start()
        self.tm_client = mock.patch.object(nsxpolicy,
                                           'trust_management').start()
        self.nsxpolicy = nsxpolicy

    def _unpatch_lb_plugin(self, lb_plugin, manager):
        # Restore the manager saved by _patch_lb_plugin().
        setattr(lb_plugin, manager, self.real_manager)


class TestEdgeLbaasV2Loadbalancer(BaseTestEdgeLbaasV2):
    """Tests for the nsx_p loadbalancer manager (NSX LB service CRUD)."""

    def setUp(self):
        super(TestEdgeLbaasV2Loadbalancer, self).setUp()

    @property
    def _tested_entity(self):
        return 'load_balancer'

    def test_create(self):
        neutron_router = {'id': ROUTER_ID, 'name': 'dummy',
                          'external_gateway_info': {'external_fixed_ips': []}}
        with mock.patch.object(lb_utils, 'get_network_from_subnet',
                               return_value=LB_NETWORK), \
            mock.patch.object(lb_utils, 'get_router_from_network',
                              return_value=ROUTER_ID),\
            mock.patch.object(self.core_plugin, 'get_router',
                              return_value=neutron_router), \
            mock.patch.object(self.core_plugin, '_find_router_gw_subnets',
                              return_value=[]),\
            mock.patch.object(self.core_plugin,
                              'service_router_has_loadbalancers',
                              return_value=False) as plugin_has_lb,\
            mock.patch.object(self.service_client, 'get_router_lb_service',
                              return_value=None),\
            mock.patch.object(self.service_client, 'create_or_overwrite'
                              ) as create_service:
            self.edge_driver.loadbalancer.create(
                self.context, self.lb_dict, self.completor)
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)
            # Service should be created with connectivity path
            create_service.assert_called_once_with(
                mock.ANY, lb_service_id=LB_ID,
                description=self.lb_dict['description'],
                tags=mock.ANY, size='SMALL',
                connectivity_path=mock.ANY)
            plugin_has_lb.assert_called_once_with(mock.ANY, ROUTER_ID)

    def test_create_same_router_fail(self):
        # Creation must be rejected when the router already serves another
        # loadbalancer (service_router_has_loadbalancers -> True).
        neutron_router = {'id': ROUTER_ID, 'name': 'dummy',
                          'external_gateway_info': {'external_fixed_ips': []}}
        with mock.patch.object(lb_utils, 'get_network_from_subnet',
                               return_value=LB_NETWORK), \
            mock.patch.object(lb_utils, 'get_router_from_network',
                              return_value=ROUTER_ID),\
            mock.patch.object(self.core_plugin, 'get_router',
                              return_value=neutron_router), \
            mock.patch.object(self.core_plugin, '_find_router_gw_subnets',
                              return_value=[]),\
            mock.patch.object(self.core_plugin,
                              'service_router_has_loadbalancers',
                              return_value=True) as plugin_has_lb,\
            mock.patch.object(self.service_client, 'get_router_lb_service',
                              return_value=None):
            self.assertRaises(
                n_exc.BadRequest,
                self.edge_driver.loadbalancer.create,
                self.context, self.lb_dict, self.completor)
            # The completor should report failure.
            self.assertTrue(self.last_completor_called)
            self.assertFalse(self.last_completor_succees)
            plugin_has_lb.assert_called_once_with(mock.ANY, ROUTER_ID)

    def test_create_external_vip(self):
        # VIP on an external network: no router is attached.
        with mock.patch.object(lb_utils, 'get_router_from_network',
                               return_value=None),\
            mock.patch.object(lb_utils, 'get_network_from_subnet',
                              return_value=EXT_LB_NETWORK), \
            mock.patch.object(self.service_client, 'get_router_lb_service',
                              return_value=None),\
            mock.patch.object(self.service_client, 'create_or_overwrite',
                              return_value={'id': LB_SERVICE_ID}
                              ) as create_service:
            self.edge_driver.loadbalancer.create(self.context, self.lb_dict,
                                                 self.completor)
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)
            # Service should be created with no connectivity path
            create_service.assert_called_once_with(
                mock.ANY, lb_service_id=LB_ID,
                description=self.lb_dict['description'],
                tags=mock.ANY, size='SMALL',
                connectivity_path=None)

    def test_update(self):
        new_lb = lb_models.LoadBalancer(LB_ID, 'yyy-yyy', 'lb1-new',
                                        'new-description', 'some-subnet',
                                        'port-id', LB_VIP)
        new_lb_dict = lb_translators.lb_loadbalancer_obj_to_dict(new_lb)
        self.edge_driver.loadbalancer.update(self.context, self.lb_dict,
                                             new_lb_dict, self.completor)
        self.assertTrue(self.last_completor_called)
        self.assertTrue(self.last_completor_succees)

    def test_delete(self):
        with mock.patch.object(lb_utils, 'get_router_from_network',
                               return_value=ROUTER_ID),\
            mock.patch.object(self.service_client, 'get'
                              ) as mock_get_lb_service, \
            mock.patch.object(self.service_client, 'delete'
                              ) as mock_delete_lb_service:
            mock_get_lb_service.return_value = {'id': LB_SERVICE_ID}
            self.edge_driver.loadbalancer.delete(self.context, self.lb_dict,
                                                 self.completor)
            mock_delete_lb_service.assert_called_with(LB_SERVICE_ID)
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)

    def test_stats(self):
        # Not implemented by the driver; nothing to verify.
        pass

    def test_refresh(self):
        # Not implemented by the driver; nothing to verify.
        pass

    def test_status_update(self):
        with mock.patch.object(self.service_client, 'get_status'
                               ) as mock_get_lb_service_status, \
            mock.patch.object(self.service_client,
                              'get_virtual_servers_status'
                              ) as mock_get_vs_status, \
            mock.patch.object(self.pool_client, 'get'
                              ) as mock_get_pool:
            mock_get_lb_service_status.return_value = SERVICE_STATUSES
            mock_get_vs_status.return_value = VS_STATUSES
            mock_get_pool.return_value = LB_POOL_WITH_MEMBER
            statuses = self.edge_driver.loadbalancer.get_operating_status(
                self.context, self.lb.id, with_members=True)
            self.assertEqual(1, len(statuses['loadbalancers']))
            self.assertEqual('ONLINE', statuses['loadbalancers'][0]['status'])
            # The rest of the statuses are not yet supported
            self.assertEqual(0, len(statuses['pools']))
            self.assertEqual(0, len(statuses['listeners']))
            self.assertEqual(0, len(statuses['members']))
class TestEdgeLbaasV2Listener(BaseTestEdgeLbaasV2):
    """Tests for the nsx_p listener manager (NSX virtual-server CRUD)."""

    def setUp(self):
        super(TestEdgeLbaasV2Listener, self).setUp()

    @property
    def _tested_entity(self):
        return 'listener'

    def _create_listener(self, protocol='HTTP'):
        """Create a listener and verify the virtual-server creation call.

        Runs with HTTP by default; protocol='HTTPS' switches to the HTTPS
        listener fixture.
        """
        with mock.patch.object(self.core_plugin, 'get_floatingips'
                               ) as mock_get_floatingips, \
            mock.patch.object(self.core_plugin,
                              'get_waf_profile_path_and_mode',
                              return_value=(None, None)), \
            mock.patch.object(self.vs_client, 'create_or_overwrite'
                              ) as mock_add_virtual_server:
            mock_get_floatingips.return_value = []
            listener = self.listener_dict
            listener_id = LISTENER_ID
            if protocol == 'HTTPS':
                listener = self.https_listener_dict
                listener_id = HTTP_LISTENER_ID
            self.edge_driver.listener.create(self.context, listener,
                                             self.completor)
            mock_add_virtual_server.assert_called_with(
                application_profile_id=listener_id,
                description=listener['description'],
                lb_service_id=LB_ID,
                ip_address=LB_VIP,
                tags=mock.ANY, name=mock.ANY,
                ports=[listener['protocol_port']],
                max_concurrent_connections=None,
                virtual_server_id=listener_id,
                pool_id='',
                lb_persistence_profile_id='')
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)

    def test_create_http_listener(self):
        self._create_listener()

    def test_create_https_listener(self):
        self._create_listener(protocol='HTTPS')

    def test_create_terminated_https(self):
        # TERMINATED_HTTPS listeners also upload the certificate via the
        # trust management client.
        with mock.patch.object(self.core_plugin, 'get_floatingips'
                               ) as mock_get_floatingips, \
            mock.patch.object(self.core_plugin,
                              'get_waf_profile_path_and_mode',
                              return_value=(None, None)), \
            mock.patch.object(self.tm_client, 'create_cert'
                              ) as mock_create_cert, \
            mock.patch.object(self.vs_client, 'create_or_overwrite'
                              ) as mock_add_virtual_server:
            mock_get_floatingips.return_value = []
            mock_create_cert.return_value = FAKE_CERT['id']
            self.edge_driver.listener.create(
                self.context,
                self.terminated_https_listener_dict,
                self.completor)
            mock_add_virtual_server.assert_called_with(
                application_profile_id=HTTPS_LISTENER_ID,
                description=self.terminated_https_listener_dict[
                    'description'],
                lb_service_id=LB_ID,
                ip_address=LB_VIP,
                tags=mock.ANY, name=mock.ANY,
                ports=[self.terminated_https_listener_dict['protocol_port']],
                max_concurrent_connections=None,
                virtual_server_id=HTTPS_LISTENER_ID,
                pool_id='',
                lb_persistence_profile_id='')
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)

    def test_create_listener_with_default_pool(self):
        listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID,
                                      'listener1', 'Dummy',
                                      self.pool.id, LB_ID, 'HTTP',
                                      protocol_port=80,
                                      loadbalancer=self.lb,
                                      default_pool=self.pool)
        listener_dict = lb_translators.lb_listener_obj_to_dict(listener)
        with mock.patch.object(self.core_plugin, 'get_floatingips'
                               ) as mock_get_floatingips, \
            mock.patch.object(self.core_plugin,
                              'get_waf_profile_path_and_mode',
                              return_value=(None, None)), \
            mock.patch.object(self.vs_client, 'create_or_overwrite'
                              ) as mock_add_virtual_server:
            mock_get_floatingips.return_value = []
            self.edge_driver.listener.create(self.context, listener_dict,
                                             self.completor)
            mock_add_virtual_server.assert_called_with(
                application_profile_id=LISTENER_ID,
                description=listener_dict['description'],
                lb_service_id=LB_ID,
                ip_address=LB_VIP,
                tags=mock.ANY, name=mock.ANY,
                ports=[listener_dict['protocol_port']],
                max_concurrent_connections=None,
                virtual_server_id=LISTENER_ID,
                pool_id=POOL_ID)
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)

    def test_create_listener_with_used_default_pool(self):
        # A default pool already attached to another virtual server must be
        # rejected.
        listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID,
                                      'listener1', 'Dummy',
                                      self.pool.id, LB_ID, 'HTTP',
                                      protocol_port=80,
                                      loadbalancer=self.lb,
                                      default_pool=self.pool)
        listener_dict = lb_translators.lb_listener_obj_to_dict(listener)
        with mock.patch.object(self.core_plugin, 'get_floatingips'
                               ) as mock_get_floatingips,\
            mock.patch.object(self.core_plugin,
                              'get_waf_profile_path_and_mode',
                              return_value=(None, None)),\
            mock.patch.object(self.vs_client, 'list',
                              return_value=[{'pool_path': POOL_ID}]):
            mock_get_floatingips.return_value = []
            self.assertRaises(n_exc.BadRequest,
                              self.edge_driver.listener.create,
                              self.context, listener_dict,
                              self.completor)

    def test_create_listener_with_session_persistence(self):
        listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID,
                                      'listener1', 'Dummy',
                                      self.pool_persistency.id,
                                      LB_ID, 'HTTP',
                                      protocol_port=80,
                                      loadbalancer=self.lb,
                                      default_pool=self.pool_persistency)
        listener_dict = lb_translators.lb_listener_obj_to_dict(listener)
        with mock.patch.object(self.core_plugin, 'get_floatingips'
                               ) as mock_get_floatingips, \
            mock.patch.object(self.core_plugin,
                              'get_waf_profile_path_and_mode',
                              return_value=(None, None)), \
            mock.patch.object(self.vs_client, 'create_or_overwrite'
                              ) as mock_add_virtual_server,\
            mock.patch.object(self.vs_client, 'get',
                              return_value={}),\
            mock.patch.object(self.edge_driver.listener,
                              '_get_pool_tags'),\
            mock.patch.object(self.pp_cookie_client, 'create_or_overwrite'
                              ) as mock_create_pp:
            mock_get_floatingips.return_value = []
            self.edge_driver.listener.create(self.context, listener_dict,
                                             self.completor)
            mock_add_virtual_server.assert_called_with(
                application_profile_id=LISTENER_ID,
                description=listener_dict['description'],
                lb_service_id=LB_ID,
                ip_address=LB_VIP,
                tags=mock.ANY, name=mock.ANY,
                ports=[listener_dict['protocol_port']],
                max_concurrent_connections=None,
                virtual_server_id=LISTENER_ID,
                pool_id=listener_dict['default_pool_id'])
            # The cookie persistence profile should be created as well.
            mock_create_pp.assert_called_once()
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)

    def test_create_listener_with_session_persistence_fail(self):
        # Session persistence on a TCP listener is not supported.
        listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID,
                                      'listener1', 'Dummy',
                                      self.pool_persistency.id,
                                      LB_ID, 'TCP',
                                      protocol_port=80,
                                      loadbalancer=self.lb,
                                      default_pool=self.pool_persistency)
        listener_dict = lb_translators.lb_listener_obj_to_dict(listener)
        with mock.patch.object(self.core_plugin,
                               'get_waf_profile_path_and_mode',
                               return_value=(None, None)), \
            mock.patch.object(self.core_plugin, 'get_floatingips'
                              ) as mock_get_floatingips:
            mock_get_floatingips.return_value = []
            self.assertRaises(n_exc.BadRequest,
                              self.edge_driver.listener.create,
                              self.context, listener_dict,
                              self.completor)

    def test_update(self):
        new_listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID,
                                          'listener1-new', 'new-description',
                                          None, LB_ID, protocol_port=80,
                                          loadbalancer=self.lb)
        new_listener_dict = lb_translators.lb_listener_obj_to_dict(
            new_listener)
        with mock.patch.object(self.core_plugin,
                               'get_waf_profile_path_and_mode',
                               return_value=(None, None)), \
            mock.patch.object(self.core_plugin, 'get_floatingips'
                              ) as mock_get_floatingips:
            mock_get_floatingips.return_value = []
            self.edge_driver.listener.update(self.context,
                                             self.listener_dict,
                                             new_listener_dict,
                                             self.completor)
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)

    def test_update_with_default_pool(self):
        # Sanity: the completor must not have been triggered yet.
        self.assertFalse(self.last_completor_called)
        new_listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID,
                                          'listener1-new', 'new-description',
                                          self.pool, LB_ID, protocol_port=80,
                                          loadbalancer=self.lb,
                                          default_pool=self.pool)
        new_listener_dict = lb_translators.lb_listener_obj_to_dict(
            new_listener)
        with mock.patch.object(self.core_plugin,
                               'get_waf_profile_path_and_mode',
                               return_value=(None, None)), \
            mock.patch.object(self.core_plugin, 'get_floatingips'
                              ) as mock_get_floatingips:
            mock_get_floatingips.return_value = []
            self.edge_driver.listener.update(self.context,
                                             self.listener_dict,
                                             new_listener_dict,
                                             self.completor)
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)

    def test_update_with_session_persistence(self):
        new_listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID,
                                          'listener1-new', 'new-description',
                                          self.pool_persistency.id, LB_ID,
                                          protocol='HTTP', protocol_port=80,
                                          loadbalancer=self.lb,
                                          default_pool=self.pool_persistency)
        new_listener_dict = lb_translators.lb_listener_obj_to_dict(
            new_listener)
        with mock.patch.object(self.core_plugin, 'get_floatingips'
                               ) as mock_get_floatingips, \
            mock.patch.object(self.core_plugin,
                              'get_waf_profile_path_and_mode',
                              return_value=(None, None)), \
            mock.patch.object(self.edge_driver.listener,
                              '_get_pool_tags'),\
            mock.patch.object(self.vs_client, 'get',
                              return_value={}),\
            mock.patch.object(self.vs_client, 'update',
                              return_value={'id': LB_VS_ID}), \
            mock.patch.object(self.pp_cookie_client, 'create_or_overwrite'
                              ) as mock_create_pp:
            mock_get_floatingips.return_value = []
            self.edge_driver.listener.update(self.context,
                                             self.listener_dict,
                                             new_listener_dict,
                                             self.completor)
            mock_create_pp.assert_called_once()
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)

    def test_update_with_session_persistence_change(self):
        # Switching the default pool's persistence type (cookie -> source-ip)
        # should create the new profile and delete the old one.
        old_listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID,
                                          'listener1', 'description',
                                          self.pool_persistency.id, LB_ID,
                                          protocol='HTTP', protocol_port=80,
                                          loadbalancer=self.lb,
                                          default_pool=self.pool_persistency)
        old_listener_dict = lb_translators.lb_listener_obj_to_dict(
            old_listener)
        sess_persistence = lb_models.SessionPersistence(
            POOL_ID, 'SOURCE_IP')
        pool_persistency = lb_models.Pool('new_pool_id', LB_TENANT_ID,
                                          'pool1', '',
                                          None, 'HTTP', 'ROUND_ROBIN',
                                          loadbalancer_id=LB_ID,
                                          listener=self.listener,
                                          listeners=[self.listener],
                                          loadbalancer=self.lb,
                                          session_persistence=
                                          sess_persistence)
        new_listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID,
                                          'listener1-new', 'new-description',
                                          pool_persistency.id, LB_ID,
                                          protocol='HTTP', protocol_port=80,
                                          loadbalancer=self.lb,
                                          default_pool=pool_persistency)
        new_listener_dict = lb_translators.lb_listener_obj_to_dict(
            new_listener)
        with mock.patch.object(self.core_plugin,
                               'get_waf_profile_path_and_mode',
                               return_value=(None, None)), \
            mock.patch.object(self.pp_client, 'create_or_overwrite'
                              ) as mock_create_pp, \
            mock.patch.object(self.pp_generic_client, 'delete'
                              ) as mock_delete_pp, \
            mock.patch.object(self.core_plugin, 'get_floatingips'
                              ) as mock_get_floatingips, \
            mock.patch.object(self.edge_driver.listener, '_get_pool_tags'
                              ) as mock_get_pool_tags:
            mock_get_pool_tags.return_value = []
            mock_get_floatingips.return_value = []
            self.edge_driver.listener.update(
                self.context, old_listener_dict, new_listener_dict,
                self.completor)
            mock_create_pp.assert_called_once_with(
                name='persistence_pool1_new_p...ol_id',
                persistence_profile_id='new_pool_id_sourceip',
                tags=mock.ANY)
            # No reason to check parameters here, it's
            # all mocked out
            mock_delete_pp.assert_called_once()

    def test_delete(self):
        with mock.patch.object(self.service_client, 'get'
                               ) as mock_get_lb_service, \
            mock.patch.object(self.app_client, 'delete'
                              ) as mock_delete_app_profile, \
            mock.patch.object(self.vs_client, 'delete'
                              ) as mock_delete_virtual_server:
            mock_get_lb_service.return_value = {
                'id': LB_SERVICE_ID,
                'virtual_server_ids': [LB_VS_ID]}
            self.edge_driver.listener.delete(self.context,
                                             self.listener_dict,
                                             self.completor)
            mock_delete_virtual_server.assert_called_with(LB_VS_ID)
            mock_delete_app_profile.assert_called_with(LISTENER_ID)
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)


class TestEdgeLbaasV2Pool(BaseTestEdgeLbaasV2):
    """Tests for the nsx_p pool manager, including session persistence."""

    def setUp(self):
        super(TestEdgeLbaasV2Pool, self).setUp()

    @property
    def _tested_entity(self):
        return 'pool'

    def test_create(self):
        with mock.patch.object(self.pp_client, 'create_or_overwrite'
                               ) as mock_create_pp, \
            mock.patch.object(self.vs_client, 'update', return_value=None
                              ) as mock_vs_update:
            self.edge_driver.pool.create(self.context, self.pool_dict,
                                         self.completor)
            # Without session persistence no profile should be created.
            mock_create_pp.assert_not_called()
            mock_vs_update.assert_called_once_with(
                LB_VS_ID, pool_id=LB_POOL_ID,
                lb_persistence_profile_id=None)
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)

    def _test_create_with_persistency(self, vs_data, verify_func):
        """Create the persistency pool and verify via verify_func.

        vs_data is returned by the mocked virtual_server.get(); verify_func
        receives the create/update persistence-profile mocks and the
        virtual-server update mock.
        """
        with mock.patch.object(self.edge_driver.pool, '_get_pool_tags'),\
            mock.patch.object(self.pp_cookie_client, 'create_or_overwrite'
                              ) as mock_create_pp, \
            mock.patch.object(self.pp_cookie_client, 'update',
                              return_value=None) as mock_update_pp, \
            mock.patch.object(self.vs_client, 'get'
                              ) as mock_vs_get, \
            mock.patch.object(self.vs_client, 'update', return_value=None
                              ) as mock_vs_update:
            mock_vs_get.return_value = vs_data
            self.edge_driver.pool.create(
                self.context, self.pool_persistency_dict, self.completor)
            verify_func(mock_create_pp, mock_update_pp, mock_vs_update)
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)

    def test_create_with_persistency(self):
        def verify_func(mock_create_pp, mock_update_pp, mock_vs_update):
            mock_create_pp.assert_called_once_with(
                cookie_mode='INSERT',
                cookie_name='meh_cookie',
                name=mock.ANY,
                tags=mock.ANY,
                persistence_profile_id="%s_cookie" % LB_PP_ID)
            mock_update_pp.assert_not_called()
            mock_vs_update.assert_called_once_with(
                LB_VS_ID, pool_id=LB_POOL_ID,
                lb_persistence_profile_id="%s_cookie" % LB_PP_ID)

        vs_data = {'id': LB_VS_ID}
        self._test_create_with_persistency(vs_data, verify_func)

    def test_create_with_persistency_existing_profile(self):
        # When the virtual server already has a persistence profile it is
        # updated rather than re-created.
        def verify_func(mock_create_pp, mock_update_pp, mock_vs_update):
            mock_create_pp.assert_not_called()
            mock_update_pp.assert_called_once_with(
                LB_PP_ID,
                cookie_mode='INSERT',
                cookie_name='meh_cookie',
                name=mock.ANY,
                tags=mock.ANY)
            mock_vs_update.assert_called_once_with(
                LB_VS_ID, pool_id=LB_POOL_ID,
                lb_persistence_profile_id=LB_PP_ID)

        vs_data = {'id': LB_VS_ID,
                   'lb_persistence_profile_path': LB_PP_ID}
        self._test_create_with_persistency(vs_data, verify_func)

    def test_create_with_persistency_no_listener(self):
        # Without a listener there is no virtual server to attach to, so
        # nothing should be created or updated.
        def verify_func(mock_create_pp, mock_update_pp, mock_vs_update):
            mock_create_pp.assert_not_called()
            mock_update_pp.assert_not_called()
            mock_vs_update.assert_not_called()

        vs_data = {'id': LB_VS_ID,
                   'lb_persistence_profile_path': LB_PP_ID}
        self.pool_persistency_dict['listener'] = None
        self.pool_persistency_dict['listeners'] = []
        self._test_create_with_persistency(vs_data, verify_func)

    def test_create_multiple_listeners(self):
        """Verify creation will fail if multiple listeners are set"""
        pool = lb_models.Pool(POOL_ID, LB_TENANT_ID, 'pool1', '',
                              None, 'HTTP', 'ROUND_ROBIN',
                              loadbalancer_id=LB_ID,
                              listeners=[self.listener,
                                         self.https_listener],
                              loadbalancer=self.lb)
        pool_dict = lb_translators.lb_pool_obj_to_dict(pool)
        self.assertRaises(n_exc.BadRequest,
                          self.edge_driver.pool.create,
                          self.context, pool_dict, self.completor)

    def test_update(self):
        new_pool = lb_models.Pool(POOL_ID, LB_TENANT_ID, 'pool-name', '',
                                  None, 'HTTP', 'LEAST_CONNECTIONS',
                                  listener=self.listener)
        new_pool_dict = lb_translators.lb_pool_obj_to_dict(new_pool)
        self.edge_driver.pool.update(self.context, self.pool_dict,
                                     new_pool_dict, self.completor)
        self.assertTrue(self.last_completor_called)
        self.assertTrue(self.last_completor_succees)

    def test_update_multiple_listeners(self):
        """Verify update action will fail if multiple listeners are set"""
        new_pool = lb_models.Pool(POOL_ID, LB_TENANT_ID, 'pool1', '',
                                  None, 'HTTP', 'ROUND_ROBIN',
                                  loadbalancer_id=LB_ID,
                                  listeners=[self.listener,
                                             self.https_listener],
                                  loadbalancer=self.lb)
        new_pool_dict = lb_translators.lb_pool_obj_to_dict(new_pool)
        self.assertRaises(n_exc.BadRequest,
                          self.edge_driver.pool.update,
                          self.context, self.pool_dict, new_pool_dict,
                          self.completor)

    def _test_update_with_persistency(self, vs_data, old_pool, new_pool,
                                      verify_func, cookie=False):
        """Update old_pool -> new_pool and verify persistence handling.

        cookie=True routes the create/update verification to the cookie
        persistence-profile client instead of the source-ip one.
        """
        old_pool_dict = lb_translators.lb_pool_obj_to_dict(old_pool)
        new_pool_dict = lb_translators.lb_pool_obj_to_dict(new_pool)
        with mock.patch.object(self.edge_driver.pool, '_get_pool_tags'),\
            mock.patch.object(self.pp_client, 'create_or_overwrite'
                              ) as mock_create_pp, \
            mock.patch.object(self.pp_cookie_client, 'create_or_overwrite'
                              ) as mock_create_cookie_pp, \
            mock.patch.object(self.pp_client, 'update', return_value=None
                              ) as mock_update_pp, \
            mock.patch.object(self.pp_cookie_client, 'update',
                              return_value=None) as mock_update_cookie_pp, \
            mock.patch.object(self.pp_generic_client, 'delete',
                              return_value=None) as mock_delete_pp, \
            mock.patch.object(self.vs_client, 'get'
                              ) as mock_vs_get, \
            mock.patch.object(self.vs_client, 'update', return_value=None
                              ) as mock_vs_update:
            mock_vs_get.return_value = vs_data
            self.edge_driver.pool.update(self.context, old_pool_dict,
                                         new_pool_dict, self.completor)
            verify_func(
                mock_create_cookie_pp if cookie else mock_create_pp,
                mock_update_cookie_pp if cookie else mock_update_pp,
                mock_delete_pp,
                mock_vs_update)
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)

    def test_update_with_persistency(self):
        def verify_func(mock_create_pp, mock_update_pp,
                        mock_delete_pp, mock_vs_update):
            mock_create_pp.assert_called_once_with(
                cookie_mode='INSERT',
                cookie_name='meh_cookie',
                name=mock.ANY,
                tags=mock.ANY,
                persistence_profile_id="%s_cookie" % LB_PP_ID)
            mock_update_pp.assert_not_called()
            mock_delete_pp.assert_not_called()
            mock_vs_update.assert_called_once_with(
                LB_VS_ID, pool_id=LB_POOL_ID,
                lb_persistence_profile_id="%s_cookie" % LB_PP_ID)

        vs_data = {'id': LB_VS_ID}
        self._test_update_with_persistency(vs_data, self.pool,
                                           self.pool_persistency,
                                           verify_func, cookie=True)

    def test_update_switch_persistency_type(self):
        # Cookie -> source-ip switch: new profile is created, old deleted.
        def verify_func(mock_create_pp, mock_update_pp,
                        mock_delete_pp, mock_vs_update):
            mock_create_pp.assert_called_once_with(
                name=mock.ANY,
                tags=mock.ANY,
                persistence_profile_id="%s_sourceip" % LB_PP_ID)
            mock_update_pp.assert_not_called()
            mock_delete_pp.assert_called_once()
            mock_vs_update.assert_called_once_with(
                LB_VS_ID, pool_id=LB_POOL_ID,
                lb_persistence_profile_id="%s_sourceip" % LB_PP_ID)

        ip_sess_persistence = lb_models.SessionPersistence(
            POOL_ID, 'SOURCE_IP')
        pool_ip_persistency = lb_models.Pool(
            POOL_ID, LB_TENANT_ID, 'pool1', '',
            None, 'HTTP', 'ROUND_ROBIN',
            loadbalancer_id=LB_ID,
            listener=self.listener,
            listeners=[self.listener],
            loadbalancer=self.lb,
            session_persistence=ip_sess_persistence)
        vs_data = {'id': LB_VS_ID,
                   'lb_persistence_profile_path': 'meh'}
        self._test_update_with_persistency(vs_data, self.pool_persistency,
                                           pool_ip_persistency,
                                           verify_func,)

    def test_update_remove_persistency(self):
        def verify_func(mock_create_pp, mock_update_pp,
                        mock_delete_pp, mock_vs_update):
            mock_create_pp.assert_not_called()
            mock_update_pp.assert_not_called()
            mock_delete_pp.assert_called_with(LB_PP_ID)
            mock_vs_update.assert_called_once_with(
                LB_VS_ID, pool_id=LB_POOL_ID,
                lb_persistence_profile_id=None)

        vs_data = {'id': LB_VS_ID,
                   'lb_persistence_profile_path': LB_PP_ID}
        self._test_update_with_persistency(vs_data, self.pool_persistency,
                                           self.pool, verify_func)

    def test_delete(self):
        with mock.patch.object(self.vs_client, 'update', return_value=None
                               ) as mock_update_virtual_server, \
            mock.patch.object(self.pool_client, 'delete'
                              ) as mock_delete_pool:
            self.edge_driver.pool.delete(self.context, self.pool_dict,
                                         self.completor)
            # The pool is detached from the virtual server before deletion.
            mock_update_virtual_server.assert_called_with(
                LB_VS_ID, lb_persistence_profile_id=None, pool_id=None)
            mock_delete_pool.assert_called_with(LB_POOL_ID)
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)

    def test_delete_with_persistency(self):
        with mock.patch.object(self.vs_client, 'get'
                               ) as mock_vs_get, \
            mock.patch.object(self.vs_client, 'update', return_value=None
                              ) as mock_update_virtual_server, \
            mock.patch.object(self.pool_client, 'delete'
                              ) as mock_delete_pool, \
            mock.patch.object(self.pp_generic_client, 'delete',
                              return_value=None) as mock_delete_pp:
            mock_vs_get.return_value = {
                'id': LB_VS_ID,
                'lb_persistence_profile_path': LB_PP_ID}
            self.edge_driver.pool.delete(
                self.context, self.pool_persistency_dict, self.completor)
            mock_delete_pp.assert_called_once_with(LB_PP_ID)
            mock_update_virtual_server.assert_called_once_with(
                LB_VS_ID, lb_persistence_profile_id=None, pool_id=None)
            mock_delete_pool.assert_called_with(LB_POOL_ID)
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)

    def _verify_create(self, cookie_name, cookie_mode,
                       mock_create_pp, mock_update_pp):
        # Expect a freshly created persistence profile (cookie or source-ip)
        # and no update call.
        if cookie_name:
            mock_create_pp.assert_called_once_with(
                persistence_profile_id="%s_cookie" % LB_PP_ID,
                cookie_name=cookie_name,
                cookie_mode=cookie_mode,
                name=mock.ANY,
                tags=mock.ANY)
        else:
            mock_create_pp.assert_called_once_with(
                persistence_profile_id="%s_sourceip" % LB_PP_ID,
                name=mock.ANY,
                tags=mock.ANY)
        # Compare tags - kw args are the last item of a mock call tuple
        self.assertItemsEqual(mock_create_pp.mock_calls[0][-1]['tags'],
                              [{'scope': 'os-lbaas-lb-id',
                                'tag': 'xxx-xxx'},
                               {'scope': 'os-lbaas-lb-name',
                                'tag': 'lb1'},
                               {'scope': 'os-lbaas-listener-id',
                                'tag': 'listener-x'}])
        mock_update_pp.assert_not_called()

    def _verify_update(self, cookie_name, cookie_mode,
                       mock_create_pp, mock_update_pp):
        # Expect an update to the existing persistence profile and no
        # create call.
        if cookie_name:
            mock_update_pp.assert_called_once_with(
                "%s_cookie" % LB_PP_ID,
                cookie_name=cookie_name,
                cookie_mode=cookie_mode,
                name=mock.ANY,
                tags=mock.ANY)
        else:
            mock_update_pp.assert_called_once_with(
                "%s_sourceip" % LB_PP_ID,
                name=mock.ANY,
                tags=mock.ANY)
        # Compare tags - kw args are the last item of a mock call tuple
        self.assertItemsEqual(mock_update_pp.mock_calls[0][-1]['tags'],
                              [{'scope': 'os-lbaas-lb-id',
                                'tag': 'xxx-xxx'},
                               {'scope': 'os-lbaas-lb-name',
                                'tag': 'lb1'},
                               {'scope': 'os-lbaas-listener-id',
                                'tag': 'listener-x'}])
        mock_create_pp.assert_not_called()

    def _verify_delete(self, cookie_name, cookie_mode,
                       mock_create_pp, mock_update_pp):
        # Removing persistence should neither create nor update a profile.
        mock_create_pp.assert_not_called()
        mock_update_pp.assert_not_called()

    def _test_setup_session_persistence(self, session_persistence,
                                        vs_data, verify_func,
                                        cookie_name=None, cookie_mode=None,
                                        switch_type=False):
        """Drive p_utils.setup_session_persistence() directly.

        Checks the returned profile id suffix (sourceip/cookie) and, when
        persistence is removed, the post-processing delete closure args.
        """
        with mock.patch.object(self.pp_client, 'create_or_overwrite'
                               ) as mock_create_pp, \
            mock.patch.object(self.pp_cookie_client, 'create_or_overwrite'
                              ) as mock_create_cookie_pp, \
            mock.patch.object(self.pp_client, 'update', return_value=None,
                              ) as mock_update_pp,\
            mock.patch.object(self.pp_cookie_client, 'update',
                              return_value=None) as mock_update_cookie_pp:
            self.pool.session_persistence = session_persistence
            pool_dict = lb_translators.lb_pool_obj_to_dict(self.pool)
            pp_id, post_func = p_utils.setup_session_persistence(
                self.nsxpolicy, pool_dict, [], switch_type,
                self.listener_dict, vs_data)
            pp_id_suffix = ""
            if session_persistence:
                if session_persistence.type == "SOURCE_IP":
                    pp_id_suffix = "sourceip"
                elif session_persistence.type in ["HTTP_COOKIE",
                                                  "APP_COOKIE"]:
                    pp_id_suffix = "cookie"
                self.assertEqual("%s_%s" % (LB_PP_ID, pp_id_suffix), pp_id)
            else:
                self.assertIsNone(pp_id)
                self.assertEqual(
                    (self.nsxpolicy,
                     vs_data['lb_persistence_profile_path'],),
                    post_func.args)
            verify_func(cookie_name, cookie_mode,
                        mock_create_cookie_pp if cookie_name
                        else mock_create_pp,
                        mock_update_cookie_pp if cookie_name
                        else mock_update_pp)

    def test_setup_session_persistence_sourceip_new_profile(self):
        sess_persistence = lb_models.SessionPersistence(
            "%s_sourceip" % LB_PP_ID, 'SOURCE_IP')
        self._test_setup_session_persistence(
            sess_persistence, {'id': LB_VS_ID}, self._verify_create)

    def test_setup_session_persistence_httpcookie_new_profile(self):
        sess_persistence = lb_models.SessionPersistence(
            "%s_cookie" % LB_PP_ID, 'HTTP_COOKIE')
        self._test_setup_session_persistence(
            sess_persistence, {'id': LB_VS_ID}, self._verify_create,
            'default_cookie_name', 'INSERT')

    def test_setup_session_persistence_appcookie_new_profile(self):
        sess_persistence = lb_models.SessionPersistence(
            "%s_cookie" % LB_PP_ID, 'APP_COOKIE', 'whatever')
        self._test_setup_session_persistence(
            sess_persistence, {'id': LB_VS_ID}, self._verify_create,
            'whatever', 'REWRITE')

    def test_setup_session_persistence_none_from_existing(self):
        sess_persistence = None
        self._test_setup_session_persistence(
            sess_persistence,
            {'id': LB_VS_ID,
             'lb_persistence_profile_path': "%s_sourceip" % LB_PP_ID},
            self._verify_delete)

    def test_setup_session_persistence_sourceip_from_existing(self):
        sess_persistence = lb_models.SessionPersistence(
            "%s_sourceip" % LB_PP_ID, 'SOURCE_IP')
        self._test_setup_session_persistence(
            sess_persistence,
            {'id': LB_VS_ID,
             'lb_persistence_profile_path': "%s_sourceip" % LB_PP_ID},
            self._verify_update)

    def test_setup_session_persistence_httpcookie_from_existing(self):
        sess_persistence = lb_models.SessionPersistence(
            "%s_cookie" % LB_PP_ID, 'HTTP_COOKIE')
        self._test_setup_session_persistence(
            sess_persistence,
            {'id': LB_VS_ID,
             'lb_persistence_profile_path': '%s_cookie' % LB_PP_ID},
            self._verify_update,
            'default_cookie_name', 'INSERT')

    def test_setup_session_persistence_appcookie_from_existing(self):
        sess_persistence = lb_models.SessionPersistence(
            "%s_cookie" % LB_PP_ID, 'APP_COOKIE', 'whatever')
        self._test_setup_session_persistence(
            sess_persistence,
            {'id': LB_VS_ID,
             'lb_persistence_profile_path': '%s_cookie' % LB_PP_ID},
            self._verify_update,
            'whatever', 'REWRITE')


class TestEdgeLbaasV2Member(BaseTestEdgeLbaasV2):
    """Tests for the nsx_p member manager (NSX pool member CRUD)."""

    def setUp(self):
        super(TestEdgeLbaasV2Member, self).setUp()

    @property
    def _tested_entity(self):
        return 'member'

    def test_create(self):
        with mock.patch.object(self.lbv2_driver.plugin, 'get_pool_members'
                               ) as mock_get_pool_members, \
            mock.patch.object(lb_utils, 'get_network_from_subnet'
                              ) as mock_get_network, \
            mock.patch.object(lb_utils, 'get_router_from_network'
                              ) as mock_get_router, \
            mock.patch.object(self.service_client, 'get_router_lb_service'
                              ) as mock_get_lb_service, \
            mock.patch.object(self.pool_client, 'get'
                              ) as mock_get_pool, \
            mock.patch.object(self.pool_client,
                              'create_pool_member_and_add_to_pool'
                              ) as mock_update_pool_with_members:
            mock_get_pool_members.return_value = [self.member]
            mock_get_network.return_value = LB_NETWORK
            mock_get_router.return_value = LB_ROUTER_ID
            mock_get_lb_service.return_value = {'id': LB_SERVICE_ID}
            mock_get_pool.return_value = LB_POOL
            self.edge_driver.member.create(
                self.context, self.member_dict, self.completor)
            mock_update_pool_with_members.assert_called_with(
                LB_POOL_ID, MEMBER_ADDRESS,
                port=self.member_dict['protocol_port'],
                display_name=mock.ANY,
                weight=self.member_dict['weight'],
                backup_member=self.member_dict.get('backup', False),
                admin_state='ENABLED')
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)

    def test_create_external_vip(self):
        # Member creation when the VIP lives on an external network: the
        # driver must check for other loadbalancers on the service router
        # before proceeding.
        with mock.patch.object(self.lbv2_driver.plugin, 'get_pool_members'
                               ) as mock_get_pool_members, \
            mock.patch.object(lb_utils, 'get_network_from_subnet'
                              ) as mock_get_network, \
            mock.patch.object(lb_utils, 'get_router_from_network'
                              ) as mock_get_router, \
            mock.patch.object(self.service_client, 'get_router_lb_service'
                              ) as mock_get_lb_service, \
            mock.patch.object(self.service_client, 'get',
                              return_value={}), \
            mock.patch.object(self.core_plugin,
                              'service_router_has_loadbalancers',
                              return_value=False) as plugin_has_lb,\
            mock.patch.object(self.pool_client, 'get'
                              ) as mock_get_pool, \
            mock.patch.object(self.core_plugin, '_find_router_gw_subnets',
                              return_value=[]),\
            mock.patch.object(self.core_plugin, 'get_floatingips',
                              return_value=[{
                                  'fixed_ip_address': MEMBER_ADDRESS}]),\
            mock.patch.object(self.pool_client,
                              'create_pool_member_and_add_to_pool'
                              ) as mock_update_pool_with_members:
            mock_get_pool_members.return_value = [self.member]
            mock_get_network.return_value = EXT_LB_NETWORK
            mock_get_router.return_value = LB_ROUTER_ID
            mock_get_lb_service.return_value = {'id': LB_SERVICE_ID}
            mock_get_pool.return_value = LB_POOL
            self.edge_driver.member.create(
                self.context, self.member_dict, self.completor)

            mock_update_pool_with_members.assert_called_with(
                LB_POOL_ID, MEMBER_ADDRESS,
                port=self.member_dict['protocol_port'],
                display_name=mock.ANY,
                weight=self.member_dict['weight'],
                backup_member=self.member_dict.get('backup', False),
                admin_state='ENABLED')
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)
            plugin_has_lb.assert_called_once_with(mock.ANY, LB_ROUTER_ID)

    def test_update(self):
        # Update only changes member attributes (weight); no NSX pool
        # membership calls are asserted here, just successful completion.
        new_member = lb_models.Member(MEMBER_ID, LB_TENANT_ID, POOL_ID,
                                      MEMBER_ADDRESS, 80, 2,
                                      pool=self.pool,
                                      name='member-nnn-nnn')
        new_member_dict = lb_translators.lb_member_obj_to_dict(new_member)
        with mock.patch.object(self.pool_client, 'get'
                               ) as mock_get_pool, \
            mock.patch.object(lb_utils, 'get_network_from_subnet'
                              ) as mock_get_network_from_subnet:
            mock_get_pool.return_value = LB_POOL_WITH_MEMBER
            mock_get_network_from_subnet.return_value = LB_NETWORK
            self.edge_driver.member.update(self.context, self.member_dict,
                                           new_member_dict, self.completor)
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)

    def test_delete(self):
        # Deleting a member must remove it from the NSX pool by address/port.
        with mock.patch.object(self.pool_client, 'get'
                               ) as mock_get_pool, \
            mock.patch.object(lb_utils, 'get_network_from_subnet'
                              ) as mock_get_network_from_subnet, \
            mock.patch.object(self.pool_client, 'remove_pool_member'
                              ) as mock_update_pool_with_members:
            mock_get_pool.return_value = LB_POOL_WITH_MEMBER
            mock_get_network_from_subnet.return_value = LB_NETWORK
            self.edge_driver.member.delete(self.context, self.member_dict,
                                           self.completor)

            mock_update_pool_with_members.assert_called_with(
                LB_POOL_ID, MEMBER_ADDRESS,
                port=self.member_dict['protocol_port'])
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)


class TestEdgeLbaasV2HealthMonitor(BaseTestEdgeLbaasV2):
    # Tests for the health-monitor manager of the NSX edge LBaaS driver.

    def setUp(self):
        super(TestEdgeLbaasV2HealthMonitor, self).setUp()

    @property
    def _tested_entity(self):
        return 'health_monitor'

    def test_create(self):
        with mock.patch.object(self.monitor_client, 'create_or_overwrite'
                               ) as mock_create_monitor, \
            mock.patch.object(self.pool_client, 'add_monitor_to_pool'
                              ) as mock_add_monitor_to_pool:
            self.edge_driver.healthmonitor.create(
                self.context, self.hm_dict, self.completor)

            mock_create_monitor.assert_called_once()
            mock_add_monitor_to_pool.assert_called_with(
                LB_POOL_ID, mock.ANY)
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)

    def test_create_http(self):
        with mock.patch.object(self.http_monitor_client,
                               'create_or_overwrite'
                               ) as mock_create_monitor, \
            mock.patch.object(self.pool_client, 'add_monitor_to_pool'
                              ) as mock_add_monitor_to_pool:
            # Verify HTTP-specific monitor parameters are added
            self.edge_driver.healthmonitor.create(
                self.context, self.hm_http_dict, self.completor)
            # kwargs of the first call are the third item of the call tuple
            kw_args = mock_create_monitor.mock_calls[0][2]
            self.assertEqual(self.hm_http.http_method,
                             kw_args.get('request_method'))
            self.assertEqual(self.hm_http.url_path,
                             kw_args.get('request_url'))
            mock_add_monitor_to_pool.assert_called_with(
                LB_POOL_ID, mock.ANY)
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)

    def test_update(self):
        with mock.patch.object(self.monitor_client, 'update'
                               ) as mock_update_monitor:
            new_hm = lb_models.HealthMonitor(
                HM_ID, LB_TENANT_ID, 'PING', 5, 5, 5, pool=self.pool,
                name='new_name')
            new_hm_dict = lb_translators.lb_hm_obj_to_dict(new_hm)
            self.edge_driver.healthmonitor.update(
                self.context, self.hm_dict, new_hm_dict, self.completor)

            mock_update_monitor.assert_called_with(
                LB_MONITOR_ID, name=mock.ANY, fall_count=5, interval=5,
                timeout=5)
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)

    def test_delete(self):
        # Delete must detach the monitor from the pool before deleting it.
        with mock.patch.object(self.pool_client, 'remove_monitor_from_pool'
                               ) as mock_remove_monitor_from_pool, \
            mock.patch.object(self.monitor_client, 'delete'
                              ) as mock_delete_monitor:
            self.edge_driver.healthmonitor.delete(
                self.context, self.hm_dict, self.completor)

            mock_remove_monitor_from_pool.assert_called_with(
                LB_POOL_ID, mock.ANY)
            mock_delete_monitor.assert_called_with(LB_MONITOR_ID)
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)


class TestEdgeLbaasV2L7Policy(BaseTestEdgeLbaasV2):
    # Tests for the L7 policy manager of the NSX edge LBaaS driver.

    def setUp(self):
        super(TestEdgeLbaasV2L7Policy, self).setUp()

    @property
    def _tested_entity(self):
        return 'l7policy'

    def test_create(self):
        with mock.patch.object(self.vs_client, 'get'
                               ) as mock_get_virtual_server, \
            mock.patch.object(self.vs_client, 'add_lb_rule'
                              ) as mock_update_virtual_server:
            mock_get_virtual_server.return_value = {'id': LB_VS_ID}
            self.edge_driver.l7policy.create(
                self.context, self.l7policy_dict, self.completor)

            mock_update_virtual_server.assert_called_once()
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)

    def test_update(self):
        # Reordering/changing a policy updates the LB rule on a virtual
        # server that already carries other rules.
        new_l7policy = lb_models.L7Policy(L7POLICY_ID, LB_TENANT_ID,
                                          name='new-policy',
                                          listener_id=LISTENER_ID,
                                          action='REJECT',
                                          listener=self.listener,
                                          position=2)
        new_policy_dict = lb_translators.lb_l7policy_obj_to_dict(new_l7policy)
        vs_with_rules = {
            'id': LB_VS_ID,
            'rule_ids': [LB_RULE_ID, 'abc', 'xyz']
        }
        with mock.patch.object(self.vs_client, 'get'
                               ) as mock_get_virtual_server, \
            mock.patch.object(self.vs_client, 'update_lb_rule'
                              ) as mock_update_virtual_server:
            mock_get_virtual_server.return_value = vs_with_rules
            self.edge_driver.l7policy.update(self.context,
                                             self.l7policy_dict,
                                             new_policy_dict, self.completor)

            mock_update_virtual_server.assert_called_once()
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)

    def test_delete(self):
        with mock.patch.object(self.vs_client, 'remove_lb_rule'
                               ) as mock_vs_remove_rule:
            self.edge_driver.l7policy.delete(
                self.context, self.l7policy_dict, self.completor)

            mock_vs_remove_rule.assert_called_with(LB_VS_ID, mock.ANY)
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)


class TestEdgeLbaasV2L7Rule(BaseTestEdgeLbaasV2):
    # Tests for the L7 rule manager; rule changes are applied by rewriting
    # the owning policy's LB rule on the virtual server.

    def setUp(self):
        super(TestEdgeLbaasV2L7Rule, self).setUp()

    @property
    def _tested_entity(self):
        return 'l7rule'

    def test_create(self):
        self.l7policy.rules = [self.l7rule]
        with mock.patch.object(self.vs_client, 'update_lb_rule'
                               ) as mock_update_virtual_server:
            self.edge_driver.l7rule.create(
                self.context, self.l7rule_dict, self.completor)

            mock_update_virtual_server.assert_called_once()
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)

    def test_update(self):
        new_l7rule = lb_models.L7Rule(L7RULE_ID, LB_TENANT_ID,
                                      l7policy_id=L7POLICY_ID,
                                      compare_type='STARTS_WITH',
                                      invert=True,
                                      type='COOKIE',
                                      key='cookie1',
                                      value='xxxxx',
                                      policy=self.l7policy)
        new_rule_dict = lb_translators.lb_l7rule_obj_to_dict(new_l7rule)
        self.l7policy.rules = [new_l7rule]
        with mock.patch.object(self.vs_client, 'update_lb_rule'
                               ) as mock_update_virtual_server:
            self.edge_driver.l7rule.update(self.context, self.l7rule_dict,
                                           new_rule_dict, self.completor)

            mock_update_virtual_server.assert_called_once()
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)

    def test_delete(self):
        self.l7policy.rules = [self.l7rule]
        with mock.patch.object(self.vs_client, 'update_lb_rule'
                               ) as mock_update_virtual_server:
            self.edge_driver.l7rule.delete(
                self.context, self.l7rule_dict, self.completor)

            mock_update_virtual_server.assert_called_once()
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0
vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/lbaas/test_nsxv3_driver.py0000644000175000017500000025504500000000000030476 0ustar00coreycorey00000000000000
# Copyright (c) 2017 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from neutron.tests import base
from neutron_lib import context
from neutron_lib import exceptions as n_exc

from vmware_nsx.db import db as nsx_db
from vmware_nsx.db import nsx_models
from vmware_nsx.services.lbaas import base_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import healthmonitor_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import l7policy_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import l7rule_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import lb_utils
from vmware_nsx.services.lbaas.nsx_v3.implementation import listener_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import loadbalancer_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import member_mgr
from vmware_nsx.services.lbaas.nsx_v3.implementation import pool_mgr
from vmware_nsx.services.lbaas.octavia import octavia_listener
from vmware_nsx.tests.unit.services.lbaas import lb_data_models as lb_models
from vmware_nsx.tests.unit.services.lbaas import lb_translators

# TODO(asarfaty): Use octavia models for those tests

# Shared fixture identifiers and canned NSX/neutron objects used by every
# test class in this module.
LB_VIP = '10.0.0.10'
LB_ROUTER_ID = 'router-x'
ROUTER_ID = 'neutron-router-x'
LB_ID = 'xxx-xxx'
LB_TENANT_ID = 'yyy-yyy'
LB_SERVICE_ID = 'service-1'
LB_BINDING = nsx_models.NsxLbaasLoadbalancer(
    loadbalancer_id=LB_ID,
    lb_service_id=LB_SERVICE_ID,
    lb_router_id=LB_ROUTER_ID,
    vip_address=LB_VIP)
# Binding of a loadbalancer whose VIP is not attached to a router
LB_BINDING_NO_RTR = nsx_models.NsxLbaasLoadbalancer(
    loadbalancer_id=LB_ID,
    lb_service_id=LB_SERVICE_ID,
    lb_router_id=lb_utils.NO_ROUTER_ID,
    vip_address=LB_VIP)
LB_NETWORK = {'router:external': False,
              'id': 'xxxxx',
              'name': 'network-1'}
LISTENER_ID = 'listener-x'
HTTP_LISTENER_ID = 'listener-http'
HTTPS_LISTENER_ID = 'listener-https'
APP_PROFILE_ID = 'appp-x'
LB_VS_ID = 'vs-x'
# Sample NSX application profile body
LB_APP_PROFILE = {
    "resource_type": "LbHttpProfile",
    "description": "my http profile",
    "id": APP_PROFILE_ID,
    "display_name": "httpprofile1",
    "ntlm": False,
    "request_header_size": 1024,
    "http_redirect_to_https": False,
    "idle_timeout": 1800,
    "x_forwarded_for": "INSERT",
}
LISTENER_BINDING = nsx_models.NsxLbaasListener(
    loadbalancer_id=LB_ID,
    listener_id=LISTENER_ID,
    app_profile_id=APP_PROFILE_ID,
    lb_vs_id=LB_VS_ID)
POOL_ID = 'ppp-qqq'
LB_POOL_ID = 'pool-xx'
LB_POOL = {
    "display_name": "httppool1",
    "description": "my http pool",
    "id": LB_POOL_ID,
    "algorithm": "ROUND_ROBIN",
}
POOL_BINDING = nsx_models.NsxLbaasPool(loadbalancer_id=LB_ID,
                                       pool_id=POOL_ID,
                                       lb_pool_id=LB_POOL_ID,
                                       lb_vs_id=LB_VS_ID)
MEMBER_ID = 'mmm-mmm'
MEMBER_ADDRESS = '10.0.0.200'
LB_MEMBER = {'display_name': 'member1_' + MEMBER_ID,
             'weight': 1,
             'ip_address': MEMBER_ADDRESS,
             'port': 80,
             'backup_member': False}
LB_POOL_WITH_MEMBER = {
    "display_name": "httppool1",
    "description": "my http pool",
    "id": LB_POOL_ID,
    "algorithm": "ROUND_ROBIN",
    "members": [
        {
            "display_name": "http-member1",
            "ip_address": MEMBER_ADDRESS,
            "port": "80",
            "weight": "1",
            "admin_state": "ENABLED"
        }
    ]
}
HM_ID = 'hhh-mmm'
LB_MONITOR_ID = 'mmm-ddd'
HM_BINDING = nsx_models.NsxLbaasMonitor(loadbalancer_id=LB_ID,
                                        pool_id=POOL_ID,
                                        hm_id=HM_ID,
                                        lb_monitor_id=LB_MONITOR_ID,
                                        lb_pool_id=LB_POOL_ID)
L7POLICY_ID = 'l7policy-xxx'
LB_RULE_ID = 'lb-rule-xx'
L7RULE_ID = 'l7rule-111'
L7POLICY_BINDING = nsx_models.NsxLbaasL7Policy(l7policy_id=L7POLICY_ID,
                                               lb_vs_id=LB_VS_ID,
                                               lb_rule_id=LB_RULE_ID)
LB_PP_ID = "ppp-ppp"
FAKE_CERT = {'id': 'cert-xyz'}
# Canned NSX LB service status report: service/VS up, pool and member down
SERVICE_STATUSES = {
    "virtual_servers": [{
        "virtual_server_id": LB_VS_ID,
        "status": "UP"
    }],
    "service_id": LB_SERVICE_ID,
    "service_status": "UP",
    "pools": [{
        "members": [{
            "port": "80",
            "ip_address": MEMBER_ADDRESS,
            "status": "DOWN"
        }],
        "pool_id": LB_POOL_ID,
        "status": "DOWN"
    }]
}
VS_STATUSES = {
    "results": [{
        "virtual_server_id": LB_VS_ID,
        "status": "UP"
    }]
}


class BaseTestEdgeLbaasV2(base.BaseTestCase):
    # Common fixture: builds the octavia endpoint backed by the NSX-V3
    # managers, mocks the LBaaS plugin and nsxlib clients, and prepares
    # sample LBaaS objects plus their dict translations.

    def _tested_entity(self):
        return None

    def completor(self, success=True):
        # Completion callback handed to every driver call; tests assert on
        # these two flags afterwards.
        # NOTE(review): 'succees' is a long-standing typo kept for
        # consistency with every assertion in this module.
        self.last_completor_succees = success
        self.last_completor_called = True

    def setUp(self):
        super(BaseTestEdgeLbaasV2, self).setUp()
        self.last_completor_succees = False
        self.last_completor_called = False
        self.context = context.get_admin_context()
        # Wire all the NSX-V3 resource managers into the octavia endpoint
        octavia_objects = {
            'loadbalancer': loadbalancer_mgr.EdgeLoadBalancerManagerFromDict(),
            'listener': listener_mgr.EdgeListenerManagerFromDict(),
            'pool': pool_mgr.EdgePoolManagerFromDict(),
            'member': member_mgr.EdgeMemberManagerFromDict(),
            'healthmonitor':
                healthmonitor_mgr.EdgeHealthMonitorManagerFromDict(),
            'l7policy': l7policy_mgr.EdgeL7PolicyManagerFromDict(),
            'l7rule': l7rule_mgr.EdgeL7RuleManagerFromDict()}

        self.edge_driver = octavia_listener.NSXOctaviaListenerEndpoint(
            **octavia_objects)

        self.lbv2_driver = mock.Mock()
        self.core_plugin = mock.Mock()
        base_mgr.LoadbalancerBaseManager._lbv2_driver = self.lbv2_driver
        base_mgr.LoadbalancerBaseManager._core_plugin = self.core_plugin
        self._patch_lb_plugin(self.lbv2_driver, self._tested_entity)
        self._patch_nsxlib_lb_clients(self.core_plugin)

        # Sample LBaaS model objects shared by the test classes below
        self.lb = lb_models.LoadBalancer(LB_ID, LB_TENANT_ID, 'lb1', '',
                                         'some-subnet', 'port-id', LB_VIP)
        self.listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID,
                                           'listener1', 'Dummy', None, LB_ID,
                                           'HTTP', protocol_port=80,
                                           loadbalancer=self.lb)
        self.https_listener = lb_models.Listener(
            HTTP_LISTENER_ID, LB_TENANT_ID, 'listener2', '', None, LB_ID,
            'HTTPS', protocol_port=443, loadbalancer=self.lb)
        self.terminated_https_listener = lb_models.Listener(
            HTTPS_LISTENER_ID, LB_TENANT_ID, 'listener3', '', None, LB_ID,
            'TERMINATED_HTTPS', protocol_port=443, loadbalancer=self.lb)
        self.pool = lb_models.Pool(POOL_ID, LB_TENANT_ID, 'pool1', '',
                                   None, 'HTTP', 'ROUND_ROBIN',
                                   loadbalancer_id=LB_ID,
                                   listener=self.listener,
                                   listeners=[self.listener],
                                   loadbalancer=self.lb)
        self.sess_persistence = lb_models.SessionPersistence(
            POOL_ID, 'HTTP_COOKIE', 'meh_cookie')
        self.pool_persistency = lb_models.Pool(
            POOL_ID, LB_TENANT_ID, 'pool1', '', None, 'HTTP', 'ROUND_ROBIN',
            loadbalancer_id=LB_ID,
            listener=self.listener,
            listeners=[self.listener],
            loadbalancer=self.lb,
            session_persistence=self.sess_persistence)
        self.member = lb_models.Member(MEMBER_ID, LB_TENANT_ID, POOL_ID,
                                       MEMBER_ADDRESS, 80, 1,
                                       pool=self.pool, name='member1')
        self.hm = lb_models.HealthMonitor(HM_ID, LB_TENANT_ID, 'PING', 3, 3,
                                          1, pool=self.pool, name='hm1')
        self.hm_http = lb_models.HealthMonitor(HM_ID, LB_TENANT_ID, 'HTTP',
                                               3, 3, 1, pool=self.pool,
                                               http_method='GET',
                                               url_path="/meh", name='hm2')
        self.l7policy = lb_models.L7Policy(L7POLICY_ID, LB_TENANT_ID,
                                           name='policy-test',
                                           description='policy-desc',
                                           listener_id=LISTENER_ID,
                                           action='REDIRECT_TO_POOL',
                                           redirect_pool_id=POOL_ID,
                                           listener=self.listener,
                                           position=1)
        self.l7rule = lb_models.L7Rule(L7RULE_ID, LB_TENANT_ID,
                                       l7policy_id=L7POLICY_ID,
                                       compare_type='EQUAL_TO',
                                       invert=False,
                                       type='HEADER',
                                       key='key1',
                                       value='val1',
                                       policy=self.l7policy)

        # Translate LBaaS objects to dictionaries
        self.lb_dict = lb_translators.lb_loadbalancer_obj_to_dict(
            self.lb)
        self.listener_dict = lb_translators.lb_listener_obj_to_dict(
            self.listener)
        self.https_listener_dict = lb_translators.lb_listener_obj_to_dict(
            self.https_listener)
        self.terminated_https_listener_dict = lb_translators.\
            lb_listener_obj_to_dict(self.terminated_https_listener)
        self.pool_dict = lb_translators.lb_pool_obj_to_dict(
            self.pool)
        self.pool_persistency_dict = lb_translators.lb_pool_obj_to_dict(
            self.pool_persistency)
        self.member_dict = lb_translators.lb_member_obj_to_dict(
            self.member)
        self.hm_dict = lb_translators.lb_hm_obj_to_dict(
            self.hm)
        self.hm_http_dict = lb_translators.lb_hm_obj_to_dict(
            self.hm_http)
        self.l7policy_dict = lb_translators.lb_l7policy_obj_to_dict(
            self.l7policy)
        self.l7rule_dict = lb_translators.lb_l7rule_obj_to_dict(
            self.l7rule)

    def tearDown(self):
        self._unpatch_lb_plugin(self.lbv2_driver, self._tested_entity)
        super(BaseTestEdgeLbaasV2, self).tearDown()

    def _patch_lb_plugin(self, lb_plugin, manager):
        # Replace the plugin's manager for the tested entity with a mock,
        # remembering the real one so tearDown can restore it.
        self.real_manager = getattr(lb_plugin, manager)
        lb_manager = mock.patch.object(lb_plugin, manager).start()
        mock.patch.object(lb_manager, 'create').start()
        mock.patch.object(lb_manager, 'update').start()
        mock.patch.object(lb_manager, 'delete').start()
        mock.patch.object(lb_manager, 'successful_completion').start()

    def _patch_nsxlib_lb_clients(self, core_plugin):
        # Mock out every nsxlib load-balancer client and keep handles on
        # self so the tests can set return values / assert calls.
        nsxlib = mock.patch.object(core_plugin, 'nsxlib').start()
        load_balancer = mock.patch.object(nsxlib, 'load_balancer').start()
        self.service_client = mock.patch.object(load_balancer,
                                                'service').start()
        self.app_client = mock.patch.object(load_balancer,
                                            'application_profile').start()
        self.vs_client = mock.patch.object(load_balancer,
                                           'virtual_server').start()
        self.pool_client = mock.patch.object(load_balancer,
                                             'pool').start()
        self.monitor_client = mock.patch.object(load_balancer,
                                                'monitor').start()
        self.rule_client = mock.patch.object(load_balancer,
                                             'rule').start()
        self.pp_client = mock.patch.object(load_balancer,
                                           'persistence_profile').start()
        self.tm_client = mock.patch.object(nsxlib,
                                           'trust_management').start()
        self.nsxlib = nsxlib

    def _unpatch_lb_plugin(self, lb_plugin, manager):
        setattr(lb_plugin, manager, self.real_manager)


class TestEdgeLbaasV2Loadbalancer(BaseTestEdgeLbaasV2):
    # Tests for the loadbalancer manager of the NSX edge LBaaS driver.

    def setUp(self):
        super(TestEdgeLbaasV2Loadbalancer, self).setUp()

    @property
    def _tested_entity(self):
        return 'load_balancer'

    def test_create(self):
        # No LB service exists yet on the router: one must be created and
        # a DB binding added.
        neutron_router = {'id': ROUTER_ID, 'name': 'dummy',
                          'external_gateway_info': {
                              'external_fixed_ips': []}}
        with mock.patch.object(lb_utils, 'validate_lb_subnet'
                               ) as mock_validate_lb_subnet,\
            mock.patch.object(lb_utils, 'get_router_from_network',
                              return_value=ROUTER_ID),\
            mock.patch.object(self.core_plugin, 'get_router',
                              return_value=neutron_router), \
            mock.patch.object(self.core_plugin, '_find_router_gw_subnets',
                              return_value=[]),\
            mock.patch.object(nsx_db,
                              'get_nsx_lbaas_loadbalancer_binding'),\
            mock.patch.object(nsx_db, 'get_nsx_router_id',
                              return_value=LB_ROUTER_ID),\
            mock.patch.object(self.service_client, 'get_router_lb_service',
                              return_value=None),\
            mock.patch.object(self.service_client, 'create',
                              return_value={'id': LB_SERVICE_ID}
                              ) as create_service,\
            mock.patch.object(nsx_db, 'add_nsx_lbaas_loadbalancer_binding'
                              ) as add_binding:
            mock_validate_lb_subnet.return_value = True

            self.edge_driver.loadbalancer.create(
                self.context, self.lb_dict, self.completor)
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)
            add_binding.assert_called_once_with(mock.ANY, LB_ID,
                                                LB_SERVICE_ID,
                                                LB_ROUTER_ID, LB_VIP)
            create_service.assert_called_once()

    def test_create_service_exists(self):
        # A LB service already exists on the router: it must be reused and
        # no new service created.
        with mock.patch.object(lb_utils, 'validate_lb_subnet'
                               ) as mock_validate_lb_subnet,\
            mock.patch.object(lb_utils, 'get_router_from_network'),\
            mock.patch.object(nsx_db,
                              'get_nsx_lbaas_loadbalancer_binding'),\
            mock.patch.object(nsx_db, 'get_nsx_router_id',
                              return_value=LB_ROUTER_ID),\
            mock.patch.object(self.service_client, 'get_router_lb_service',
                              return_value={'id': LB_SERVICE_ID}),\
            mock.patch.object(self.service_client,
                              'create') as create_service,\
            mock.patch.object(nsx_db, 'add_nsx_lbaas_loadbalancer_binding'
                              ) as add_binding:
            mock_validate_lb_subnet.return_value = True

            self.edge_driver.loadbalancer.create(self.context, self.lb_dict,
                                                 self.completor)
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)
            add_binding.assert_called_once_with(mock.ANY, LB_ID,
                                                LB_SERVICE_ID,
                                                LB_ROUTER_ID, LB_VIP)
            create_service.assert_not_called()

    def test_create_external_vip(self):
        # VIP on an external (router-less) network: a service is created
        # and the binding stores NO_ROUTER_ID.
        with mock.patch.object(lb_utils, 'validate_lb_subnet'
                               ) as mock_validate_lb_subnet,\
            mock.patch.object(lb_utils, 'get_router_from_network',
                              return_value=None),\
            mock.patch.object(nsx_db,
                              'get_nsx_lbaas_loadbalancer_binding'),\
            mock.patch.object(nsx_db, 'get_nsx_router_id'),\
            mock.patch.object(self.service_client, 'get_router_lb_service',
                              return_value=None),\
            mock.patch.object(self.service_client, 'create',
                              return_value={'id': LB_SERVICE_ID}
                              ) as create_service,\
            mock.patch.object(nsx_db, 'add_nsx_lbaas_loadbalancer_binding'
                              ) as add_binding:
            mock_validate_lb_subnet.return_value = True

            self.edge_driver.loadbalancer.create(self.context, self.lb_dict,
                                                 self.completor)
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)
            add_binding.assert_called_once_with(mock.ANY, LB_ID,
                                                LB_SERVICE_ID,
                                                lb_utils.NO_ROUTER_ID,
                                                LB_VIP)
            create_service.assert_called_once()

    def test_update(self):
        new_lb = lb_models.LoadBalancer(LB_ID, 'yyy-yyy', 'lb1-new',
                                        'new-description', 'some-subnet',
                                        'port-id', LB_VIP)
        new_lb_dict = lb_translators.lb_loadbalancer_obj_to_dict(new_lb)
        self.edge_driver.loadbalancer.update(self.context, self.lb_dict,
                                             new_lb_dict, self.completor)
        self.assertTrue(self.last_completor_called)
        self.assertTrue(self.last_completor_succees)

    def test_delete(self):
        # Deleting the last LB on the service deletes the NSX LB service
        # and removes the DB binding.
        with mock.patch.object(nsx_db, 'get_nsx_lbaas_loadbalancer_binding'
                               ) as mock_get_lb_binding, \
            mock.patch.object(self.service_client, 'get'
                              ) as mock_get_lb_service, \
            mock.patch.object(self.service_client, 'delete'
                              ) as mock_delete_lb_service, \
            mock.patch.object(nsx_db, 'get_neutron_from_nsx_router_id'
                              ) as mock_get_neutron_from_nsx_router_id, \
            mock.patch.object(nsx_db,
                              'delete_nsx_lbaas_loadbalancer_binding'
                              ) as mock_delete_lb_binding:
            mock_get_lb_binding.return_value = LB_BINDING
            mock_get_lb_service.return_value = {'id': LB_SERVICE_ID}

            self.edge_driver.loadbalancer.delete(self.context, self.lb_dict,
                                                 self.completor)

            mock_delete_lb_service.assert_called_with(LB_SERVICE_ID)
            mock_get_neutron_from_nsx_router_id.router_id = ROUTER_ID
            mock_delete_lb_binding.assert_called_with(
                self.context.session, LB_ID)
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)

    def test_stats(self):
        pass

    def test_refresh(self):
        pass

    def test_status_update(self):
        # Map NSX service/VS/pool/member statuses onto the octavia
        # ONLINE/OFFLINE operating statuses.
        with mock.patch.object(nsx_db, 'get_nsx_lbaas_loadbalancer_binding'
                               ) as mock_get_lb_binding, \
            mock.patch.object(self.service_client, 'get_status'
                              ) as mock_get_lb_service_status, \
            mock.patch.object(self.service_client,
                              'get_virtual_servers_status'
                              ) as mock_get_vs_status, \
            mock.patch.object(nsx_db,
                              'get_nsx_lbaas_pool_binding_by_lb_pool'
                              ) as mock_get_pool_binding, \
            mock.patch.object(self.pool_client, 'get'
                              ) as mock_get_pool, \
            mock.patch.object(nsx_db,
                              'get_nsx_lbaas_listener_binding_by_lb_and_vs'
                              ) as mock_get_listener_binding:
            mock_get_lb_binding.return_value = LB_BINDING
            mock_get_pool_binding.return_value = POOL_BINDING
            mock_get_listener_binding.return_value = LISTENER_BINDING
            mock_get_lb_service_status.return_value = SERVICE_STATUSES
            mock_get_vs_status.return_value = VS_STATUSES
            mock_get_pool.return_value = LB_POOL_WITH_MEMBER
            statuses = self.edge_driver.loadbalancer.get_operating_status(
                self.context, self.lb.id, with_members=True)
            self.assertEqual(1, len(statuses['loadbalancers']))
            self.assertEqual('ONLINE',
                             statuses['loadbalancers'][0]['status'])
            self.assertEqual(1, len(statuses['pools']))
            self.assertEqual('OFFLINE', statuses['pools'][0]['status'])
            self.assertEqual(1, len(statuses['listeners']))
            self.assertEqual('ONLINE', statuses['listeners'][0]['status'])
            self.assertEqual(1, len(statuses['members']))
            self.assertEqual('OFFLINE', statuses['members'][0]['status'])


class TestEdgeLbaasV2Listener(BaseTestEdgeLbaasV2):
    # Tests for the listener manager of the NSX edge LBaaS driver.

    def setUp(self):
        super(TestEdgeLbaasV2Listener, self).setUp()

    @property
    def _tested_entity(self):
        return 'listener'

    def _create_listener(self, protocol='HTTP'):
        # Shared flow: create a listener of the given protocol and verify
        # the virtual server is attached to the LB service and bound in DB.
        with mock.patch.object(self.core_plugin, 'get_floatingips'
                               ) as mock_get_floatingips, \
            mock.patch.object(self.app_client, 'create'
                              ) as mock_create_app_profile, \
            mock.patch.object(self.vs_client, 'create'
                              ) as mock_create_virtual_server, \
            mock.patch.object(nsx_db, 'get_nsx_lbaas_loadbalancer_binding'
                              ) as mock_get_lb_binding, \
            mock.patch.object(self.service_client, 'add_virtual_server'
                              ) as mock_add_virtual_server, \
            mock.patch.object(nsx_db, 'add_nsx_lbaas_listener_binding'
                              ) as mock_add_listener_binding:
            mock_get_floatingips.return_value = []
            mock_create_app_profile.return_value = {'id': APP_PROFILE_ID}
            mock_create_virtual_server.return_value = {'id': LB_VS_ID}
            mock_get_lb_binding.return_value = LB_BINDING
            listener = self.listener_dict
            if protocol == 'HTTPS':
                listener = self.https_listener_dict

            self.edge_driver.listener.create(self.context, listener,
                                             self.completor)

            mock_add_virtual_server.assert_called_with(LB_SERVICE_ID,
                                                       LB_VS_ID)
            mock_add_listener_binding.assert_called_with(
                self.context.session, LB_ID, listener['id'],
                APP_PROFILE_ID, LB_VS_ID)
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)

    def test_create_http_listener(self):
        self._create_listener()

    def test_create_https_listener(self):
        self._create_listener(protocol='HTTPS')

    def test_create_terminated_https(self):
        # TERMINATED_HTTPS additionally uploads the certificate via the
        # trust-management client.
        with mock.patch.object(self.core_plugin, 'get_floatingips'
                               ) as mock_get_floatingips, \
            mock.patch.object(self.tm_client, 'create_cert'
                              ) as mock_create_cert, \
            mock.patch.object(self.app_client, 'create'
                              ) as mock_create_app_profile, \
            mock.patch.object(self.vs_client, 'create'
                              ) as mock_create_virtual_server, \
            mock.patch.object(nsx_db, 'get_nsx_lbaas_loadbalancer_binding'
                              ) as mock_get_lb_binding, \
            mock.patch.object(self.service_client, 'add_virtual_server'
                              ) as mock_add_virtual_server, \
            mock.patch.object(nsx_db, 'add_nsx_lbaas_listener_binding'
                              ) as mock_add_listener_binding:
            mock_get_floatingips.return_value = []
            mock_create_cert.return_value = FAKE_CERT['id']
            mock_create_app_profile.return_value = {'id': APP_PROFILE_ID}
            mock_create_virtual_server.return_value = {'id': LB_VS_ID}
            mock_get_lb_binding.return_value = LB_BINDING

            self.edge_driver.listener.create(
                self.context, self.terminated_https_listener_dict,
                self.completor)

            mock_add_virtual_server.assert_called_with(LB_SERVICE_ID,
                                                       LB_VS_ID)
            mock_add_listener_binding.assert_called_with(
                self.context.session, LB_ID, HTTPS_LISTENER_ID,
                APP_PROFILE_ID, LB_VS_ID)
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)

    def test_create_listener_with_default_pool(self):
        listener = lb_models.Listener(LISTENER_ID,
                                      LB_TENANT_ID,
                                      'listener1', 'Dummy',
                                      self.pool.id, LB_ID, 'HTTP',
                                      protocol_port=80,
                                      loadbalancer=self.lb,
                                      default_pool=self.pool)
        listener_dict = lb_translators.lb_listener_obj_to_dict(listener)
        with mock.patch.object(self.core_plugin, 'get_floatingips'
                               ) as mock_get_floatingips, \
            mock.patch.object(self.app_client, 'create'
                              ) as mock_create_app_profile, \
            mock.patch.object(self.vs_client, 'create'
                              ) as mock_create_virtual_server, \
            mock.patch.object(nsx_db, 'get_nsx_lbaas_loadbalancer_binding'
                              ) as mock_get_lb_binding, \
            mock.patch.object(self.service_client, 'add_virtual_server'
                              ) as mock_add_virtual_server, \
            mock.patch.object(nsx_db, 'add_nsx_lbaas_listener_binding'
                              ) as mock_add_listener_binding,\
            mock.patch.object(nsx_db, 'update_nsx_lbaas_pool_binding'),\
            mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding'
                              ) as mock_get_pool_binding:
            mock_get_floatingips.return_value = []
            mock_create_app_profile.return_value = {'id': APP_PROFILE_ID}
            mock_create_virtual_server.return_value = {'id': LB_VS_ID}
            mock_get_lb_binding.return_value = LB_BINDING
            # Default pool is not yet bound to any virtual server
            mock_get_pool_binding.return_value = None

            self.edge_driver.listener.create(self.context, listener_dict,
                                             self.completor)

            mock_add_virtual_server.assert_called_with(LB_SERVICE_ID,
                                                       LB_VS_ID)
            mock_add_listener_binding.assert_called_with(
                self.context.session, LB_ID, LISTENER_ID, APP_PROFILE_ID,
                LB_VS_ID)
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)

    def test_create_listener_with_used_default_pool(self):
        # A default pool already bound to another virtual server must be
        # rejected with BadRequest.
        listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID,
                                      'listener1', 'Dummy',
                                      self.pool.id, LB_ID, 'HTTP',
                                      protocol_port=80,
                                      loadbalancer=self.lb,
                                      default_pool=self.pool)
        listener_dict = lb_translators.lb_listener_obj_to_dict(listener)
        with mock.patch.object(self.core_plugin, 'get_floatingips'
                               ) as mock_get_floatingips, \
            mock.patch.object(nsx_db, 'get_nsx_lbaas_loadbalancer_binding'
                              ) as mock_get_lb_binding, \
            mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding'
                              ) as mock_get_pool_binding:
            mock_get_floatingips.return_value = []
            mock_get_lb_binding.return_value = LB_BINDING
            mock_get_pool_binding.return_value = POOL_BINDING

            self.assertRaises(n_exc.BadRequest,
                              self.edge_driver.listener.create,
                              self.context,
                              listener_dict,
                              self.completor)

    def test_create_listener_with_session_persistence(self):
        # HTTP listener whose default pool carries session persistency:
        # a persistence profile must be created alongside the listener.
        listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID,
                                      'listener1', 'Dummy',
                                      self.pool_persistency.id, LB_ID,
                                      'HTTP',
                                      protocol_port=80,
                                      loadbalancer=self.lb,
                                      default_pool=self.pool_persistency)
        listener_dict = lb_translators.lb_listener_obj_to_dict(listener)
        with mock.patch.object(self.core_plugin, 'get_floatingips'
                               ) as mock_get_floatingips, \
            mock.patch.object(self.app_client, 'create'
                              ) as mock_create_app_profile, \
            mock.patch.object(self.vs_client, 'create'
                              ) as mock_create_virtual_server, \
            mock.patch.object(nsx_db, 'get_nsx_lbaas_loadbalancer_binding'
                              ) as mock_get_lb_binding, \
            mock.patch.object(self.service_client, 'add_virtual_server'
                              ) as mock_add_virtual_server, \
            mock.patch.object(nsx_db, 'add_nsx_lbaas_listener_binding'
                              ) as mock_add_listener_binding,\
            mock.patch.object(nsx_db, 'update_nsx_lbaas_pool_binding'),\
            mock.patch.object(self.pp_client, 'create'
                              ) as mock_create_pp, \
            mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding'
                              ) as mock_get_pool_binding:
            mock_get_floatingips.return_value = []
            mock_create_app_profile.return_value = {'id': APP_PROFILE_ID}
            mock_create_virtual_server.return_value = {'id': LB_VS_ID}
            mock_get_lb_binding.return_value = LB_BINDING
            mock_get_pool_binding.return_value = None

            self.edge_driver.listener.create(self.context, listener_dict,
                                             self.completor)

            mock_add_virtual_server.assert_called_with(LB_SERVICE_ID,
                                                       LB_VS_ID)
            mock_add_listener_binding.assert_called_with(
                self.context.session, LB_ID, LISTENER_ID, APP_PROFILE_ID,
                LB_VS_ID)
            mock_create_pp.assert_called_once()
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)

    def test_create_listener_with_session_persistence_fail(self):
        # Session persistency on a TCP listener is unsupported and must
        # raise BadRequest.
        listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID,
                                      'listener1', 'Dummy',
                                      self.pool_persistency.id, LB_ID,
                                      'TCP',
                                      protocol_port=80,
                                      loadbalancer=self.lb,
                                      default_pool=self.pool_persistency)
        listener_dict = lb_translators.lb_listener_obj_to_dict(listener)
        with mock.patch.object(self.core_plugin, 'get_floatingips'
                               ) as mock_get_floatingips, \
            mock.patch.object(nsx_db, 'get_nsx_lbaas_loadbalancer_binding'
                              ) as mock_get_lb_binding, \
            mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding'
                              ) as mock_get_pool_binding:
            mock_get_floatingips.return_value = []
            mock_get_lb_binding.return_value = LB_BINDING
            mock_get_pool_binding.return_value = None

            self.assertRaises(n_exc.BadRequest,
                              self.edge_driver.listener.create,
                              self.context,
                              listener_dict,
                              self.completor)

    def test_update(self):
        new_listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID,
                                          'listener1-new',
                                          'new-description',
                                          None, LB_ID,
                                          protocol_port=80,
                                          loadbalancer=self.lb)
        new_listener_dict = lb_translators.lb_listener_obj_to_dict(
            new_listener)
        with mock.patch.object(self.core_plugin, 'get_floatingips'
                               ) as mock_get_floatingips, \
            mock.patch.object(nsx_db, 'get_nsx_lbaas_listener_binding'
                              ) as mock_get_listener_binding:
            mock_get_floatingips.return_value = []
            mock_get_listener_binding.return_value = LISTENER_BINDING

            self.edge_driver.listener.update(self.context,
                                             self.listener_dict,
                                             new_listener_dict,
                                             self.completor)
            self.assertTrue(self.last_completor_called)
            self.assertTrue(self.last_completor_succees)

    def test_update_with_default_pool(self):
        self.assertFalse(self.last_completor_called)
        new_listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID,
                                          'listener1-new',
                                          'new-description',
                                          self.pool, LB_ID,
                                          protocol_port=80,
                                          loadbalancer=self.lb,
                                          default_pool=self.pool)
        new_listener_dict = lb_translators.lb_listener_obj_to_dict(
            new_listener)
        with mock.patch.object(self.core_plugin, 'get_floatingips'
                               ) as mock_get_floatingips, \
            mock.patch.object(nsx_db,
'get_nsx_lbaas_listener_binding' ) as mock_get_listener_binding,\ mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding' ) as mock_get_pool_binding,\ mock.patch.object(nsx_db, 'update_nsx_lbaas_pool_binding'): mock_get_floatingips.return_value = [] mock_get_listener_binding.return_value = LISTENER_BINDING mock_get_pool_binding.return_value = POOL_BINDING self.edge_driver.listener.update(self.context, self.listener_dict, new_listener_dict, self.completor) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) def test_update_with_session_persistence(self): new_listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID, 'listener1-new', 'new-description', self.pool_persistency.id, LB_ID, protocol='HTTP', protocol_port=80, loadbalancer=self.lb, default_pool=self.pool_persistency) new_listener_dict = lb_translators.lb_listener_obj_to_dict( new_listener) with mock.patch.object(self.core_plugin, 'get_floatingips' ) as mock_get_floatingips, \ mock.patch.object(nsx_db, 'get_nsx_lbaas_listener_binding' ) as mock_get_listener_binding,\ mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding' ) as mock_get_pool_binding,\ mock.patch.object(self.vs_client, 'update', return_value={'id': LB_VS_ID}), \ mock.patch.object(self.pp_client, 'create' ) as mock_create_pp, \ mock.patch.object(nsx_db, 'update_nsx_lbaas_pool_binding'): mock_get_floatingips.return_value = [] mock_get_listener_binding.return_value = LISTENER_BINDING mock_get_pool_binding.return_value = POOL_BINDING self.edge_driver.listener.update(self.context, self.listener_dict, new_listener_dict, self.completor) mock_create_pp.assert_called_once() self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) def test_update_with_session_persistence_change(self): old_listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID, 'listener1', 'description', self.pool_persistency.id, LB_ID, protocol='HTTP', protocol_port=80, loadbalancer=self.lb, 
default_pool=self.pool_persistency) old_listener_dict = lb_translators.lb_listener_obj_to_dict( old_listener) sess_persistence = lb_models.SessionPersistence( POOL_ID, 'SOURCE_IP') pool_persistency = lb_models.Pool('new_pool_id', LB_TENANT_ID, 'pool1', '', None, 'HTTP', 'ROUND_ROBIN', loadbalancer_id=LB_ID, listener=self.listener, listeners=[self.listener], loadbalancer=self.lb, session_persistence=sess_persistence) new_listener = lb_models.Listener(LISTENER_ID, LB_TENANT_ID, 'listener1-new', 'new-description', pool_persistency.id, LB_ID, protocol='HTTP', protocol_port=80, loadbalancer=self.lb, default_pool=pool_persistency) new_listener_dict = lb_translators.lb_listener_obj_to_dict( new_listener) with mock.patch.object(self.core_plugin, 'get_floatingips' ) as mock_get_floatingips, \ mock.patch.object(nsx_db, 'get_nsx_lbaas_listener_binding' ) as mock_get_listener_binding,\ mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(self.pp_client, 'create' ) as mock_create_pp, \ mock.patch.object(self.pp_client, 'delete' ) as mock_delete_pp, \ mock.patch.object(lb_utils, 'get_pool_tags' ) as mock_get_pool_tags, \ mock.patch.object(nsx_db, 'update_nsx_lbaas_pool_binding'): mock_get_pool_tags.return_value = [] mock_get_floatingips.return_value = [] mock_get_listener_binding.return_value = LISTENER_BINDING mock_get_pool_binding.return_value = POOL_BINDING self.edge_driver.listener.update( self.context, old_listener_dict, new_listener_dict, self.completor) mock_create_pp.assert_called_once_with( display_name='persistence_pool1_new_p...ol_id', resource_type='LbSourceIpPersistenceProfile', tags=mock.ANY) # No reason to check parameters here, it's # all mocked out mock_delete_pp.assert_called_once() def test_delete(self): with mock.patch.object(nsx_db, 'get_nsx_lbaas_listener_binding' ) as mock_get_listener_binding, \ mock.patch.object(nsx_db, 'get_nsx_lbaas_loadbalancer_binding' ) as mock_get_lb_binding, \ 
mock.patch.object(self.service_client, 'get' ) as mock_get_lb_service, \ mock.patch.object(self.service_client, 'remove_virtual_server' ) as mock_remove_virtual_server, \ mock.patch.object(self.app_client, 'delete' ) as mock_delete_app_profile, \ mock.patch.object(nsx_db, 'get_neutron_from_nsx_router_id' ) as mock_get_neutron_from_nsx_router_id, \ mock.patch.object(self.vs_client, 'delete' ) as mock_delete_virtual_server, \ mock.patch.object(nsx_db, 'delete_nsx_lbaas_listener_binding', ) as mock_delete_listener_binding: mock_get_listener_binding.return_value = LISTENER_BINDING mock_get_neutron_from_nsx_router_id.router_id = ROUTER_ID mock_get_lb_binding.return_value = LB_BINDING mock_get_lb_service.return_value = { 'id': LB_SERVICE_ID, 'virtual_server_ids': [LB_VS_ID]} self.edge_driver.listener.delete(self.context, self.listener_dict, self.completor) mock_remove_virtual_server.assert_called_with(LB_SERVICE_ID, LB_VS_ID) mock_delete_virtual_server.assert_called_with(LB_VS_ID) mock_delete_app_profile.assert_called_with(APP_PROFILE_ID) mock_delete_listener_binding.assert_called_with( self.context.session, LB_ID, LISTENER_ID) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) class TestEdgeLbaasV2Pool(BaseTestEdgeLbaasV2): def setUp(self): super(TestEdgeLbaasV2Pool, self).setUp() @property def _tested_entity(self): return 'pool' def test_create(self): with mock.patch.object(self.pool_client, 'create' ) as mock_create_pool, \ mock.patch.object(nsx_db, 'add_nsx_lbaas_pool_binding' ) as mock_add_pool_binding, \ mock.patch.object(nsx_db, 'get_nsx_lbaas_listener_binding' ) as mock_get_listener_binding, \ mock.patch.object(self.pp_client, 'create' ) as mock_create_pp, \ mock.patch.object(self.vs_client, 'update', return_value=None ) as mock_vs_update, \ mock.patch.object(nsx_db, 'update_nsx_lbaas_pool_binding' ) as mock_update_pool_binding: mock_create_pool.return_value = {'id': LB_POOL_ID} mock_get_listener_binding.return_value = 
LISTENER_BINDING self.edge_driver.pool.create(self.context, self.pool_dict, self.completor) mock_add_pool_binding.assert_called_with( self.context.session, LB_ID, POOL_ID, LB_POOL_ID) mock_create_pp.assert_not_called() mock_vs_update.assert_called_once_with( LB_VS_ID, pool_id=LB_POOL_ID, persistence_profile_id=None) mock_update_pool_binding.assert_called_with( self.context.session, LB_ID, POOL_ID, LB_VS_ID) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) def _test_create_with_persistency(self, vs_data, verify_func): with mock.patch.object(self.pool_client, 'create' ) as mock_create_pool, \ mock.patch.object(nsx_db, 'add_nsx_lbaas_pool_binding' ) as mock_add_pool_binding, \ mock.patch.object(nsx_db, 'get_nsx_lbaas_listener_binding' ) as mock_get_listener_binding, \ mock.patch.object(self.pp_client, 'create' ) as mock_create_pp, \ mock.patch.object(self.pp_client, 'update', return_value=None, ) as mock_update_pp, \ mock.patch.object(self.vs_client, 'get' ) as mock_vs_get, \ mock.patch.object(self.vs_client, 'update', return_value=None ) as mock_vs_update, \ mock.patch.object(nsx_db, 'update_nsx_lbaas_pool_binding' ) as mock_update_pool_binding: mock_vs_get.return_value = vs_data mock_create_pool.return_value = {'id': LB_POOL_ID} mock_create_pp.return_value = {'id': LB_PP_ID} mock_get_listener_binding.return_value = LISTENER_BINDING self.edge_driver.pool.create( self.context, self.pool_persistency_dict, self.completor) mock_add_pool_binding.assert_called_with( self.context.session, LB_ID, POOL_ID, LB_POOL_ID) verify_func(mock_create_pp, mock_update_pp, mock_update_pool_binding, mock_vs_update) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) def test_create_with_persistency(self): def verify_func(mock_create_pp, mock_update_pp, mock_update_pool_binding, mock_vs_update): mock_create_pp.assert_called_once_with( resource_type='LbCookiePersistenceProfile', cookie_mode='INSERT', 
cookie_name='meh_cookie', display_name=mock.ANY, tags=mock.ANY) mock_update_pp.assert_not_called() mock_update_pool_binding.assert_called_with( self.context.session, LB_ID, POOL_ID, LB_VS_ID) mock_vs_update.assert_called_once_with( LB_VS_ID, pool_id=LB_POOL_ID, persistence_profile_id=LB_PP_ID) vs_data = {'id': LB_VS_ID} self._test_create_with_persistency(vs_data, verify_func) def test_create_with_persistency_existing_profile(self): def verify_func(mock_create_pp, mock_update_pp, mock_update_pool_binding, mock_vs_update): mock_create_pp.assert_not_called() mock_update_pp.assert_called_once_with( LB_PP_ID, resource_type='LbCookiePersistenceProfile', cookie_mode='INSERT', cookie_name='meh_cookie', display_name=mock.ANY, tags=mock.ANY) mock_update_pool_binding.assert_called_with( self.context.session, LB_ID, POOL_ID, LB_VS_ID) mock_vs_update.assert_called_once_with( LB_VS_ID, pool_id=LB_POOL_ID, persistence_profile_id=LB_PP_ID) vs_data = {'id': LB_VS_ID, 'persistence_profile_id': LB_PP_ID} self._test_create_with_persistency(vs_data, verify_func) def test_create_with_persistency_no_listener(self): def verify_func(mock_create_pp, mock_update_pp, mock_update_pool_binding, mock_vs_update): mock_create_pp.assert_not_called() mock_update_pp.assert_not_called() mock_update_pool_binding.assert_not_called() mock_vs_update.assert_not_called() vs_data = {'id': LB_VS_ID, 'persistence_profile_id': LB_PP_ID} self.pool_persistency_dict['listener'] = None self.pool_persistency_dict['listeners'] = [] self._test_create_with_persistency(vs_data, verify_func) def test_create_multiple_listeners(self): """Verify creation will fail if multiple listeners are set""" pool = lb_models.Pool(POOL_ID, LB_TENANT_ID, 'pool1', '', None, 'HTTP', 'ROUND_ROBIN', loadbalancer_id=LB_ID, listeners=[self.listener, self.https_listener], loadbalancer=self.lb) pool_dict = lb_translators.lb_pool_obj_to_dict(pool) self.assertRaises(n_exc.BadRequest, self.edge_driver.pool.create, self.context, pool_dict, 
self.completor) def test_update(self): new_pool = lb_models.Pool(POOL_ID, LB_TENANT_ID, 'pool-name', '', None, 'HTTP', 'LEAST_CONNECTIONS', listener=self.listener) new_pool_dict = lb_translators.lb_pool_obj_to_dict(new_pool) with mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding' ) as mock_get_pool_binding: mock_get_pool_binding.return_value = POOL_BINDING self.edge_driver.pool.update(self.context, self.pool_dict, new_pool_dict, self.completor) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) def test_update_multiple_listeners(self): """Verify update action will fail if multiple listeners are set""" new_pool = lb_models.Pool(POOL_ID, LB_TENANT_ID, 'pool1', '', None, 'HTTP', 'ROUND_ROBIN', loadbalancer_id=LB_ID, listeners=[self.listener, self.https_listener], loadbalancer=self.lb) new_pool_dict = lb_translators.lb_pool_obj_to_dict(new_pool) with mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding' ) as mock_get_pool_binding: mock_get_pool_binding.return_value = POOL_BINDING self.assertRaises(n_exc.BadRequest, self.edge_driver.pool.update, self.context, self.pool_dict, new_pool_dict, self.completor) def _test_update_with_persistency(self, vs_data, old_pool, new_pool, verify_func): old_pool_dict = lb_translators.lb_pool_obj_to_dict(old_pool) new_pool_dict = lb_translators.lb_pool_obj_to_dict(new_pool) with mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(self.pp_client, 'create' ) as mock_create_pp, \ mock.patch.object(self.pp_client, 'update', return_value=None, ) as mock_update_pp, \ mock.patch.object(self.pp_client, 'delete', return_value=None, ) as mock_delete_pp, \ mock.patch.object(self.vs_client, 'get' ) as mock_vs_get, \ mock.patch.object(self.vs_client, 'update', return_value=None ) as mock_vs_update: mock_vs_get.return_value = vs_data mock_get_pool_binding.return_value = POOL_BINDING mock_create_pp.return_value = {'id': LB_PP_ID} 
self.edge_driver.pool.update(self.context, old_pool_dict, new_pool_dict, self.completor) verify_func(mock_create_pp, mock_update_pp, mock_delete_pp, mock_vs_update) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) def test_update_with_persistency(self): def verify_func(mock_create_pp, mock_update_pp, mock_delete_pp, mock_vs_update): mock_create_pp.assert_called_once_with( resource_type='LbCookiePersistenceProfile', cookie_mode='INSERT', cookie_name='meh_cookie', display_name=mock.ANY, tags=mock.ANY) mock_update_pp.assert_not_called() mock_delete_pp.assert_not_called() mock_vs_update.assert_called_once_with( LB_VS_ID, pool_id=LB_POOL_ID, persistence_profile_id=LB_PP_ID) vs_data = {'id': LB_VS_ID} self._test_update_with_persistency(vs_data, self.pool, self.pool_persistency, verify_func) def test_update_remove_persistency(self): def verify_func(mock_create_pp, mock_update_pp, mock_delete_pp, mock_vs_update): mock_create_pp.assert_not_called() mock_update_pp.assert_not_called() mock_delete_pp.assert_called_with(LB_PP_ID) mock_vs_update.assert_called_once_with( LB_VS_ID, pool_id=LB_POOL_ID, persistence_profile_id=None) vs_data = {'id': LB_VS_ID, 'persistence_profile_id': LB_PP_ID} self._test_update_with_persistency(vs_data, self.pool_persistency, self.pool, verify_func) def test_delete(self): with mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(self.vs_client, 'update', return_value=None ) as mock_update_virtual_server, \ mock.patch.object(self.pool_client, 'delete' ) as mock_delete_pool, \ mock.patch.object(nsx_db, 'delete_nsx_lbaas_pool_binding' ) as mock_delete_pool_binding, \ mock.patch.object(nsx_db, 'get_neutron_from_nsx_router_id' ) as mock_get_neutron_from_nsx_router_id, \ mock.patch.object(nsx_db, 'get_nsx_lbaas_loadbalancer_binding' ) as mock_get_lb_binding: mock_get_pool_binding.return_value = POOL_BINDING mock_get_neutron_from_nsx_router_id.router_id = ROUTER_ID 
mock_get_lb_binding.return_value = None self.edge_driver.pool.delete(self.context, self.pool_dict, self.completor) mock_update_virtual_server.assert_called_with( LB_VS_ID, persistence_profile_id=None, pool_id=None) mock_delete_pool.assert_called_with(LB_POOL_ID) mock_delete_pool_binding.assert_called_with( self.context.session, LB_ID, POOL_ID) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) def test_delete_with_persistency(self): with mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(self.vs_client, 'get' ) as mock_vs_get, \ mock.patch.object(self.vs_client, 'update', return_value=None ) as mock_update_virtual_server, \ mock.patch.object(self.pool_client, 'delete' ) as mock_delete_pool, \ mock.patch.object(self.pp_client, 'delete', return_value=None, ) as mock_delete_pp, \ mock.patch.object(nsx_db, 'delete_nsx_lbaas_pool_binding' ) as mock_delete_pool_binding, \ mock.patch.object(nsx_db, 'get_nsx_lbaas_loadbalancer_binding' ) as mock_get_lb_binding: mock_get_pool_binding.return_value = POOL_BINDING mock_get_lb_binding.return_value = None mock_vs_get.return_value = {'id': LB_VS_ID, 'persistence_profile_id': LB_PP_ID} self.edge_driver.pool.delete( self.context, self.pool_persistency_dict, self.completor) mock_delete_pp.assert_called_once_with(LB_PP_ID) mock_update_virtual_server.assert_called_once_with( LB_VS_ID, persistence_profile_id=None, pool_id=None) mock_delete_pool.assert_called_with(LB_POOL_ID) mock_delete_pool_binding.assert_called_with( self.context.session, LB_ID, POOL_ID) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) def _verify_create(self, res_type, cookie_name, cookie_mode, mock_create_pp, mock_update_pp): if cookie_name: mock_create_pp.assert_called_once_with( resource_type=res_type, cookie_name=cookie_name, cookie_mode=cookie_mode, display_name=mock.ANY, tags=mock.ANY) else: mock_create_pp.assert_called_once_with( 
resource_type=res_type, display_name=mock.ANY, tags=mock.ANY) # Compare tags - kw args are the last item of a mock call tuple self.assertItemsEqual(mock_create_pp.mock_calls[0][-1]['tags'], [{'scope': 'os-lbaas-lb-id', 'tag': 'xxx-xxx'}, {'scope': 'os-lbaas-lb-name', 'tag': 'lb1'}, {'scope': 'os-lbaas-listener-id', 'tag': 'listener-x'}]) mock_update_pp.assert_not_called() def _verify_update(self, res_type, cookie_name, cookie_mode, mock_create_pp, mock_update_pp): if cookie_name: mock_update_pp.assert_called_once_with( LB_PP_ID, resource_type=res_type, cookie_name=cookie_name, cookie_mode=cookie_mode, display_name=mock.ANY, tags=mock.ANY) else: mock_update_pp.assert_called_once_with( LB_PP_ID, resource_type=res_type, display_name=mock.ANY, tags=mock.ANY) # Compare tags - kw args are the last item of a mock call tuple self.assertItemsEqual(mock_update_pp.mock_calls[0][-1]['tags'], [{'scope': 'os-lbaas-lb-id', 'tag': 'xxx-xxx'}, {'scope': 'os-lbaas-lb-name', 'tag': 'lb1'}, {'scope': 'os-lbaas-listener-id', 'tag': 'listener-x'}]) mock_create_pp.assert_not_called() def _verify_switch(self, res_type, cookie_name, cookie_mode, mock_create_pp, mock_update_pp): if cookie_name: mock_create_pp.assert_called_once_with( resource_type=res_type, cookie_name=cookie_name, cookie_mode=cookie_mode, display_name=mock.ANY, tags=mock.ANY) else: mock_create_pp.assert_called_once_with( LB_PP_ID, resource_type=res_type, display_name=mock.ANY, tags=mock.ANY) # Compare tags - kw args are the last item of a mock call tuple self.assertItemsEqual(mock_create_pp.mock_calls[0][-1]['tags'], [{'scope': 'os-lbaas-lb-id', 'tag': 'xxx-xxx'}, {'scope': 'os-lbaas-lb-name', 'tag': 'lb1'}, {'scope': 'os-lbaas-listener-id', 'tag': 'listener-x'}]) def _verify_delete(self, res_type, cookie_name, cookie_mode, mock_create_pp, mock_update_pp): # do not check delete mock as deletion is not done in # setup_session_persistence mock_create_pp.assert_not_called() mock_update_pp.assert_not_called() def 
_test_setup_session_persistence(self, session_persistence, res_type, vs_data, verify_func, cookie_name=None, cookie_mode=None, switch_type=False): with mock.patch.object(self.pp_client, 'create' ) as mock_create_pp, \ mock.patch.object(self.pp_client, 'update', return_value=None, ) as mock_update_pp: mock_create_pp.return_value = {'id': LB_PP_ID} self.pool.session_persistence = session_persistence pool_dict = lb_translators.lb_pool_obj_to_dict(self.pool) pp_id, post_func = lb_utils.setup_session_persistence( self.nsxlib, pool_dict, [], switch_type, self.listener_dict, vs_data) if session_persistence: self.assertEqual(LB_PP_ID, pp_id) else: self.assertIsNone(pp_id) if not session_persistence or switch_type: # Also verify post_func for delete self.assertEqual((self.nsxlib, LB_PP_ID,), post_func.args) verify_func(res_type, cookie_name, cookie_mode, mock_create_pp, mock_update_pp) def test_setup_session_persistence_sourceip_new_profile(self): sess_persistence = lb_models.SessionPersistence(POOL_ID, 'SOURCE_IP') res_type = 'LbSourceIpPersistenceProfile' self._test_setup_session_persistence( sess_persistence, res_type, {'id': LB_VS_ID}, self._verify_create) def test_setup_session_persistence_httpcookie_new_profile(self): sess_persistence = lb_models.SessionPersistence( POOL_ID, 'HTTP_COOKIE') res_type = 'LbCookiePersistenceProfile' self._test_setup_session_persistence( sess_persistence, res_type, {'id': LB_VS_ID}, self._verify_create, 'default_cookie_name', 'INSERT') def test_setup_session_persistence_appcookie_new_profile(self): sess_persistence = lb_models.SessionPersistence( POOL_ID, 'APP_COOKIE', 'whatever') res_type = 'LbCookiePersistenceProfile' self._test_setup_session_persistence( sess_persistence, res_type, {'id': LB_VS_ID}, self._verify_create, 'whatever', 'REWRITE') def test_setup_session_persistence_none_from_existing(self): sess_persistence = None self._test_setup_session_persistence( sess_persistence, None, {'id': LB_VS_ID, 'persistence_profile_id': 
LB_PP_ID}, self._verify_delete) def test_setup_session_persistence_sourceip_from_existing(self): sess_persistence = lb_models.SessionPersistence(POOL_ID, 'SOURCE_IP') res_type = 'LbSourceIpPersistenceProfile' self._test_setup_session_persistence( sess_persistence, res_type, {'id': LB_VS_ID, 'persistence_profile_id': LB_PP_ID}, self._verify_update) def test_setup_session_persistence_httpcookie_from_existing(self): sess_persistence = lb_models.SessionPersistence(POOL_ID, 'HTTP_COOKIE') res_type = 'LbCookiePersistenceProfile' self._test_setup_session_persistence( sess_persistence, res_type, {'id': LB_VS_ID, 'persistence_profile_id': LB_PP_ID}, self._verify_update, 'default_cookie_name', 'INSERT') def test_setup_session_persistence_appcookie_from_existing(self): sess_persistence = lb_models.SessionPersistence( POOL_ID, 'APP_COOKIE', 'whatever') res_type = 'LbCookiePersistenceProfile' self._test_setup_session_persistence( sess_persistence, res_type, {'id': LB_VS_ID, 'persistence_profile_id': LB_PP_ID}, self._verify_update, 'whatever', 'REWRITE') def test_setup_session_persistence_appcookie_switch_type(self): sess_persistence = lb_models.SessionPersistence( POOL_ID, 'APP_COOKIE', 'whatever') res_type = 'LbCookiePersistenceProfile' self._test_setup_session_persistence( sess_persistence, res_type, {'id': LB_VS_ID, 'persistence_profile_id': LB_PP_ID}, self._verify_switch, 'whatever', 'REWRITE', switch_type=True) class TestEdgeLbaasV2Member(BaseTestEdgeLbaasV2): def setUp(self): super(TestEdgeLbaasV2Member, self).setUp() @property def _tested_entity(self): return 'member' def test_create(self): with mock.patch.object(lb_utils, 'validate_lb_member_subnet' ) as mock_validate_lb_subnet, \ mock.patch.object(self.lbv2_driver.plugin, 'get_pool_members' ) as mock_get_pool_members, \ mock.patch.object(lb_utils, 'get_network_from_subnet' ) as mock_get_network, \ mock.patch.object(lb_utils, 'get_router_from_network' ) as mock_get_router, \ mock.patch.object(nsx_db, 
'get_nsx_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(nsx_db, 'get_nsx_lbaas_loadbalancer_binding' ) as mock_get_lb_binding, \ mock.patch.object(nsx_db, 'get_nsx_router_id' ) as mock_get_nsx_router_id, \ mock.patch.object(self.service_client, 'get_router_lb_service' ) as mock_get_lb_service, \ mock.patch.object(self.pool_client, 'get' ) as mock_get_pool, \ mock.patch.object(self.pool_client, 'update_pool_with_members' ) as mock_update_pool_with_members: mock_validate_lb_subnet.return_value = True mock_get_pool_members.return_value = [self.member] mock_get_network.return_value = LB_NETWORK mock_get_router.return_value = LB_ROUTER_ID mock_get_pool_binding.return_value = POOL_BINDING mock_get_lb_binding.return_value = LB_BINDING mock_get_nsx_router_id.return_value = LB_ROUTER_ID mock_get_lb_service.return_value = {'id': LB_SERVICE_ID} mock_get_pool.return_value = LB_POOL self.edge_driver.member.create( self.context, self.member_dict, self.completor) mock_update_pool_with_members.assert_called_with(LB_POOL_ID, [LB_MEMBER]) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) def test_create_external_vip(self): with mock.patch.object(lb_utils, 'validate_lb_member_subnet' ) as mock_validate_lb_subnet, \ mock.patch.object(self.lbv2_driver.plugin, 'get_pool_members' ) as mock_get_pool_members, \ mock.patch.object(lb_utils, 'get_network_from_subnet' ) as mock_get_network, \ mock.patch.object(lb_utils, 'get_router_from_network' ) as mock_get_router, \ mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(nsx_db, 'get_nsx_lbaas_loadbalancer_binding', ) as mock_get_lb_binding, \ mock.patch.object(nsx_db, 'update_nsx_lbaas_loadbalancer_binding', ) as mock_update_lb_binding, \ mock.patch.object(nsx_db, 'get_nsx_router_id' ) as mock_get_nsx_router_id, \ mock.patch.object(self.service_client, 'get_router_lb_service' ) as mock_get_lb_service, \ 
mock.patch.object(self.pool_client, 'get' ) as mock_get_pool, \ mock.patch.object(self.core_plugin, '_find_router_gw_subnets', return_value=[]),\ mock.patch.object(self.pool_client, 'update_pool_with_members' ) as mock_update_pool_with_members: mock_validate_lb_subnet.return_value = True mock_get_pool_members.return_value = [self.member] mock_get_network.return_value = LB_NETWORK mock_get_router.return_value = LB_ROUTER_ID mock_get_pool_binding.return_value = POOL_BINDING mock_get_lb_binding.return_value = LB_BINDING_NO_RTR mock_get_nsx_router_id.return_value = LB_ROUTER_ID mock_get_lb_service.return_value = {'id': LB_SERVICE_ID} mock_get_pool.return_value = LB_POOL self.edge_driver.member.create( self.context, self.member_dict, self.completor) mock_update_pool_with_members.assert_called_with(LB_POOL_ID, [LB_MEMBER]) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) mock_update_lb_binding.assert_called_once_with( mock.ANY, LB_ID, LB_ROUTER_ID) def test_create_member_different_router(self): with mock.patch.object(self.lbv2_driver.plugin, 'get_pool_members' ) as mock_get_pool_members, \ mock.patch.object(lb_utils, 'get_network_from_subnet' ) as mock_get_network, \ mock.patch.object(lb_utils, 'get_router_from_network' ) as mock_get_router: mock_get_pool_members.return_value = [self.member] mock_get_network.return_value = LB_NETWORK mock_get_router.return_value = None self.assertRaises(n_exc.BadRequest, self.edge_driver.member.create, self.context, self.member_dict, self.completor) def test_update(self): new_member = lb_models.Member(MEMBER_ID, LB_TENANT_ID, POOL_ID, MEMBER_ADDRESS, 80, 2, pool=self.pool, name='member-nnn-nnn') new_member_dict = lb_translators.lb_member_obj_to_dict(new_member) with mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(self.pool_client, 'get' ) as mock_get_pool, \ mock.patch.object(lb_utils, 'get_network_from_subnet' ) as 
mock_get_network_from_subnet: mock_get_pool_binding.return_value = POOL_BINDING mock_get_pool.return_value = LB_POOL_WITH_MEMBER mock_get_network_from_subnet.return_value = LB_NETWORK self.edge_driver.member.update(self.context, self.member_dict, new_member_dict, self.completor) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) def test_delete(self): with mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(self.pool_client, 'get' ) as mock_get_pool, \ mock.patch.object(lb_utils, 'get_network_from_subnet' ) as mock_get_network_from_subnet, \ mock.patch.object(nsx_db, 'get_neutron_from_nsx_router_id' ) as mock_get_neutron_from_nsx_router_id, \ mock.patch.object(self.pool_client, 'update_pool_with_members' ) as mock_update_pool_with_members: mock_get_pool_binding.return_value = POOL_BINDING mock_get_pool.return_value = LB_POOL_WITH_MEMBER mock_get_network_from_subnet.return_value = LB_NETWORK mock_get_neutron_from_nsx_router_id.router_id = ROUTER_ID self.edge_driver.member.delete(self.context, self.member_dict, self.completor) mock_update_pool_with_members.assert_called_with(LB_POOL_ID, []) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) class TestEdgeLbaasV2HealthMonitor(BaseTestEdgeLbaasV2): def setUp(self): super(TestEdgeLbaasV2HealthMonitor, self).setUp() @property def _tested_entity(self): return 'health_monitor' def test_create(self): with mock.patch.object(self.monitor_client, 'create' ) as mock_create_monitor, \ mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(self.pool_client, 'add_monitor_to_pool' ) as mock_add_monitor_to_pool, \ mock.patch.object(nsx_db, 'add_nsx_lbaas_monitor_binding' ) as mock_add_monitor_binding: mock_create_monitor.return_value = {'id': LB_MONITOR_ID} mock_get_pool_binding.return_value = POOL_BINDING self.edge_driver.healthmonitor.create( self.context, 
self.hm_dict, self.completor) mock_add_monitor_to_pool.assert_called_with(LB_POOL_ID, LB_MONITOR_ID) mock_add_monitor_binding.assert_called_with( self.context.session, LB_ID, POOL_ID, HM_ID, LB_MONITOR_ID, LB_POOL_ID) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) def test_create_http(self): with mock.patch.object(self.monitor_client, 'create' ) as mock_create_monitor, \ mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(self.pool_client, 'add_monitor_to_pool' ) as mock_add_monitor_to_pool, \ mock.patch.object(nsx_db, 'add_nsx_lbaas_monitor_binding' ) as mock_add_monitor_binding: mock_create_monitor.return_value = {'id': LB_MONITOR_ID} mock_get_pool_binding.return_value = POOL_BINDING # Verify HTTP-specific monitor parameters are added self.edge_driver.healthmonitor.create( self.context, self.hm_http_dict, self.completor) self.assertEqual(1, len(mock_create_monitor.mock_calls)) kw_args = mock_create_monitor.mock_calls[0][2] self.assertEqual(self.hm_http.http_method, kw_args.get('request_method')) self.assertEqual(self.hm_http.url_path, kw_args.get('request_url')) mock_add_monitor_to_pool.assert_called_with(LB_POOL_ID, LB_MONITOR_ID) mock_add_monitor_binding.assert_called_with( self.context.session, LB_ID, POOL_ID, HM_ID, LB_MONITOR_ID, LB_POOL_ID) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) def test_update(self): with mock.patch.object(self.monitor_client, 'update' ) as mock_update_monitor, \ mock.patch.object(nsx_db, 'get_nsx_lbaas_monitor_binding' ) as mock_get_monitor_binding: mock_get_monitor_binding.return_value = HM_BINDING new_hm = lb_models.HealthMonitor( HM_ID, LB_TENANT_ID, 'PING', 5, 5, 5, pool=self.pool, name='new_name') new_hm_dict = lb_translators.lb_hm_obj_to_dict(new_hm) self.edge_driver.healthmonitor.update( self.context, self.hm_dict, new_hm_dict, self.completor) mock_update_monitor.assert_called_with( 
LB_MONITOR_ID, display_name=mock.ANY, fall_count=5, interval=5, timeout=5, resource_type='LbIcmpMonitor') self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) def test_delete(self): with mock.patch.object(nsx_db, 'get_nsx_lbaas_monitor_binding' ) as mock_get_monitor_binding, \ mock.patch.object(self.pool_client, 'remove_monitor_from_pool' ) as mock_remove_monitor_from_pool, \ mock.patch.object(nsx_db, 'get_neutron_from_nsx_router_id' ) as mock_get_neutron_from_nsx_router_id, \ mock.patch.object(self.monitor_client, 'delete' ) as mock_delete_monitor, \ mock.patch.object(nsx_db, 'delete_nsx_lbaas_monitor_binding' ) as mock_delete_monitor_binding: mock_get_monitor_binding.return_value = HM_BINDING self.edge_driver.healthmonitor.delete( self.context, self.hm_dict, self.completor) mock_remove_monitor_from_pool.assert_called_with(LB_POOL_ID, LB_MONITOR_ID) mock_get_neutron_from_nsx_router_id.router_id = ROUTER_ID mock_delete_monitor.assert_called_with(LB_MONITOR_ID) mock_delete_monitor_binding.assert_called_with( self.context.session, LB_ID, POOL_ID, HM_ID) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) class TestEdgeLbaasV2L7Policy(BaseTestEdgeLbaasV2): def setUp(self): super(TestEdgeLbaasV2L7Policy, self).setUp() @property def _tested_entity(self): return 'l7policy' def test_create(self): with mock.patch.object(nsx_db, 'get_nsx_lbaas_listener_binding' ) as mock_get_listener_binding, \ mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(self.rule_client, 'create' ) as mock_create_rule, \ mock.patch.object(self.vs_client, 'get' ) as mock_get_virtual_server, \ mock.patch.object(self.vs_client, 'update' ) as mock_update_virtual_server, \ mock.patch.object(nsx_db, 'add_nsx_lbaas_l7policy_binding' ) as mock_add_l7policy_binding: mock_get_listener_binding.return_value = LISTENER_BINDING mock_get_pool_binding.return_value = POOL_BINDING 
mock_create_rule.return_value = {'id': LB_RULE_ID} mock_get_virtual_server.return_value = {'id': LB_VS_ID} self.edge_driver.l7policy.create( self.context, self.l7policy_dict, self.completor) mock_update_virtual_server.assert_called_with( LB_VS_ID, rule_ids=[LB_RULE_ID]) mock_add_l7policy_binding.assert_called_with( self.context.session, L7POLICY_ID, LB_RULE_ID, LB_VS_ID) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) def test_update(self): new_l7policy = lb_models.L7Policy(L7POLICY_ID, LB_TENANT_ID, name='new-policy', listener_id=LISTENER_ID, action='REJECT', listener=self.listener, position=2) new_policy_dict = lb_translators.lb_l7policy_obj_to_dict(new_l7policy) vs_with_rules = { 'id': LB_VS_ID, 'rule_ids': [LB_RULE_ID, 'abc', 'xyz'] } rule_body = { 'match_conditions': [], 'actions': [{ 'type': 'LbHttpRejectAction', 'reply_status': '403'}], 'phase': 'HTTP_FORWARDING', 'match_strategy': 'ALL' } with mock.patch.object(nsx_db, 'get_nsx_lbaas_l7policy_binding' ) as mock_get_l7policy_binding, \ mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(self.rule_client, 'update' ) as mock_update_rule, \ mock.patch.object(self.vs_client, 'get' ) as mock_get_virtual_server, \ mock.patch.object(self.vs_client, 'update' ) as mock_update_virtual_server: mock_get_l7policy_binding.return_value = L7POLICY_BINDING mock_get_pool_binding.return_value = POOL_BINDING mock_get_virtual_server.return_value = vs_with_rules self.edge_driver.l7policy.update(self.context, self.l7policy_dict, new_policy_dict, self.completor) mock_update_rule.assert_called_with(LB_RULE_ID, **rule_body) mock_update_virtual_server.assert_called_with( LB_VS_ID, rule_ids=['abc', LB_RULE_ID, 'xyz']) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) def test_delete(self): with mock.patch.object(nsx_db, 'get_nsx_lbaas_l7policy_binding' ) as mock_get_l7policy_binding, \ 
mock.patch.object(self.vs_client, 'remove_rule' ) as mock_vs_remove_rule, \ mock.patch.object(self.rule_client, 'delete' ) as mock_delete_rule, \ mock.patch.object(nsx_db, 'get_neutron_from_nsx_router_id' ) as mock_get_neutron_from_nsx_router_id, \ mock.patch.object(nsx_db, 'delete_nsx_lbaas_l7policy_binding' ) as mock_delete_l7policy_binding: mock_get_l7policy_binding.return_value = L7POLICY_BINDING mock_get_neutron_from_nsx_router_id.return_value = LB_ROUTER_ID self.edge_driver.l7policy.delete( self.context, self.l7policy_dict, self.completor) mock_vs_remove_rule.assert_called_with(LB_VS_ID, LB_RULE_ID) mock_delete_rule.assert_called_with(LB_RULE_ID) mock_get_neutron_from_nsx_router_id.router_id = ROUTER_ID mock_delete_l7policy_binding.assert_called_with( self.context.session, L7POLICY_ID) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) class TestEdgeLbaasV2L7Rule(BaseTestEdgeLbaasV2): def setUp(self): super(TestEdgeLbaasV2L7Rule, self).setUp() @property def _tested_entity(self): return 'l7rule' def test_create(self): self.l7policy.rules = [self.l7rule] create_rule_body = { 'match_conditions': [{ 'type': 'LbHttpRequestHeaderCondition', 'match_type': 'EQUALS', 'header_name': self.l7rule.key, 'header_value': self.l7rule.value}], 'actions': [{ 'type': 'LbSelectPoolAction', 'pool_id': LB_POOL_ID}], 'phase': 'HTTP_FORWARDING', 'match_strategy': 'ALL' } with mock.patch.object(nsx_db, 'get_nsx_lbaas_l7policy_binding' ) as mock_get_l7policy_binding, \ mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(self.rule_client, 'update' ) as mock_update_rule: mock_get_l7policy_binding.return_value = L7POLICY_BINDING mock_get_pool_binding.return_value = POOL_BINDING self.edge_driver.l7rule.create( self.context, self.l7rule_dict, self.completor) mock_update_rule.assert_called_with(LB_RULE_ID, **create_rule_body) self.assertTrue(self.last_completor_called) 
self.assertTrue(self.last_completor_succees) def test_update(self): new_l7rule = lb_models.L7Rule(L7RULE_ID, LB_TENANT_ID, l7policy_id=L7POLICY_ID, compare_type='STARTS_WITH', invert=True, type='COOKIE', key='cookie1', value='xxxxx', policy=self.l7policy) new_rule_dict = lb_translators.lb_l7rule_obj_to_dict(new_l7rule) self.l7policy.rules = [new_l7rule] update_rule_body = { 'match_conditions': [{ 'type': 'LbHttpRequestHeaderCondition', 'match_type': 'STARTS_WITH', 'header_name': 'Cookie', 'header_value': 'cookie1=xxxxx'}], 'actions': [{ 'type': 'LbSelectPoolAction', 'pool_id': LB_POOL_ID}], 'phase': 'HTTP_FORWARDING', 'match_strategy': 'ALL' } with mock.patch.object(nsx_db, 'get_nsx_lbaas_l7policy_binding' ) as mock_get_l7policy_binding, \ mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(self.rule_client, 'update' ) as mock_update_rule: mock_get_l7policy_binding.return_value = L7POLICY_BINDING mock_get_pool_binding.return_value = POOL_BINDING self.edge_driver.l7rule.update(self.context, self.l7rule_dict, new_rule_dict, self.completor) mock_update_rule.assert_called_with(LB_RULE_ID, **update_rule_body) self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) def test_delete(self): self.l7policy.rules = [self.l7rule] delete_rule_body = { 'match_conditions': [], 'actions': [{ 'type': 'LbSelectPoolAction', 'pool_id': LB_POOL_ID}], 'phase': 'HTTP_FORWARDING', 'match_strategy': 'ALL' } with mock.patch.object(nsx_db, 'get_nsx_lbaas_l7policy_binding' ) as mock_get_l7policy_binding, \ mock.patch.object(nsx_db, 'get_nsx_lbaas_pool_binding' ) as mock_get_pool_binding, \ mock.patch.object(nsx_db, 'get_neutron_from_nsx_router_id' ) as mock_get_neutron_from_nsx_router_id, \ mock.patch.object(self.rule_client, 'update' ) as mock_update_rule: mock_get_l7policy_binding.return_value = L7POLICY_BINDING mock_get_pool_binding.return_value = POOL_BINDING self.edge_driver.l7rule.delete( self.context, 
self.l7rule_dict, self.completor) mock_update_rule.assert_called_with(LB_RULE_ID, **delete_rule_body) mock_get_neutron_from_nsx_router_id.router_id = ROUTER_ID self.assertTrue(self.last_completor_called) self.assertTrue(self.last_completor_succees) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/lbaas/test_octavia_driver.py0000644000175000017500000005323600000000000031041 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import decorator import mock import testtools from oslo_utils import uuidutils from octavia_lib.api.drivers import data_models code_ok = True # Skip duplications between Octavia & Neutron configurations and missing # configuration groups with mock.patch('oslo_config.cfg.ConfigOpts.import_group'),\ mock.patch('oslo_config.cfg.ConfigOpts.__getattr__'): try: from vmware_nsx.services.lbaas.octavia import octavia_driver as driver except ImportError: # Octavia code not found # this can happen as Octavia is not in the requirements yet code_ok = False DRIVER = 'vmware_nsx.services.lbaas.octavia.octavia_driver.NSXOctaviaDriver' class TestNsxProviderDriver(testtools.TestCase): """Test the NSX Octavia driver Make sure all the relevant data is translated and sent to the listener """ def setUp(self): super(TestNsxProviderDriver, self).setUp() global code_ok if not code_ok: return # init the NSX driver without the RPC & certificate with mock.patch(DRIVER + '._init_rpc_messaging'), \ mock.patch(DRIVER + '._init_cert_manager'): self.driver = driver.NSXOctaviaDriver() self.driver.client = mock.Mock() self.loadbalancer_id = uuidutils.generate_uuid() self.vip_address = '192.0.2.10' self.vip_network_id = uuidutils.generate_uuid() self.vip_port_id = uuidutils.generate_uuid() self.vip_subnet_id = uuidutils.generate_uuid() self.listener_id = uuidutils.generate_uuid() self.pool_id = uuidutils.generate_uuid() self.member_id = uuidutils.generate_uuid() self.member_subnet_id = uuidutils.generate_uuid() self.healthmonitor_id = uuidutils.generate_uuid() self.l7policy_id = uuidutils.generate_uuid() self.l7rule_id = uuidutils.generate_uuid() self.project_id = uuidutils.generate_uuid() self.default_tls_container_ref = uuidutils.generate_uuid() self.sni_container_ref_1 = uuidutils.generate_uuid() self.sni_container_ref_2 = uuidutils.generate_uuid() self.ref_member = data_models.Member( address='198.51.100.4', admin_state_up=True, member_id=self.member_id, monitor_address='203.0.113.2', 
monitor_port=66, name='jacket', pool_id=self.pool_id, protocol_port=99, subnet_id=self.member_subnet_id, weight=55) self.ref_healthmonitor = data_models.HealthMonitor( admin_state_up=False, delay=2, expected_codes="500", healthmonitor_id=self.healthmonitor_id, http_method='TRACE', max_retries=1, max_retries_down=0, name='doc', pool_id=self.pool_id, timeout=3, type='PHD', url_path='/index.html') self.ref_pool = data_models.Pool( admin_state_up=True, description='Olympic swimming pool', healthmonitor=self.ref_healthmonitor, lb_algorithm='A_Fast_One', loadbalancer_id=self.loadbalancer_id, members=[self.ref_member], name='Osborn', pool_id=self.pool_id, protocol='avian', session_persistence={'type': 'glue'}) self.ref_l7rule = data_models.L7Rule( admin_state_up=True, compare_type='store_brand', invert=True, key='board', l7policy_id=self.l7policy_id, l7rule_id=self.l7rule_id, type='strict', value='gold') self.ref_l7policy = data_models.L7Policy( action='packed', admin_state_up=False, description='Corporate policy', l7policy_id=self.l7policy_id, listener_id=self.listener_id, name='more_policy', position=1, redirect_pool_id=self.pool_id, redirect_url='/hr', rules=[self.ref_l7rule]) self.ref_listener = data_models.Listener( admin_state_up=False, connection_limit=5, default_pool=self.ref_pool, default_pool_id=self.pool_id, default_tls_container_data='default_cert_data', default_tls_container_ref=self.default_tls_container_ref, description='The listener', insert_headers={'X-Forwarded-For': 'true'}, l7policies=[self.ref_l7policy], listener_id=self.listener_id, loadbalancer_id=self.loadbalancer_id, name='super_listener', protocol='avian', protocol_port=42, sni_container_data=['sni_cert_data_1', 'sni_cert_data_2'], sni_container_refs=[self.sni_container_ref_1, self.sni_container_ref_2]) self.ref_lb = data_models.LoadBalancer( admin_state_up=False, description='One great load balancer', flavor={'cake': 'chocolate'}, listeners=[self.ref_listener], 
loadbalancer_id=self.loadbalancer_id, name='favorite_lb', project_id=self.project_id, vip_address=self.vip_address, vip_network_id=self.vip_network_id, vip_port_id=self.vip_port_id, vip_subnet_id=self.vip_subnet_id) # start DB mocks mock.patch('octavia.db.api.get_session').start() mock.patch("octavia.api.drivers.utils.db_pool_to_provider_pool", return_value=self.ref_pool).start() @decorator.decorator def skip_no_octavia(f, *args, **kwargs): global code_ok if not code_ok: obj = args[0] return obj.skipTest('Octavia code not found') return f(*args, **kwargs) @skip_no_octavia def test_loadbalancer_create(self): with mock.patch.object(self.driver.client, 'cast') as cast_method: self.driver.loadbalancer_create(self.ref_lb) cast_method.assert_called_with({}, 'loadbalancer_create', loadbalancer=mock.ANY) driver_obj = cast_method.call_args[1]['loadbalancer'] self.assertIn('id', driver_obj) self.assertIn('project_id', driver_obj) self.assertIn('admin_state_up', driver_obj) self.assertIn('name', driver_obj) self.assertIn('listeners', driver_obj) self.assertEqual(1, len(driver_obj['listeners'])) self.assertEqual(self.ref_lb.vip_address, driver_obj['vip_address']) self.assertEqual(self.ref_lb.vip_network_id, driver_obj['vip_network_id']) self.assertEqual(self.ref_lb.vip_port_id, driver_obj['vip_port_id']) self.assertEqual(self.ref_lb.vip_subnet_id, driver_obj['vip_subnet_id']) @skip_no_octavia def test_loadbalancer_delete(self): with mock.patch.object(self.driver.client, 'cast') as cast_method: self.driver.loadbalancer_delete(self.ref_lb) cast_method.assert_called_with({}, 'loadbalancer_delete', cascade=False, loadbalancer=mock.ANY) driver_obj = cast_method.call_args[1]['loadbalancer'] self.assertIn('id', driver_obj) self.assertIn('project_id', driver_obj) @skip_no_octavia def test_loadbalancer_update(self): with mock.patch.object(self.driver.client, 'cast') as cast_method: self.driver.loadbalancer_update(self.ref_lb, self.ref_lb) cast_method.assert_called_with({}, 
'loadbalancer_update', old_loadbalancer=mock.ANY, new_loadbalancer=mock.ANY) driver_obj = cast_method.call_args[1]['new_loadbalancer'] self.assertIn('id', driver_obj) self.assertIn('project_id', driver_obj) @skip_no_octavia def test_listener_create(self): with mock.patch.object(self.driver.client, 'cast') as cast_method: self.driver.listener_create(self.ref_listener) cast_method.assert_called_with({}, 'listener_create', cert=None, listener=mock.ANY) driver_obj = cast_method.call_args[1]['listener'] self.assertIn('id', driver_obj) self.assertIn('project_id', driver_obj) self.assertIn('admin_state_up', driver_obj) self.assertIn('name', driver_obj) self.assertIn('loadbalancer_id', driver_obj) self.assertIn('loadbalancer', driver_obj) self.assertEqual(self.ref_listener.protocol, driver_obj['protocol']) self.assertEqual(self.ref_listener.protocol_port, driver_obj['protocol_port']) self.assertEqual(self.ref_listener.connection_limit, driver_obj['connection_limit']) self.assertIn('l7policies', driver_obj) #TODO(asarfaty) add after the driver is fixed #self.assertIn('default_tls_container_id', driver_obj) @skip_no_octavia def test_listener_delete(self): with mock.patch.object(self.driver.client, 'cast') as cast_method: self.driver.listener_delete(self.ref_listener) cast_method.assert_called_with({}, 'listener_delete', listener=mock.ANY) driver_obj = cast_method.call_args[1]['listener'] self.assertIn('id', driver_obj) self.assertIn('project_id', driver_obj) @skip_no_octavia def test_listener_update(self): with mock.patch.object(self.driver.client, 'cast') as cast_method: self.driver.listener_update(self.ref_listener, self.ref_listener) cast_method.assert_called_with({}, 'listener_update', cert=None, old_listener=mock.ANY, new_listener=mock.ANY) driver_obj = cast_method.call_args[1]['new_listener'] self.assertIn('id', driver_obj) self.assertIn('project_id', driver_obj) @skip_no_octavia def test_pool_create(self): with mock.patch.object(self.driver.client, 'cast') as 
cast_method: self.driver.pool_create(self.ref_pool) cast_method.assert_called_with({}, 'pool_create', pool=mock.ANY) driver_obj = cast_method.call_args[1]['pool'] self.assertIn('id', driver_obj) self.assertIn('project_id', driver_obj) self.assertIn('admin_state_up', driver_obj) self.assertIn('name', driver_obj) self.assertIn('loadbalancer_id', driver_obj) self.assertIn('listener', driver_obj) self.assertIn('listeners', driver_obj) self.assertEqual(self.ref_pool.lb_algorithm, driver_obj['lb_algorithm']) self.assertEqual(self.ref_pool.session_persistence, driver_obj['session_persistence']) self.assertIn('members', driver_obj) @skip_no_octavia def test_pool_delete(self): with mock.patch.object(self.driver.client, 'cast') as cast_method: self.driver.pool_delete(self.ref_pool) cast_method.assert_called_with({}, 'pool_delete', pool=mock.ANY) driver_obj = cast_method.call_args[1]['pool'] self.assertIn('id', driver_obj) self.assertIn('project_id', driver_obj) @skip_no_octavia def test_pool_update(self): with mock.patch.object(self.driver.client, 'cast') as cast_method: self.driver.pool_update(self.ref_pool, self.ref_pool) cast_method.assert_called_with({}, 'pool_update', old_pool=mock.ANY, new_pool=mock.ANY) driver_obj = cast_method.call_args[1]['new_pool'] self.assertIn('id', driver_obj) self.assertIn('project_id', driver_obj) @skip_no_octavia def test_member_create(self): with mock.patch.object(self.driver.client, 'cast') as cast_method: self.driver.member_create(self.ref_member) cast_method.assert_called_with({}, 'member_create', member=mock.ANY) driver_obj = cast_method.call_args[1]['member'] self.assertIn('id', driver_obj) self.assertIn('project_id', driver_obj) self.assertIn('admin_state_up', driver_obj) self.assertIn('name', driver_obj) self.assertEqual(self.pool_id, driver_obj['pool_id']) self.assertIn('pool', driver_obj) self.assertIn('loadbalancer', driver_obj['pool']) #TODO(asarfaty) add when the driver is fixed #self.assertIn('listener', driver_obj['pool']) 
self.assertEqual(self.ref_member.subnet_id, driver_obj['subnet_id']) self.assertEqual(self.ref_member.address, driver_obj['address']) self.assertEqual(self.ref_member.protocol_port, driver_obj['protocol_port']) self.assertEqual(self.ref_member.weight, driver_obj['weight']) @skip_no_octavia def test_member_delete(self): with mock.patch.object(self.driver.client, 'cast') as cast_method: self.driver.member_delete(self.ref_member) cast_method.assert_called_with({}, 'member_delete', member=mock.ANY) driver_obj = cast_method.call_args[1]['member'] self.assertIn('id', driver_obj) self.assertIn('project_id', driver_obj) @skip_no_octavia def test_member_update(self): with mock.patch.object(self.driver.client, 'cast') as cast_method: self.driver.member_update(self.ref_member, self.ref_member) cast_method.assert_called_with({}, 'member_update', old_member=mock.ANY, new_member=mock.ANY) driver_obj = cast_method.call_args[1]['old_member'] self.assertIn('id', driver_obj) self.assertIn('project_id', driver_obj) @skip_no_octavia def test_health_monitor_create(self): with mock.patch.object(self.driver.client, 'cast') as cast_method: self.driver.health_monitor_create(self.ref_healthmonitor) cast_method.assert_called_with({}, 'healthmonitor_create', healthmonitor=mock.ANY) driver_obj = cast_method.call_args[1]['healthmonitor'] self.assertIn('id', driver_obj) self.assertIn('project_id', driver_obj) self.assertIn('admin_state_up', driver_obj) self.assertIn('name', driver_obj) self.assertEqual(self.ref_healthmonitor.type, driver_obj['type']) self.assertEqual(self.ref_healthmonitor.url_path, driver_obj['url_path']) self.assertEqual(self.ref_healthmonitor.delay, driver_obj['delay']) self.assertEqual(self.ref_healthmonitor.timeout, driver_obj['timeout']) self.assertEqual(self.ref_healthmonitor.max_retries, driver_obj['max_retries']) self.assertEqual(self.ref_healthmonitor.http_method, driver_obj['http_method']) self.assertIn('pool', driver_obj) self.assertEqual(self.pool_id, 
driver_obj['pool']['id']) self.assertEqual(self.loadbalancer_id, driver_obj['pool']['loadbalancer_id']) @skip_no_octavia def test_health_monitor_delete(self): with mock.patch.object(self.driver.client, 'cast') as cast_method: self.driver.health_monitor_delete(self.ref_healthmonitor) cast_method.assert_called_with({}, 'healthmonitor_delete', healthmonitor=mock.ANY) driver_obj = cast_method.call_args[1]['healthmonitor'] self.assertIn('id', driver_obj) self.assertIn('project_id', driver_obj) @skip_no_octavia def test_health_monitor_update(self): with mock.patch.object(self.driver.client, 'cast') as cast_method: self.driver.health_monitor_update(self.ref_healthmonitor, self.ref_healthmonitor) cast_method.assert_called_with({}, 'healthmonitor_update', old_healthmonitor=mock.ANY, new_healthmonitor=mock.ANY) driver_obj = cast_method.call_args[1]['new_healthmonitor'] self.assertIn('id', driver_obj) self.assertIn('project_id', driver_obj) @skip_no_octavia def test_l7policy_create(self): with mock.patch.object(self.driver.client, 'cast') as cast_method: self.driver.l7policy_create(self.ref_l7policy) cast_method.assert_called_with({}, 'l7policy_create', l7policy=mock.ANY) driver_obj = cast_method.call_args[1]['l7policy'] self.assertIn('id', driver_obj) self.assertIn('project_id', driver_obj) self.assertIn('admin_state_up', driver_obj) self.assertIn('name', driver_obj) self.assertIn('listener', driver_obj) self.assertEqual(self.listener_id, driver_obj['listener_id']) self.assertIn('rules', driver_obj) self.assertIn('position', driver_obj) self.assertEqual(self.ref_l7policy.action, driver_obj['action']) self.assertEqual(self.ref_l7policy.redirect_url, driver_obj['redirect_url']) self.assertEqual(self.ref_l7policy.redirect_pool_id, driver_obj['redirect_pool_id']) @skip_no_octavia def test_l7policy_delete(self): with mock.patch.object(self.driver.client, 'cast') as cast_method: self.driver.l7policy_delete(self.ref_l7policy) cast_method.assert_called_with({}, 'l7policy_delete', 
l7policy=mock.ANY) driver_obj = cast_method.call_args[1]['l7policy'] self.assertIn('id', driver_obj) self.assertIn('project_id', driver_obj) @skip_no_octavia def test_l7policy_update(self): with mock.patch.object(self.driver.client, 'cast') as cast_method: self.driver.l7policy_update(self.ref_l7policy, self.ref_l7policy) cast_method.assert_called_with({}, 'l7policy_update', old_l7policy=mock.ANY, new_l7policy=mock.ANY) driver_obj = cast_method.call_args[1]['new_l7policy'] self.assertIn('id', driver_obj) self.assertIn('project_id', driver_obj) @skip_no_octavia def test_l7rule_create(self): with mock.patch.object(self.driver.client, 'cast') as cast_method: self.driver.l7rule_create(self.ref_l7rule) cast_method.assert_called_with({}, 'l7rule_create', l7rule=mock.ANY) driver_obj = cast_method.call_args[1]['l7rule'] self.assertIn('id', driver_obj) self.assertIn('project_id', driver_obj) self.assertIn('admin_state_up', driver_obj) self.assertIn('name', driver_obj) self.assertIn('policy', driver_obj) self.assertIn('rules', driver_obj['policy']) self.assertEqual(self.ref_l7rule.type, driver_obj['type']) self.assertEqual(self.ref_l7rule.value, driver_obj['value']) self.assertEqual(self.ref_l7rule.invert, driver_obj['invert']) @skip_no_octavia def test_l7rule_delete(self): with mock.patch.object(self.driver.client, 'cast') as cast_method: self.driver.l7rule_delete(self.ref_l7rule) cast_method.assert_called_with({}, 'l7rule_delete', l7rule=mock.ANY) driver_obj = cast_method.call_args[1]['l7rule'] self.assertIn('id', driver_obj) self.assertIn('project_id', driver_obj) @skip_no_octavia def test_l7rule_update(self): with mock.patch.object(self.driver.client, 'cast') as cast_method: self.driver.l7rule_update(self.ref_l7rule, self.ref_l7rule) cast_method.assert_called_with({}, 'l7rule_update', old_l7rule=mock.ANY, new_l7rule=mock.ANY) driver_obj = cast_method.call_args[1]['new_l7rule'] self.assertIn('id', driver_obj) self.assertIn('project_id', driver_obj) 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/lbaas/test_octavia_listener.py0000644000175000017500000004076000000000000031371 0ustar00coreycorey00000000000000# Copyright 2018 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import testtools from neutron_lib import exceptions from oslo_utils import uuidutils from vmware_nsx.services.lbaas.octavia import octavia_listener class DummyOctaviaResource(object): create_called = False update_called = False delete_called = False delete_cascade_called = False to_raise = False def create(self, ctx, lb_obj, completor_func, **args): self.create_called = True if self.to_raise: raise exceptions.InvalidInput(error_message='test') else: completor_func(success=True) def update(self, ctx, old_lb_obj, new_lb_obj, completor_func, **args): self.update_called = True if self.to_raise: raise exceptions.InvalidInput(error_message='test') else: completor_func(success=True) def delete(self, ctx, lb_obj, completor_func, **args): self.delete_called = True if self.to_raise: raise exceptions.InvalidInput(error_message='test') else: completor_func(success=True) def delete_cascade(self, ctx, lb_obj, completor_func, **args): self.delete_cascade_called = True if self.to_raise: raise exceptions.InvalidInput(error_message='test') else: completor_func(success=True) class 
TestNsxOctaviaListener(testtools.TestCase): """Test the NSX Octavia listener""" def setUp(self): super(TestNsxOctaviaListener, self).setUp() self.dummyResource = DummyOctaviaResource() self.clientMock = mock.Mock() self.clientMock.cast = mock.Mock() self.endpoint = octavia_listener.NSXOctaviaListenerEndpoint( client=self.clientMock, loadbalancer=self.dummyResource, listener=self.dummyResource, pool=self.dummyResource, member=self.dummyResource, healthmonitor=self.dummyResource, l7policy=self.dummyResource, l7rule=self.dummyResource) self.dummyObj = {'project_id': uuidutils.generate_uuid(), 'id': uuidutils.generate_uuid()} self.ctx = None self.mock_ctx = mock.patch("neutron_lib.context.Context") self.mock_ctx.start() def tearDown(self): self.mock_ctx.stop() super(TestNsxOctaviaListener, self).tearDown() def test_loadbalancer_create(self): self.dummyResource.create_called = False self.endpoint.loadbalancer_create(self.ctx, self.dummyObj) self.assertTrue(self.dummyResource.create_called) self.clientMock.cast.assert_called_once_with( {}, 'update_loadbalancer_status', status={'loadbalancers': [ {'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'id': mock.ANY}]}) def test_loadbalancer_create_failed(self): self.dummyResource.create_called = False self.dummyResource.to_raise = True self.endpoint.loadbalancer_create(self.ctx, self.dummyObj) self.assertTrue(self.dummyResource.create_called) self.clientMock.cast.assert_called_once_with( {}, 'update_loadbalancer_status', status={'loadbalancers': [ {'operating_status': 'ERROR', 'provisioning_status': 'ERROR', 'id': mock.ANY}]}) self.dummyResource.to_raise = False def test_loadbalancer_delete(self): self.dummyResource.delete_called = False self.endpoint.loadbalancer_delete(self.ctx, self.dummyObj) self.assertTrue(self.dummyResource.delete_called) self.clientMock.cast.assert_called_once_with( {}, 'update_loadbalancer_status', status={'loadbalancers': [ {'operating_status': 'ONLINE', 'provisioning_status': 'DELETED', 
'id': mock.ANY}]}) def test_loadbalancer_delete_cascade(self): self.dummyResource.delete_called = False self.endpoint.loadbalancer_delete(self.ctx, self.dummyObj, cascade=True) self.assertTrue(self.dummyResource.delete_cascade_called) self.clientMock.cast.assert_called_once_with( {}, 'update_loadbalancer_status', status={ 'loadbalancers': [{'operating_status': 'ONLINE', 'provisioning_status': 'DELETED', 'id': mock.ANY}], 'l7policies': [], 'pools': [], 'listeners': [], 'l7rules': [], 'members': [], 'healthmonitors': []}) def test_loadbalancer_update(self): self.dummyResource.update_called = False self.endpoint.loadbalancer_update(self.ctx, self.dummyObj, self.dummyObj) self.assertTrue(self.dummyResource.update_called) self.clientMock.cast.assert_called_once_with( {}, 'update_loadbalancer_status', status={'loadbalancers': [ {'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'id': mock.ANY}]}) def test_listener_create(self): self.dummyResource.create_called = False self.endpoint.listener_create(self.ctx, self.dummyObj, None) self.assertTrue(self.dummyResource.create_called) self.clientMock.cast.assert_called_once_with( {}, 'update_loadbalancer_status', status={'listeners': [ {'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'id': mock.ANY}]}) def test_listener_delete(self): self.dummyResource.delete_called = False self.endpoint.listener_delete(self.ctx, self.dummyObj) self.assertTrue(self.dummyResource.delete_called) self.clientMock.cast.assert_called_once_with( {}, 'update_loadbalancer_status', status={'listeners': [ {'operating_status': 'ONLINE', 'provisioning_status': 'DELETED', 'id': mock.ANY}]}) def test_listener_update(self): self.dummyResource.update_called = False self.endpoint.listener_update(self.ctx, self.dummyObj, self.dummyObj, None) self.assertTrue(self.dummyResource.update_called) self.clientMock.cast.assert_called_once_with( {}, 'update_loadbalancer_status', status={'listeners': [ {'operating_status': 'ONLINE', 
'provisioning_status': 'ACTIVE', 'id': mock.ANY}]}) def test_pool_create(self): self.dummyResource.create_called = False self.endpoint.pool_create(self.ctx, self.dummyObj) self.assertTrue(self.dummyResource.create_called) self.clientMock.cast.assert_called_once_with( {}, 'update_loadbalancer_status', status={'pools': [ {'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'id': mock.ANY}]}) def test_pool_delete(self): self.dummyResource.delete_called = False lb_id = uuidutils.generate_uuid() listener_id = uuidutils.generate_uuid() pool_id = uuidutils.generate_uuid() pool_obj = { 'id': pool_id, 'pool_id': pool_id, 'project_id': uuidutils.generate_uuid(), 'listener_id': listener_id, 'loadbalancer_id': lb_id, 'listener': {'protocol': 'HTTP', 'id': listener_id, 'default_pool_id': pool_id, 'loadbalancer': {'id': lb_id}}} self.endpoint.pool_delete(self.ctx, pool_obj) self.assertTrue(self.dummyResource.delete_called) self.clientMock.cast.assert_called_once_with( {}, 'update_loadbalancer_status', status={'loadbalancers': [{'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'id': lb_id}], 'pools': [{'operating_status': 'ONLINE', 'provisioning_status': 'DELETED', 'id': pool_id}], 'listeners': [{'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'id': listener_id}], }) def test_pool_update(self): self.dummyResource.update_called = False self.endpoint.pool_update(self.ctx, self.dummyObj, self.dummyObj) self.assertTrue(self.dummyResource.update_called) self.clientMock.cast.assert_called_once_with( {}, 'update_loadbalancer_status', status={'pools': [ {'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'id': mock.ANY}]}) def test_member_create(self): self.dummyResource.create_called = False lb_id = uuidutils.generate_uuid() pool_id = uuidutils.generate_uuid() listener_id = uuidutils.generate_uuid() member_id = uuidutils.generate_uuid() member_obj = { 'id': member_id, 'protocol_port': 80, 'name': 'dummy', 'pool_id': pool_id, 
'project_id': uuidutils.generate_uuid(), 'pool': {'listener_id': listener_id, 'id': pool_id, 'loadbalancer_id': lb_id, 'listener': {'protocol': 'HTTP', 'id': listener_id, 'default_pool_id': pool_id, 'loadbalancer': {'id': lb_id}}}} self.endpoint.member_create(self.ctx, member_obj) self.assertTrue(self.dummyResource.create_called) self.clientMock.cast.assert_called_once_with( {}, 'update_loadbalancer_status', status={'members': [{'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'id': member_id}], 'loadbalancers': [{'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'id': lb_id}], 'pools': [{'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'id': pool_id}], 'listeners': [{'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'id': listener_id}], }) def test_member_delete(self): self.dummyResource.delete_called = False self.endpoint.member_delete(self.ctx, self.dummyObj) self.assertTrue(self.dummyResource.delete_called) self.clientMock.cast.assert_called_once_with( {}, 'update_loadbalancer_status', status={'members': [ {'operating_status': 'ONLINE', 'provisioning_status': 'DELETED', 'id': mock.ANY}]}) def test_member_update(self): self.dummyResource.update_called = False self.endpoint.member_update(self.ctx, self.dummyObj, self.dummyObj) self.assertTrue(self.dummyResource.update_called) self.clientMock.cast.assert_called_once_with( {}, 'update_loadbalancer_status', status={'members': [ {'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'id': mock.ANY}]}) def test_healthmonitor_create(self): self.dummyResource.create_called = False self.endpoint.healthmonitor_create(self.ctx, self.dummyObj) self.assertTrue(self.dummyResource.create_called) self.clientMock.cast.assert_called_once_with( {}, 'update_loadbalancer_status', status={'healthmonitors': [ {'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'id': mock.ANY}]}) def test_healthmonitor_delete(self): self.dummyResource.delete_called = False 
self.endpoint.healthmonitor_delete(self.ctx, self.dummyObj) self.assertTrue(self.dummyResource.delete_called) self.clientMock.cast.assert_called_once_with( {}, 'update_loadbalancer_status', status={'healthmonitors': [ {'operating_status': 'ONLINE', 'provisioning_status': 'DELETED', 'id': mock.ANY}]}) def test_healthmonitor_update(self): self.dummyResource.update_called = False self.endpoint.healthmonitor_update(self.ctx, self.dummyObj, self.dummyObj) self.assertTrue(self.dummyResource.update_called) self.clientMock.cast.assert_called_once_with( {}, 'update_loadbalancer_status', status={'healthmonitors': [ {'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'id': mock.ANY}]}) def test_l7policy_create(self): self.dummyResource.create_called = False self.endpoint.l7policy_create(self.ctx, self.dummyObj) self.assertTrue(self.dummyResource.create_called) self.clientMock.cast.assert_called_once_with( {}, 'update_loadbalancer_status', status={'l7policies': [ {'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'id': mock.ANY}]}) def test_l7policy_delete(self): self.dummyResource.delete_called = False self.endpoint.l7policy_delete(self.ctx, self.dummyObj) self.assertTrue(self.dummyResource.delete_called) self.clientMock.cast.assert_called_once_with( {}, 'update_loadbalancer_status', status={'l7policies': [ {'operating_status': 'ONLINE', 'provisioning_status': 'DELETED', 'id': mock.ANY}]}) def test_l7policy_update(self): self.dummyResource.update_called = False self.endpoint.l7policy_update(self.ctx, self.dummyObj, self.dummyObj) self.assertTrue(self.dummyResource.update_called) self.clientMock.cast.assert_called_once_with( {}, 'update_loadbalancer_status', status={'l7policies': [ {'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'id': mock.ANY}]}) def test_l7rule_create(self): self.dummyResource.create_called = False self.endpoint.l7rule_create(self.ctx, self.dummyObj) self.assertTrue(self.dummyResource.create_called) 
self.clientMock.cast.assert_called_once_with( {}, 'update_loadbalancer_status', status={'l7rules': [ {'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'id': mock.ANY}]}) def test_l7rule_delete(self): self.dummyResource.delete_called = False self.endpoint.l7rule_delete(self.ctx, self.dummyObj) self.assertTrue(self.dummyResource.delete_called) self.clientMock.cast.assert_called_once_with( {}, 'update_loadbalancer_status', status={'l7rules': [ {'operating_status': 'ONLINE', 'provisioning_status': 'DELETED', 'id': mock.ANY}]}) def test_l7rule_update(self): self.dummyResource.update_called = False self.endpoint.l7rule_update(self.ctx, self.dummyObj, self.dummyObj) self.assertTrue(self.dummyResource.update_called) self.clientMock.cast.assert_called_once_with( {}, 'update_loadbalancer_status', status={'l7rules': [ {'operating_status': 'ONLINE', 'provisioning_status': 'ACTIVE', 'id': mock.ANY}]}) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2462552 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/qos/0000755000175000017500000000000000000000000024136 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/qos/__init__.py0000644000175000017500000000000000000000000026235 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/qos/test_nsxp_notification.py0000644000175000017500000004054600000000000031316 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib import context from neutron_lib import exceptions from neutron_lib.objects import registry as obj_reg from oslo_config import cfg from oslo_utils import uuidutils from neutron.services.qos import qos_plugin from neutron.tests.unit.services.qos import base from vmware_nsx.common import utils from vmware_nsx.plugins.nsx_v3 import utils as v3_utils from vmware_nsx.services.qos.nsx_v3 import driver as qos_driver from vmware_nsx.services.qos.nsx_v3 import pol_utils as qos_utils from vmware_nsx.tests.unit.nsx_p import test_plugin from vmware_nsxlib.v3.policy import core_defs as policy_defs PLUGIN_NAME = 'vmware_nsx.plugins.nsx_p.plugin.NsxPolicyPlugin' QoSPolicy = obj_reg.load_class('QosPolicy') QosBandwidthLimitRule = obj_reg.load_class('QosBandwidthLimitRule') QosDscpMarkingRule = obj_reg.load_class('QosDscpMarkingRule') QosMinimumBandwidthRule = obj_reg.load_class('QosMinimumBandwidthRule') class TestQosNsxPNotification(base.BaseQosTestCase, test_plugin.NsxPPluginTestCaseMixin): def setUp(self): # Reset the drive to re-create it qos_driver.DRIVER = None super(TestQosNsxPNotification, self).setUp() self.setup_coreplugin(PLUGIN_NAME) self.qos_plugin = qos_plugin.QoSPlugin() self.ctxt = context.Context('fake_user', 'fake_tenant') mock.patch.object(self.ctxt.session, 'refresh').start() mock.patch.object(self.ctxt.session, 'expunge').start() policy_id = uuidutils.generate_uuid() self.project_id = uuidutils.generate_uuid() self.policy_data = { 'policy': {'id': policy_id, 'project_id': self.project_id, 'name': 'test-policy', 'description': 'Test 
policy description', 'shared': True}} self.rule_data = { 'bandwidth_limit_rule': {'id': uuidutils.generate_uuid(), 'max_kbps': 2000, 'max_burst_kbps': 150}} self.ingress_rule_data = { 'bandwidth_limit_rule': {'id': uuidutils.generate_uuid(), 'max_kbps': 3000, 'max_burst_kbps': 350, 'direction': 'ingress'}} self.dscp_rule_data = { 'dscp_marking_rule': {'id': uuidutils.generate_uuid(), 'dscp_mark': 22}} self.policy = QoSPolicy( self.ctxt, **self.policy_data['policy']) # egress BW limit rule self.rule = QosBandwidthLimitRule( self.ctxt, **self.rule_data['bandwidth_limit_rule']) # ingress bw limit rule self.ingress_rule = QosBandwidthLimitRule( self.ctxt, **self.ingress_rule_data['bandwidth_limit_rule']) self.dscp_rule = QosDscpMarkingRule( self.ctxt, **self.dscp_rule_data['dscp_marking_rule']) self.fake_profile = {'id': policy_id} mock.patch('neutron.objects.db.api.create_object').start() mock.patch('neutron.objects.db.api.update_object').start() mock.patch('neutron.objects.db.api.delete_object').start() self.peak_bw_multiplier = cfg.CONF.NSX.qos_peak_bw_multiplier self.nsxlib = v3_utils.get_nsxlib_wrapper() def _get_expected_tags(self): policy_dict = {'id': self.policy.id, 'tenant_id': self.project_id} return self.nsxlib.build_v3_tags_payload( policy_dict, resource_type='os-neutron-qos-id', project_name=self.ctxt.tenant_name) @mock.patch.object(QoSPolicy, 'create_rbac_policy') def test_policy_create_profile(self, *mocks): # test the profile creation when a QoS policy is created with mock.patch('vmware_nsxlib.v3.policy.core_resources.' 
'NsxQosProfileApi.create_or_overwrite', return_value=self.fake_profile) as create_profile,\ mock.patch.object(QoSPolicy, 'get_object', return_value=self.policy),\ mock.patch.object(QoSPolicy, 'create'): self.qos_plugin.create_policy(self.ctxt, self.policy_data) exp_name = utils.get_name_and_uuid(self.policy.name, self.policy.id) create_profile.assert_called_once_with( exp_name, profile_id=self.policy.id, description=self.policy_data["policy"]["description"], dscp=None, shaper_configurations=[], tags=self._get_expected_tags()) @mock.patch.object(QoSPolicy, '_reload_rules') def test_bw_rule_create_profile(self, *mocks): # test the profile update when an egress QoS BW rule is created _policy = QoSPolicy( self.ctxt, **self.policy_data['policy']) # add a rule to the policy setattr(_policy, "rules", [self.rule]) with mock.patch.object(QoSPolicy, 'get_object', return_value=_policy),\ mock.patch('vmware_nsxlib.v3.policy.core_resources.' 'NsxQosProfileApi.' 'create_or_overwrite') as create_profile,\ mock.patch('neutron.objects.db.api.update_object', return_value=self.rule_data): self.qos_plugin.update_policy_bandwidth_limit_rule( self.ctxt, self.rule.id, _policy.id, self.rule_data) # validate the data on the profile rule_dict = self.rule_data['bandwidth_limit_rule'] expected_bw = int(round(float( rule_dict['max_kbps']) / 1024)) expected_burst = rule_dict['max_burst_kbps'] * 128 expected_peak = int(expected_bw * self.peak_bw_multiplier) exp_name = utils.get_name_and_uuid(self.policy.name, self.policy.id) # egress neutron rule -> ingress nsx args shaper_type = policy_defs.QoSRateLimiter.INGRESS_RATE_LIMITER_TYPE expected_shaper = policy_defs.QoSRateLimiter( resource_type=shaper_type, enabled=True, burst_size=expected_burst, peak_bandwidth=expected_peak, average_bandwidth=expected_bw) create_profile.assert_called_once_with( exp_name, profile_id=self.policy.id, description=self.policy_data["policy"]["description"], dscp=None, shaper_configurations=[mock.ANY], 
tags=self._get_expected_tags()) # Compare the shaper actual_shaper = create_profile.call_args[1][ 'shaper_configurations'][0] self.assertEqual(expected_shaper.get_obj_dict(), actual_shaper.get_obj_dict()) @mock.patch.object(QoSPolicy, '_reload_rules') def test_ingress_bw_rule_create_profile(self, *mocks): # test the profile update when a ingress QoS BW rule is created _policy = QoSPolicy( self.ctxt, **self.policy_data['policy']) # add a rule to the policy setattr(_policy, "rules", [self.ingress_rule]) with mock.patch.object(QoSPolicy, 'get_object', return_value=_policy),\ mock.patch('vmware_nsxlib.v3.policy.core_resources.' 'NsxQosProfileApi.' 'create_or_overwrite') as create_profile,\ mock.patch('neutron.objects.db.api.update_object', return_value=self.ingress_rule_data): self.qos_plugin.update_policy_bandwidth_limit_rule( self.ctxt, self.ingress_rule.id, _policy.id, self.ingress_rule_data) # validate the data on the profile rule_dict = self.ingress_rule_data['bandwidth_limit_rule'] expected_bw = int(round(float( rule_dict['max_kbps']) / 1024)) expected_burst = rule_dict['max_burst_kbps'] * 128 expected_peak = int(expected_bw * self.peak_bw_multiplier) exp_name = utils.get_name_and_uuid(self.policy.name, self.policy.id) # ingress neutron rule -> egress nsx args shaper_type = policy_defs.QoSRateLimiter.EGRESS_RATE_LIMITER_TYPE expected_shaper = policy_defs.QoSRateLimiter( resource_type=shaper_type, enabled=True, burst_size=expected_burst, peak_bandwidth=expected_peak, average_bandwidth=expected_bw) create_profile.assert_called_once_with( exp_name, profile_id=self.policy.id, description=self.policy_data["policy"]["description"], dscp=None, shaper_configurations=[mock.ANY], tags=self._get_expected_tags()) # Compare the shaper actual_shaper = create_profile.call_args[1][ 'shaper_configurations'][0] self.assertEqual(expected_shaper.get_obj_dict(), actual_shaper.get_obj_dict()) @mock.patch.object(QoSPolicy, '_reload_rules') def 
test_bw_rule_create_profile_minimal_val(self, *mocks): # test driver precommit with an invalid limit value bad_limit = qos_utils.MAX_KBPS_MIN_VALUE - 1 rule_data = { 'bandwidth_limit_rule': {'id': uuidutils.generate_uuid(), 'max_kbps': bad_limit, 'max_burst_kbps': 150}} rule = QosBandwidthLimitRule( self.ctxt, **rule_data['bandwidth_limit_rule']) _policy = QoSPolicy( self.ctxt, **self.policy_data['policy']) # add a rule to the policy setattr(_policy, "rules", [rule]) with mock.patch.object(QoSPolicy, 'get_object', return_value=_policy),\ mock.patch('neutron.objects.db.api.update_object', return_value=rule_data): self.assertRaises( exceptions.DriverCallError, self.qos_plugin.update_policy_bandwidth_limit_rule, self.ctxt, rule.id, _policy.id, rule_data) @mock.patch.object(QoSPolicy, '_reload_rules') def test_bw_rule_create_profile_maximal_val(self, *mocks): # test driver precommit with an invalid burst value bad_burst = qos_utils.MAX_BURST_MAX_VALUE + 1 rule_data = { 'bandwidth_limit_rule': {'id': uuidutils.generate_uuid(), 'max_kbps': 1025, 'max_burst_kbps': bad_burst}} rule = QosBandwidthLimitRule( self.ctxt, **rule_data['bandwidth_limit_rule']) _policy = QoSPolicy( self.ctxt, **self.policy_data['policy']) # add a rule to the policy setattr(_policy, "rules", [rule]) with mock.patch.object( QoSPolicy, 'get_object', return_value=_policy), mock.patch( 'neutron.objects.db.api.update_object', return_value=rule_data): self.assertRaises( exceptions.DriverCallError, self.qos_plugin.update_policy_bandwidth_limit_rule, self.ctxt, rule.id, _policy.id, rule_data) @mock.patch.object(QoSPolicy, '_reload_rules') def test_dscp_rule_create_profile(self, *mocks): # test the profile update when a QoS DSCP rule is created _policy = QoSPolicy( self.ctxt, **self.policy_data['policy']) # add a rule to the policy setattr(_policy, "rules", [self.dscp_rule]) with mock.patch.object(QoSPolicy, 'get_object', return_value=_policy),\ mock.patch('vmware_nsxlib.v3.policy.core_resources.' 
'NsxQosProfileApi.' 'create_or_overwrite') as create_profile,\ mock.patch('neutron.objects.db.api.update_object', return_value=self.dscp_rule_data): self.qos_plugin.update_policy_dscp_marking_rule( self.ctxt, self.dscp_rule.id, _policy.id, self.dscp_rule_data) # validate the data on the profile rule_dict = self.dscp_rule_data['dscp_marking_rule'] dscp_mark = rule_dict['dscp_mark'] exp_name = utils.get_name_and_uuid(self.policy.name, self.policy.id) expected_dscp = policy_defs.QoSDscp( mode=policy_defs.QoSDscp.QOS_DSCP_UNTRUSTED, priority=dscp_mark) create_profile.assert_called_once_with( exp_name, profile_id=self.policy.id, description=self.policy_data["policy"]["description"], dscp=mock.ANY, shaper_configurations=[], tags=self._get_expected_tags()) # Compare the dscp obj actual_dscp = create_profile.call_args[1]['dscp'] self.assertEqual(expected_dscp.get_obj_dict(), actual_dscp.get_obj_dict()) @mock.patch.object(QoSPolicy, '_reload_rules') def test_minimum_bw_rule_create_profile(self, *mocks): # Minimum BW rules are not supported policy = QoSPolicy( self.ctxt, **self.policy_data['policy']) min_bw_rule_data = { 'minimum_bandwidth_rule': {'id': uuidutils.generate_uuid(), 'min_kbps': 10, 'direction': 'egress'}} min_bw_rule = QosMinimumBandwidthRule( self.ctxt, **min_bw_rule_data['minimum_bandwidth_rule']) # add a rule to the policy setattr(policy, "rules", [min_bw_rule]) with mock.patch.object( QoSPolicy, 'get_object', return_value=policy),\ mock.patch('neutron.objects.db.api.' 
'update_object', return_value=self.dscp_rule_data): self.assertRaises( exceptions.DriverCallError, self.qos_plugin.update_policy_minimum_bandwidth_rule, self.ctxt, min_bw_rule.id, policy.id, min_bw_rule_data) def test_rule_delete_profile(self): # test the profile update when a QoS rule is deleted _policy = QoSPolicy( self.ctxt, **self.policy_data['policy']) # The mock will return the policy without the rule, # as if it was deleted with mock.patch.object(QoSPolicy, 'get_object', return_value=_policy),\ mock.patch('vmware_nsxlib.v3.policy.core_resources.' 'NsxQosProfileApi.' 'create_or_overwrite') as set_profile: setattr(_policy, "rules", [self.rule]) self.qos_plugin.delete_policy_bandwidth_limit_rule( self.ctxt, self.rule.id, self.policy.id) # validate the data on the profile exp_name = utils.get_name_and_uuid(self.policy.name, self.policy.id) set_profile.assert_called_once_with( exp_name, profile_id=self.policy.id, description=self.policy_data["policy"]["description"], dscp=None, shaper_configurations=[], tags=self._get_expected_tags()) @mock.patch('neutron.objects.db.api.get_object', return_value=None) def test_policy_delete_profile(self, *mocks): # test the profile deletion when a QoS policy is deleted with mock.patch('vmware_nsxlib.v3.policy.core_resources.' 'NsxQosProfileApi.delete') as delete_profile: self.qos_plugin.delete_policy(self.ctxt, self.policy.id) delete_profile.assert_called_once_with(self.policy.id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/qos/test_nsxv3_notification.py0000644000175000017500000004272500000000000031410 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from neutron_lib import context from neutron_lib import exceptions from neutron_lib.objects import registry as obj_reg from neutron_lib.objects import utils as obj_utils from oslo_config import cfg from oslo_utils import uuidutils from neutron.services.qos import qos_plugin from neutron.tests.unit.services.qos import base from vmware_nsx.db import db as nsx_db from vmware_nsx.plugins.nsx_v3 import utils as v3_utils from vmware_nsx.services.qos.nsx_v3 import driver as qos_driver from vmware_nsx.services.qos.nsx_v3 import utils as qos_utils from vmware_nsx.tests.unit.nsx_v3 import test_plugin PLUGIN_NAME = 'vmware_nsx.plugins.nsx_v3.plugin.NsxV3Plugin' QoSPolicy = obj_reg.load_class('QosPolicy') QosBandwidthLimitRule = obj_reg.load_class('QosBandwidthLimitRule') QosDscpMarkingRule = obj_reg.load_class('QosDscpMarkingRule') QosMinimumBandwidthRule = obj_reg.load_class('QosMinimumBandwidthRule') class TestQosNsxV3Notification(base.BaseQosTestCase, test_plugin.NsxV3PluginTestCaseMixin): def setUp(self): # Reset the drive to re-create it qos_driver.DRIVER = None super(TestQosNsxV3Notification, self).setUp() self.setup_coreplugin(PLUGIN_NAME) self.qos_plugin = qos_plugin.QoSPlugin() self.ctxt = context.Context('fake_user', 'fake_tenant') mock.patch.object(self.ctxt.session, 'refresh').start() mock.patch.object(self.ctxt.session, 'expunge').start() self.policy_data = { 'policy': {'id': uuidutils.generate_uuid(), 'project_id': uuidutils.generate_uuid(), 'name': 'test-policy', 'description': 'Test policy description', 'shared': True}} self.rule_data = { 
'bandwidth_limit_rule': {'id': uuidutils.generate_uuid(), 'max_kbps': 2000, 'max_burst_kbps': 150}} self.ingress_rule_data = { 'bandwidth_limit_rule': {'id': uuidutils.generate_uuid(), 'max_kbps': 3000, 'max_burst_kbps': 350, 'direction': 'ingress'}} self.dscp_rule_data = { 'dscp_marking_rule': {'id': uuidutils.generate_uuid(), 'dscp_mark': 22}} self.policy = QoSPolicy( self.ctxt, **self.policy_data['policy']) # egress BW limit rule self.rule = QosBandwidthLimitRule( self.ctxt, **self.rule_data['bandwidth_limit_rule']) # ingress bw limit rule self.ingress_rule = QosBandwidthLimitRule( self.ctxt, **self.ingress_rule_data['bandwidth_limit_rule']) self.dscp_rule = QosDscpMarkingRule( self.ctxt, **self.dscp_rule_data['dscp_marking_rule']) self.fake_profile_id = 'fake_profile' self.fake_profile = {'id': self.fake_profile_id} mock.patch('neutron.objects.db.api.create_object').start() mock.patch('neutron.objects.db.api.update_object').start() mock.patch('neutron.objects.db.api.delete_object').start() mock.patch.object(nsx_db, 'get_switch_profile_by_qos_policy', return_value=self.fake_profile_id).start() self.peak_bw_multiplier = cfg.CONF.NSX.qos_peak_bw_multiplier self.nsxlib = v3_utils.get_nsxlib_wrapper() @mock.patch.object(QoSPolicy, 'create_rbac_policy') @mock.patch.object(nsx_db, 'add_qos_policy_profile_mapping') def test_policy_create_profile(self, fake_db_add, fake_rbac_create): # test the switch profile creation when a QoS policy is created with mock.patch( 'vmware_nsxlib.v3.core_resources.NsxLibQosSwitchingProfile.create', return_value=self.fake_profile ) as create_profile: with mock.patch.object( QoSPolicy, 'get_object', return_value=self.policy): with mock.patch.object(QoSPolicy, 'create'): policy = self.qos_plugin.create_policy(self.ctxt, self.policy_data) expected_tags = self.nsxlib.build_v3_tags_payload( policy, resource_type='os-neutron-qos-id', project_name=self.ctxt.tenant_name) create_profile.assert_called_once_with( 
description=self.policy_data["policy"]["description"], name=self.policy_data["policy"]["name"], tags=expected_tags) # verify that the policy->profile mapping entry was added self.assertTrue(fake_db_add.called) @mock.patch.object(QoSPolicy, 'create_rbac_policy') def __test_policy_update_profile(self, *mocks): # test the switch profile update when a QoS policy is updated fields = obj_utils.get_updatable_fields( QoSPolicy, self.policy_data['policy']) with mock.patch( 'vmware_nsxlib.v3.core_resources.NsxLibQosSwitchingProfile.update' ) as update_profile: with mock.patch.object(QoSPolicy, 'get_object', return_value=self.policy): with mock.patch.object(QoSPolicy, 'update'): self.qos_plugin.update_policy( self.ctxt, self.policy.id, {'policy': fields}) # verify that the profile was updated with the correct data self.policy_data["policy"]["id"] = self.policy.id expected_tags = self.nsxlib.build_v3_tags_payload( self.policy_data["policy"], resource_type='os-neutron-qos-id', project_name=self.ctxt.tenant_name) update_profile.assert_called_once_with( self.fake_profile_id, description=self.policy_data["policy"]["description"], name=self.policy_data["policy"]["name"], tags=expected_tags ) @mock.patch.object(QoSPolicy, '_reload_rules') def test_bw_rule_create_profile(self, *mocks): # test the switch profile update when an egress QoS BW rule is created _policy = QoSPolicy( self.ctxt, **self.policy_data['policy']) # add a rule to the policy setattr(_policy, "rules", [self.rule]) with mock.patch.object(QoSPolicy, 'get_object', return_value=_policy): with mock.patch( 'vmware_nsxlib.v3.core_resources.NsxLibQosSwitchingProfile.' 
'set_profile_shaping' ) as update_profile: with mock.patch('neutron.objects.db.api.update_object', return_value=self.rule_data): self.qos_plugin.update_policy_bandwidth_limit_rule( self.ctxt, self.rule.id, _policy.id, self.rule_data) # validate the data on the profile rule_dict = self.rule_data['bandwidth_limit_rule'] expected_bw = int(round(float( rule_dict['max_kbps']) / 1024)) expected_burst = rule_dict['max_burst_kbps'] * 128 expected_peak = int(expected_bw * self.peak_bw_multiplier) # egress neutron rule -> ingress nsx args update_profile.assert_called_once_with( self.fake_profile_id, ingress_bw_enabled=True, ingress_burst_size=expected_burst, ingress_peak_bandwidth=expected_peak, ingress_average_bandwidth=expected_bw, egress_bw_enabled=False, egress_burst_size=None, egress_peak_bandwidth=None, egress_average_bandwidth=None, dscp=0, qos_marking='trusted' ) @mock.patch.object(QoSPolicy, '_reload_rules') def test_ingress_bw_rule_create_profile(self, *mocks): # test the switch profile update when a ingress QoS BW rule is created _policy = QoSPolicy( self.ctxt, **self.policy_data['policy']) # add a rule to the policy setattr(_policy, "rules", [self.ingress_rule]) with mock.patch.object(QoSPolicy, 'get_object', return_value=_policy): with mock.patch( 'vmware_nsxlib.v3.core_resources.NsxLibQosSwitchingProfile.' 
'set_profile_shaping' ) as update_profile: with mock.patch('neutron.objects.db.api.update_object', return_value=self.ingress_rule_data): self.qos_plugin.update_policy_bandwidth_limit_rule( self.ctxt, self.ingress_rule.id, _policy.id, self.ingress_rule_data) # validate the data on the profile rule_dict = self.ingress_rule_data['bandwidth_limit_rule'] expected_bw = int(round(float( rule_dict['max_kbps']) / 1024)) expected_burst = rule_dict['max_burst_kbps'] * 128 expected_peak = int(expected_bw * self.peak_bw_multiplier) # ingress neutron rule -> egress nsx args update_profile.assert_called_once_with( self.fake_profile_id, egress_bw_enabled=True, egress_burst_size=expected_burst, egress_peak_bandwidth=expected_peak, egress_average_bandwidth=expected_bw, ingress_bw_enabled=False, ingress_burst_size=None, ingress_peak_bandwidth=None, ingress_average_bandwidth=None, dscp=0, qos_marking='trusted' ) @mock.patch.object(QoSPolicy, '_reload_rules') def test_bw_rule_create_profile_minimal_val(self, *mocks): # test driver precommit with an invalid limit value bad_limit = qos_utils.MAX_KBPS_MIN_VALUE - 1 rule_data = { 'bandwidth_limit_rule': {'id': uuidutils.generate_uuid(), 'max_kbps': bad_limit, 'max_burst_kbps': 150}} rule = QosBandwidthLimitRule( self.ctxt, **rule_data['bandwidth_limit_rule']) _policy = QoSPolicy( self.ctxt, **self.policy_data['policy']) # add a rule to the policy setattr(_policy, "rules", [rule]) with mock.patch.object(QoSPolicy, 'get_object', return_value=_policy),\ mock.patch('neutron.objects.db.api.update_object', return_value=rule_data): self.assertRaises( exceptions.DriverCallError, self.qos_plugin.update_policy_bandwidth_limit_rule, self.ctxt, rule.id, _policy.id, rule_data) @mock.patch.object(QoSPolicy, '_reload_rules') def test_bw_rule_create_profile_maximal_val(self, *mocks): # test driver precommit with an invalid burst value bad_burst = qos_utils.MAX_BURST_MAX_VALUE + 1 rule_data = { 'bandwidth_limit_rule': {'id': uuidutils.generate_uuid(), 
'max_kbps': 1025, 'max_burst_kbps': bad_burst}} rule = QosBandwidthLimitRule( self.ctxt, **rule_data['bandwidth_limit_rule']) _policy = QoSPolicy( self.ctxt, **self.policy_data['policy']) # add a rule to the policy setattr(_policy, "rules", [rule]) with mock.patch.object( QoSPolicy, 'get_object', return_value=_policy), mock.patch( 'neutron.objects.db.api.update_object', return_value=rule_data): self.assertRaises( exceptions.DriverCallError, self.qos_plugin.update_policy_bandwidth_limit_rule, self.ctxt, rule.id, _policy.id, rule_data) @mock.patch.object(QoSPolicy, '_reload_rules') def test_dscp_rule_create_profile(self, *mocks): # test the switch profile update when a QoS DSCP rule is created _policy = QoSPolicy( self.ctxt, **self.policy_data['policy']) # add a rule to the policy setattr(_policy, "rules", [self.dscp_rule]) with mock.patch.object(QoSPolicy, 'get_object', return_value=_policy): with mock.patch( 'vmware_nsxlib.v3.core_resources.NsxLibQosSwitchingProfile.' 'set_profile_shaping' ) as update_profile: with mock.patch('neutron.objects.db.api.' 
'update_object', return_value=self.dscp_rule_data): self.qos_plugin.update_policy_dscp_marking_rule( self.ctxt, self.dscp_rule.id, _policy.id, self.dscp_rule_data) # validate the data on the profile rule_dict = self.dscp_rule_data['dscp_marking_rule'] dscp_mark = rule_dict['dscp_mark'] update_profile.assert_called_once_with( self.fake_profile_id, ingress_bw_enabled=False, ingress_burst_size=None, ingress_peak_bandwidth=None, ingress_average_bandwidth=None, egress_bw_enabled=False, egress_burst_size=None, egress_peak_bandwidth=None, egress_average_bandwidth=None, dscp=dscp_mark, qos_marking='untrusted' ) @mock.patch.object(QoSPolicy, '_reload_rules') def test_minimum_bw_rule_create_profile(self, *mocks): # Minimum BW rules are not supported policy = QoSPolicy( self.ctxt, **self.policy_data['policy']) min_bw_rule_data = { 'minimum_bandwidth_rule': {'id': uuidutils.generate_uuid(), 'min_kbps': 10, 'direction': 'egress'}} min_bw_rule = QosMinimumBandwidthRule( self.ctxt, **min_bw_rule_data['minimum_bandwidth_rule']) # add a rule to the policy setattr(policy, "rules", [min_bw_rule]) with mock.patch.object( QoSPolicy, 'get_object', return_value=policy), mock.patch( 'neutron.objects.db.api.update_object', return_value=self.dscp_rule_data): self.assertRaises( exceptions.DriverCallError, self.qos_plugin.update_policy_minimum_bandwidth_rule, self.ctxt, min_bw_rule.id, policy.id, min_bw_rule_data) def test_rule_delete_profile(self): # test the switch profile update when a QoS rule is deleted _policy = QoSPolicy( self.ctxt, **self.policy_data['policy']) # The mock will return the policy without the rule, # as if it was deleted with mock.patch.object(QoSPolicy, 'get_object', return_value=_policy): with mock.patch( 'vmware_nsxlib.v3.core_resources.NsxLibQosSwitchingProfile.' 
'set_profile_shaping' ) as update_profile: setattr(_policy, "rules", [self.rule]) self.qos_plugin.delete_policy_bandwidth_limit_rule( self.ctxt, self.rule.id, self.policy.id) # validate the data on the profile update_profile.assert_called_once_with( self.fake_profile_id, ingress_bw_enabled=False, ingress_burst_size=None, ingress_peak_bandwidth=None, ingress_average_bandwidth=None, egress_bw_enabled=False, egress_burst_size=None, egress_peak_bandwidth=None, egress_average_bandwidth=None, dscp=0, qos_marking='trusted' ) @mock.patch('neutron.objects.db.api.get_object', return_value=None) def test_policy_delete_profile(self, *mocks): # test the switch profile deletion when a QoS policy is deleted with mock.patch( 'vmware_nsxlib.v3.core_resources.NsxLibQosSwitchingProfile.' 'delete', return_value=self.fake_profile ) as delete_profile: self.qos_plugin.delete_policy(self.ctxt, self.policy.id) delete_profile.assert_called_once_with(self.fake_profile_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/qos/test_nsxv_notification.py0000644000175000017500000003540200000000000031317 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import mock from neutron.services.qos import qos_plugin from neutron.tests.unit.services.qos import base from neutron_lib import context from neutron_lib.objects import registry as obj_reg from neutron_lib.plugins import directory from neutron_lib.services.qos import constants as qos_consts from oslo_config import cfg from oslo_utils import uuidutils from vmware_nsx.dvs import dvs from vmware_nsx.dvs import dvs_utils from vmware_nsx.services.qos.common import utils as qos_com_utils from vmware_nsx.services.qos.nsx_v import driver as qos_driver from vmware_nsx.services.qos.nsx_v import utils as qos_utils from vmware_nsx.tests.unit.nsx_v import test_plugin CORE_PLUGIN = "vmware_nsx.plugins.nsx_v.plugin.NsxVPluginV2" QosPolicy = obj_reg.load_class('QosPolicy') QosPolicyDefault = obj_reg.load_class('QosPolicyDefault') QosBandwidthLimitRule = obj_reg.load_class('QosBandwidthLimitRule') QosDscpMarkingRule = obj_reg.load_class('QosDscpMarkingRule') class TestQosNsxVNotification(test_plugin.NsxVPluginV2TestCase, base.BaseQosTestCase): @mock.patch.object(dvs_utils, 'dvs_create_session') def setUp(self, *mocks): # init the nsx-v plugin for testing with DVS self._init_dvs_config() # Reset the drive to re-create it qos_driver.DRIVER = None # Skip Octavia init because of RPC conflicts with mock.patch("vmware_nsx.services.lbaas.octavia.octavia_listener." "NSXOctaviaListener.__init__", return_value=None),\ mock.patch("vmware_nsx.services.lbaas.octavia.octavia_listener." 
"NSXOctaviaStatisticsCollector.__init__", return_value=None): super(TestQosNsxVNotification, self).setUp(plugin=CORE_PLUGIN, ext_mgr=None, with_md_proxy=False) self.setup_coreplugin(CORE_PLUGIN) plugin_instance = directory.get_plugin() self._core_plugin = plugin_instance self._core_plugin.init_is_complete = True self.qos_plugin = qos_plugin.QoSPlugin() mock.patch.object(qos_utils.NsxVQosRule, '_get_qos_plugin', return_value=self.qos_plugin).start() # Pre defined QoS data for the tests self.test_tenant_id = '1d7ddf4daf1f47529b5cc93b2e843980' self.ctxt = context.Context('fake_user', self.test_tenant_id) self.policy_data = { 'policy': {'id': uuidutils.generate_uuid(), 'project_id': self.test_tenant_id, 'name': 'test-policy', 'description': 'Test policy description', 'shared': True}} self.rule_data = { 'bandwidth_limit_rule': { 'id': uuidutils.generate_uuid(), 'max_kbps': 100, 'max_burst_kbps': 150, 'type': qos_consts.RULE_TYPE_BANDWIDTH_LIMIT}} self.ingress_rule_data = { 'bandwidth_limit_rule': { 'id': uuidutils.generate_uuid(), 'max_kbps': 200, 'max_burst_kbps': 250, 'direction': 'ingress', 'type': qos_consts.RULE_TYPE_BANDWIDTH_LIMIT}} self.dscp_rule_data = { 'dscp_marking_rule': { 'id': uuidutils.generate_uuid(), 'dscp_mark': 22, 'type': qos_consts.RULE_TYPE_DSCP_MARKING}} self.policy = QosPolicy( self.ctxt, **self.policy_data['policy']) # egress bw rule self.rule = QosBandwidthLimitRule( self.ctxt, **self.rule_data['bandwidth_limit_rule']) # ingress bw rule self.ingress_rule = QosBandwidthLimitRule( self.ctxt, **self.ingress_rule_data['bandwidth_limit_rule']) # dscp marking rule self.dscp_rule = QosDscpMarkingRule( self.ctxt, **self.dscp_rule_data['dscp_marking_rule']) self._net_data = {'network': { 'name': 'test-qos', 'tenant_id': self.test_tenant_id, 'qos_policy_id': self.policy.id, 'port_security_enabled': False, 'admin_state_up': False, 'shared': False }} self._rules = [self.rule_data['bandwidth_limit_rule']] self._dscp_rules = 
[self.dscp_rule_data['dscp_marking_rule']] mock.patch.object(QosPolicy, 'obj_load_attr').start() def _init_dvs_config(self): # Ensure that DVS is enabled # and enable the DVS features for nsxv qos support cfg.CONF.set_override('host_ip', 'fake_ip', group='dvs') cfg.CONF.set_override('host_username', 'fake_user', group='dvs') cfg.CONF.set_override('host_password', 'fake_password', group='dvs') cfg.CONF.set_override('dvs_name', 'fake_dvs', group='dvs') cfg.CONF.set_default('use_dvs_features', True, 'nsxv') def _create_net(self, net_data=None): if net_data is None: net_data = self._net_data net_data['tenant_id'] = self.test_tenant_id with mock.patch('vmware_nsx.services.qos.common.utils.' 'get_network_policy_id', return_value=self.policy.id): return self._core_plugin.create_network(self.ctxt, net_data) @mock.patch.object(qos_com_utils, 'update_network_policy_binding') @mock.patch.object(dvs.DvsManager, 'update_port_groups_config') def test_create_network_with_policy_rule(self, dvs_update_mock, update_bindings_mock): """Test the DVS update when a QoS rule is attached to a network""" # Create a policy with a rule _policy = QosPolicy( self.ctxt, **self.policy_data['policy']) setattr(_policy, "rules", [self.rule, self.ingress_rule, self.dscp_rule]) with mock.patch('neutron.services.qos.qos_plugin.QoSPlugin.' 
'get_policy', return_value=_policy) as get_rules_mock,\ mock.patch.object(self.plugin, '_validate_qos_policy_id'): # create the network to use this policy net = self._create_net() # make sure the network-policy binding was updated update_bindings_mock.assert_called_once_with( self.ctxt, net['id'], self.policy.id) # make sure the qos rule was found get_rules_mock.assert_called_with(self.ctxt, self.policy.id) # make sure the dvs was updated self.assertTrue(dvs_update_mock.called) @mock.patch.object(qos_com_utils, 'update_network_policy_binding') @mock.patch.object(dvs.DvsManager, 'update_port_groups_config') def test_create_network_with_default_policy(self, dvs_update_mock, update_bindings_mock): """Test the DVS update when default policy attached to a network""" # Create a default policy with a rule policy_data = copy.deepcopy(self.policy_data['policy']) policy_data['is_default'] = True _policy = QosPolicy(self.ctxt, **policy_data) setattr(_policy, "rules", [self.rule, self.dscp_rule]) default_policy = QosPolicyDefault( qos_policy_id=policy_data['id']) with mock.patch('neutron.services.qos.qos_plugin.QoSPlugin.' 
'get_policy', return_value=_policy) as get_rules_mock,\ mock.patch.object( QosPolicyDefault, 'get_object', return_value=default_policy): # create the network (with no specific qos policy) net_data = copy.deepcopy(self._net_data) del net_data['network']['qos_policy_id'] net = self._create_net(net_data=net_data) # make sure the network-policy binding was updated update_bindings_mock.assert_called_once_with( self.ctxt, net['id'], self.policy.id) # make sure the qos rule was found get_rules_mock.assert_called_with(self.ctxt, self.policy.id) # make sure the dvs was updated self.assertTrue(dvs_update_mock.called) @mock.patch.object(qos_com_utils, 'update_network_policy_binding') @mock.patch.object(dvs.DvsManager, 'update_port_groups_config') def _test_rule_action_notification(self, action, dvs_update_mock, update_bindings_mock): # Create a policy with a rule _policy = QosPolicy( self.ctxt, **self.policy_data['policy']) # set the rule in the policy data setattr(_policy, "rules", [self.rule]) with mock.patch('neutron.services.qos.qos_plugin.QoSPlugin.' 
'get_policy', return_value=_policy) as get_rules_mock,\ mock.patch.object(QosPolicy, 'get_object', return_value=_policy): # create the network to use this policy net = self._create_net() dvs_update_mock.called = False get_rules_mock.called = False with mock.patch('neutron.objects.db.api.create_object', return_value=self.rule_data),\ mock.patch('neutron.objects.db.api.update_object', return_value=self.rule_data),\ mock.patch('neutron.objects.db.api.delete_object'),\ mock.patch.object(_policy, 'get_bound_networks', return_value=[net['id']]),\ mock.patch.object(self.ctxt.session, 'expunge'): # create/update/delete the rule if action == 'create': self.qos_plugin.create_policy_bandwidth_limit_rule( self.ctxt, self.policy.id, self.rule_data) elif action == 'update': self.qos_plugin.update_policy_bandwidth_limit_rule( self.ctxt, self.rule.id, self.policy.id, self.rule_data) else: self.qos_plugin.delete_policy_bandwidth_limit_rule( self.ctxt, self.rule.id, self.policy.id) # make sure the qos rule was found self.assertTrue(get_rules_mock.called) # make sure the dvs was updated self.assertTrue(dvs_update_mock.called) def test_create_rule_notification(self): """Test the DVS update when a QoS rule, attached to a network, is created """ self._test_rule_action_notification('create') def test_update_rule_notification(self): """Test the DVS update when a QoS rule, attached to a network, is modified """ self._test_rule_action_notification('update') def test_delete_rule_notification(self): """Test the DVS update when a QoS rule, attached to a network, is deleted """ self._test_rule_action_notification('delete') @mock.patch.object(qos_com_utils, 'update_network_policy_binding') @mock.patch.object(dvs.DvsManager, 'update_port_groups_config') def _test_dscp_rule_action_notification(self, action, dvs_update_mock, update_bindings_mock): # Create a policy with a rule _policy = QosPolicy( self.ctxt, **self.policy_data['policy']) # set the rule in the policy data setattr(_policy, "rules", 
[self.dscp_rule]) plugin = self.qos_plugin with mock.patch('neutron.services.qos.qos_plugin.QoSPlugin.' 'get_policy', return_value=_policy) as rules_mock,\ mock.patch.object(QosPolicy, 'get_object', return_value=_policy),\ mock.patch.object(self.ctxt.session, 'expunge'): # create the network to use this policy net = self._create_net() dvs_update_mock.called = False rules_mock.called = False with mock.patch('neutron.objects.db.api.create_object', return_value=self.dscp_rule_data),\ mock.patch('neutron.objects.db.api.update_object', return_value=self.dscp_rule_data),\ mock.patch('neutron.objects.db.api.delete_object'),\ mock.patch.object(_policy, 'get_bound_networks', return_value=[net['id']]),\ mock.patch.object(self.ctxt.session, 'expunge'): # create/update/delete the rule if action == 'create': plugin.create_policy_dscp_marking_rule( self.ctxt, self.policy.id, self.dscp_rule_data) elif action == 'update': plugin.update_policy_dscp_marking_rule( self.ctxt, self.dscp_rule.id, self.policy.id, self.dscp_rule_data) else: plugin.delete_policy_dscp_marking_rule( self.ctxt, self.dscp_rule.id, self.policy.id) # make sure the qos rule was found self.assertTrue(rules_mock.called) # make sure the dvs was updated self.assertTrue(dvs_update_mock.called) def test_create_dscp_rule_notification(self): """Test the DVS update when a QoS DSCP rule, attached to a network, is created """ self._test_dscp_rule_action_notification('create') def test_update_dscp_rule_notification(self): """Test the DVS update when a QoS DSCP rule, attached to a network, is modified """ self._test_dscp_rule_action_notification('update') def test_delete_dscp_rule_notification(self): """Test the DVS update when a QoS DSCP rule, attached to a network, is deleted """ self._test_dscp_rule_action_notification('delete') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2462552 
vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/trunk/0000755000175000017500000000000000000000000024477 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/trunk/__init__.py0000644000175000017500000000000000000000000026576 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/trunk/test_nsxp_driver.py0000644000175000017500000002720500000000000030461 0ustar00coreycorey00000000000000# Copyright (c) 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock from neutron.tests import base from neutron_lib import context from oslo_utils import importutils from vmware_nsx.extensions import projectpluginmap from vmware_nsx.services.trunk.nsx_p import driver as trunk_driver from vmware_nsx.tests.unit.nsx_p import test_plugin as test_nsx_p_plugin PLUGIN_NAME = 'vmware_nsx.plugins.nsx_p.plugin.NsxPolicyPlugin' class TestNsxpTrunkHandler(test_nsx_p_plugin.NsxPPluginTestCaseMixin, base.BaseTestCase): def _get_port_compute_tags_and_net(self, context, port_id): return True, 'net_' + port_id[-1:], [] def setUp(self): super(TestNsxpTrunkHandler, self).setUp() self.context = context.get_admin_context() self.core_plugin = importutils.import_object(PLUGIN_NAME) self.handler = trunk_driver.NsxpTrunkHandler(self.core_plugin) self.handler._get_port_compute_tags_and_net = mock.Mock( side_effect=self._get_port_compute_tags_and_net) self.trunk_1 = mock.Mock() self.trunk_1.port_id = "parent_port_1" self.trunk_1.id = "trunk_1_id" self.trunk_2 = mock.Mock() self.trunk_2.port_id = "parent_port_2" self.sub_port_a = mock.Mock() self.sub_port_a.segmentation_id = 40 self.sub_port_a.trunk_id = "trunk-1" self.sub_port_a.port_id = "sub_port_a" self.sub_port_a.segmentation_type = 'vlan' self.sub_port_b = mock.Mock() self.sub_port_b.segmentation_id = 41 self.sub_port_b.trunk_id = "trunk-2" self.sub_port_b.port_id = "sub_port_b" self.sub_port_b.segmentation_type = 'vlan' self.sub_port_c = mock.Mock() self.sub_port_c.segmentation_id = 43 self.sub_port_c.trunk_id = "trunk-2" self.sub_port_c.port_id = "sub_port_c" self.sub_port_c.segmentation_type = 'vlan' def test_trunk_created(self): # Create trunk with no subport self.trunk_1.sub_ports = [] with mock.patch.object( self.handler.plugin_driver.nsxpolicy.segment_port, 'attach') as m_attach: self.handler.trunk_created(self.context, self.trunk_1) m_attach.assert_called_with( 'net_1', self.trunk_1.port_id, attachment_type='PARENT', tags=[{'tag': self.trunk_1.id, 'scope': 'os-neutron-trunk-id'}], 
vif_id=self.trunk_1.port_id) # Create trunk with 1 subport self.trunk_1.sub_ports = [self.sub_port_a] with mock.patch.object( self.handler.plugin_driver.nsxpolicy.segment_port, 'attach') as m_attach: self.handler.trunk_created(self.context, self.trunk_1) calls = [ mock.call.m_attach( 'net_1', self.trunk_1.port_id, attachment_type='PARENT', tags=[{'tag': self.trunk_1.id, 'scope': 'os-neutron-trunk-id'}], vif_id=self.trunk_1.port_id), mock.call.m_attach( 'net_a', self.sub_port_a.port_id, 'CHILD', self.sub_port_a.port_id, context_id=self.trunk_1.port_id, tags=[{'tag': self.sub_port_a.trunk_id, 'scope': 'os-neutron-trunk-id'}], traffic_tag=self.sub_port_a.segmentation_id)] m_attach.assert_has_calls(calls, any_order=True) # Create trunk with multiple subports self.trunk_2.sub_ports = [self.sub_port_b, self.sub_port_c] with mock.patch.object( self.handler.plugin_driver.nsxpolicy.segment_port, 'attach') as m_attach: self.handler.trunk_created(self.context, self.trunk_2) calls = [ mock.call.m_attach( 'net_2', self.trunk_2.port_id, attachment_type='PARENT', tags=[{'tag': self.trunk_2.id, 'scope': 'os-neutron-trunk-id'}], vif_id=self.trunk_2.port_id), mock.call.m_attach( 'net_b', self.sub_port_b.port_id, 'CHILD', self.sub_port_b.port_id, context_id=self.trunk_2.port_id, tags=[{'tag': self.sub_port_b.trunk_id, 'scope': 'os-neutron-trunk-id'}], traffic_tag=self.sub_port_b.segmentation_id), mock.call.m_attach( 'net_c', self.sub_port_c.port_id, 'CHILD', self.sub_port_c.port_id, context_id=self.trunk_2.port_id, tags=[{'tag': self.sub_port_c.trunk_id, 'scope': 'os-neutron-trunk-id'}], traffic_tag=self.sub_port_c.segmentation_id)] m_attach.assert_has_calls(calls, any_order=True) def test_trunk_deleted(self): # Delete trunk with no subport self.trunk_1.sub_ports = [] with mock.patch.object( self.handler.plugin_driver.nsxpolicy.segment_port, 'detach') as m_detach: self.handler.trunk_deleted(self.context, self.trunk_1) m_detach.assert_called_with( 'net_1', self.trunk_1.port_id, 
vif_id=self.trunk_1.port_id, tags=mock.ANY) # Delete trunk with 1 subport self.trunk_1.sub_ports = [self.sub_port_a] with mock.patch.object( self.handler.plugin_driver.nsxpolicy.segment_port, 'detach') as m_detach: self.handler.trunk_deleted(self.context, self.trunk_1) calls = [ mock.call.m_detach( 'net_1', self.trunk_1.port_id, vif_id=self.trunk_1.port_id, tags=mock.ANY), mock.call.m_detach( 'net_a', self.sub_port_a.port_id, vif_id=self.sub_port_a.port_id, tags=mock.ANY)] m_detach.assert_has_calls(calls, any_order=True) # Delete trunk with multiple subports self.trunk_2.sub_ports = [self.sub_port_b, self.sub_port_c] with mock.patch.object( self.handler.plugin_driver.nsxpolicy.segment_port, 'detach') as m_detach: self.handler.trunk_deleted(self.context, self.trunk_2) calls = [ mock.call.m_detach( 'net_2', self.trunk_2.port_id, vif_id=self.trunk_2.port_id, tags=mock.ANY), mock.call.m_detach( 'net_b', self.sub_port_b.port_id, vif_id=self.sub_port_b.port_id, tags=mock.ANY), mock.call.m_detach( 'net_c', self.sub_port_c.port_id, vif_id=self.sub_port_c.port_id, tags=mock.ANY)] m_detach.assert_has_calls(calls, any_order=True) def test_subports_added(self): # Update trunk with no subport sub_ports = [] with mock.patch.object( self.handler.plugin_driver.nsxpolicy.segment_port, 'attach') as m_attach: self.handler.subports_added(self.context, self.trunk_1, sub_ports) m_attach.assert_not_called() # Update trunk with 1 subport sub_ports = [self.sub_port_a] with mock.patch.object( self.handler.plugin_driver.nsxpolicy.segment_port, 'attach') as m_attach: self.handler.subports_added(self.context, self.trunk_1, sub_ports) m_attach.assert_called_with( 'net_a', self.sub_port_a.port_id, 'CHILD', self.sub_port_a.port_id, context_id=self.trunk_1.port_id, tags=[{'tag': self.sub_port_a.trunk_id, 'scope': 'os-neutron-trunk-id'}], traffic_tag=self.sub_port_a.segmentation_id) # Update trunk with multiple subports sub_ports = [self.sub_port_b, self.sub_port_c] with mock.patch.object( 
self.handler.plugin_driver.nsxpolicy.segment_port, 'attach') as m_attach: self.handler.subports_added(self.context, self.trunk_2, sub_ports) calls = [ mock.call.m_attach( 'net_b', self.sub_port_b.port_id, 'CHILD', self.sub_port_b.port_id, context_id=self.trunk_2.port_id, tags=[{'tag': self.sub_port_b.trunk_id, 'scope': 'os-neutron-trunk-id'}], traffic_tag=self.sub_port_b.segmentation_id), mock.call.m_attach( 'net_c', self.sub_port_c.port_id, 'CHILD', self.sub_port_c.port_id, context_id=self.trunk_2.port_id, tags=[{'tag': self.sub_port_c.trunk_id, 'scope': 'os-neutron-trunk-id'}], traffic_tag=self.sub_port_c.segmentation_id)] m_attach.assert_has_calls(calls, any_order=True) def test_subports_deleted(self): # Update trunk to remove no subport sub_ports = [] with mock.patch.object( self.handler.plugin_driver.nsxpolicy.segment_port, 'detach') as m_detach: self.handler.subports_deleted( self.context, self.trunk_1, sub_ports) m_detach.assert_not_called() # Update trunk to remove 1 subport sub_ports = [self.sub_port_a] with mock.patch.object( self.handler.plugin_driver.nsxpolicy.segment_port, 'detach') as m_detach: self.handler.subports_deleted( self.context, self.trunk_1, sub_ports) m_detach.assert_called_with( 'net_a', self.sub_port_a.port_id, vif_id=self.sub_port_a.port_id, tags=mock.ANY) # Update trunk to remove multiple subports sub_ports = [self.sub_port_b, self.sub_port_c] with mock.patch.object( self.handler.plugin_driver.nsxpolicy.segment_port, 'detach') as m_detach: self.handler.subports_deleted( self.context, self.trunk_2, sub_ports) calls = [ mock.call.m_detach( 'net_b', self.sub_port_b.port_id, vif_id=self.sub_port_b.port_id, tags=mock.ANY), mock.call.m_detach( 'net_c', self.sub_port_c.port_id, vif_id=self.sub_port_c.port_id, tags=mock.ANY)] m_detach.assert_has_calls(calls, any_order=True) class TestNsxpTrunkDriver(base.BaseTestCase): def setUp(self): super(TestNsxpTrunkDriver, self).setUp() def test_is_loaded(self): core_plugin = mock.Mock() driver = 
trunk_driver.NsxpTrunkDriver.create(core_plugin) with mock.patch.object(core_plugin, 'plugin_type', return_value=projectpluginmap.NsxPlugins.NSX_P): self.assertTrue(driver.is_loaded) with mock.patch.object(core_plugin, 'plugin_type', return_value=projectpluginmap.NsxPlugins.NSX_T): self.assertFalse(driver.is_loaded) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/trunk/test_nsxv3_driver.py0000644000175000017500000001630400000000000030550 0ustar00coreycorey00000000000000# Copyright (c) 2016 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock from neutron.tests import base from neutron_lib import context from oslo_config import cfg from oslo_utils import importutils from vmware_nsx.common import nsx_constants from vmware_nsx.services.trunk.nsx_v3 import driver as trunk_driver from vmware_nsx.tests.unit.nsx_v3 import test_constants as test_consts from vmware_nsx.tests.unit.nsx_v3 import test_plugin as test_nsx_v3_plugin class TestNsxV3TrunkHandler(test_nsx_v3_plugin.NsxV3PluginTestCaseMixin, base.BaseTestCase): def setUp(self): super(TestNsxV3TrunkHandler, self).setUp() self.context = context.get_admin_context() self.core_plugin = importutils.import_object(test_consts.PLUGIN_NAME) self.handler = trunk_driver.NsxV3TrunkHandler(self.core_plugin) self.handler._update_port_at_backend = mock.Mock() self.trunk_1 = mock.Mock() self.trunk_1.port_id = "parent_port_1" self.trunk_2 = mock.Mock() self.trunk_2.port_id = "parent_port_2" self.sub_port_1 = mock.Mock() self.sub_port_1.segmentation_id = 40 self.sub_port_1.trunk_id = "trunk-1" self.sub_port_1.port_id = "sub_port_1" self.sub_port_2 = mock.Mock() self.sub_port_2.segmentation_id = 41 self.sub_port_2.trunk_id = "trunk-2" self.sub_port_2.port_id = "sub_port_2" self.sub_port_3 = mock.Mock() self.sub_port_3.segmentation_id = 43 self.sub_port_3.trunk_id = "trunk-2" self.sub_port_3.port_id = "sub_port_3" def test_trunk_created(self): # Create trunk with no subport self.trunk_1.sub_ports = [] self.handler.trunk_created(self.context, self.trunk_1) self.handler._update_port_at_backend.assert_not_called() # Create trunk with 1 subport self.trunk_1.sub_ports = [self.sub_port_1] self.handler.trunk_created(self.context, self.trunk_1) self.handler._update_port_at_backend.assert_called_with( self.context, self.trunk_1.port_id, self.sub_port_1) # Create trunk with multiple subports self.trunk_2.sub_ports = [self.sub_port_2, self.sub_port_3] self.handler.trunk_created(self.context, self.trunk_2) calls = [mock.call._update_port_at_backend( self.context, 
self.trunk_2.port_id, self.sub_port_2), mock.call._update_port_at_backend( self.context, self.trunk_2.port_id, self.sub_port_3)] self.handler._update_port_at_backend.assert_has_calls( calls, any_order=True) def test_trunk_deleted(self): # Delete trunk with no subport self.trunk_1.sub_ports = [] self.handler.trunk_deleted(self.context, self.trunk_1) self.handler._update_port_at_backend.assert_not_called() # Delete trunk with 1 subport self.trunk_1.sub_ports = [self.sub_port_1] self.handler.trunk_deleted(self.context, self.trunk_1) self.handler._update_port_at_backend.assert_called_with( context=self.context, parent_port_id=None, subport=self.sub_port_1) # Delete trunk with multiple subports self.trunk_2.sub_ports = [self.sub_port_2, self.sub_port_3] self.handler.trunk_deleted(self.context, self.trunk_2) calls = [mock.call._update_port_at_backend( context=self.context, parent_port_id=None, subport=self.sub_port_2), mock.call._update_port_at_backend( context=self.context, parent_port_id=None, subport=self.sub_port_3)] self.handler._update_port_at_backend.assert_has_calls( calls, any_order=True) def test_subports_added(self): # Update trunk with no subport sub_ports = [] self.handler.subports_added(self.context, self.trunk_1, sub_ports) self.handler._update_port_at_backend.assert_not_called() # Update trunk with 1 subport sub_ports = [self.sub_port_1] self.handler.subports_added(self.context, self.trunk_1, sub_ports) self.handler._update_port_at_backend.assert_called_with( self.context, self.trunk_1.port_id, self.sub_port_1) # Update trunk with multiple subports sub_ports = [self.sub_port_2, self.sub_port_3] self.handler.subports_added(self.context, self.trunk_2, sub_ports) calls = [mock.call._update_port_at_backend( self.context, self.trunk_2.port_id, self.sub_port_2), mock.call._update_port_at_backend( self.context, self.trunk_2.port_id, self.sub_port_3)] self.handler._update_port_at_backend.assert_has_calls( calls, any_order=True) def test_subports_deleted(self): # 
Update trunk to remove no subport sub_ports = [] self.handler.subports_deleted(self.context, self.trunk_1, sub_ports) self.handler._update_port_at_backend.assert_not_called() # Update trunk to remove 1 subport sub_ports = [self.sub_port_1] self.handler.subports_deleted(self.context, self.trunk_1, sub_ports) self.handler._update_port_at_backend.assert_called_with( context=self.context, parent_port_id=None, subport=self.sub_port_1) # Update trunk to remove multiple subports sub_ports = [self.sub_port_2, self.sub_port_3] self.handler.subports_deleted(self.context, self.trunk_2, sub_ports) calls = [mock.call._update_port_at_backend( context=self.context, parent_port_id=None, subport=self.sub_port_2), mock.call._update_port_at_backend( context=self.context, parent_port_id=None, subport=self.sub_port_3)] self.handler._update_port_at_backend.assert_has_calls( calls, any_order=True) class TestNsxV3TrunkDriver(base.BaseTestCase): def setUp(self): super(TestNsxV3TrunkDriver, self).setUp() def test_is_loaded(self): driver = trunk_driver.NsxV3TrunkDriver.create(mock.Mock()) cfg.CONF.set_override('core_plugin', nsx_constants.VMWARE_NSX_V3_PLUGIN_NAME) self.assertTrue(driver.is_loaded) cfg.CONF.set_override('core_plugin', 'not_vmware_nsx_plugin') self.assertFalse(driver.is_loaded) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2462552 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/vpnaas/0000755000175000017500000000000000000000000024624 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/vpnaas/__init__.py0000644000175000017500000000000000000000000026723 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 
vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/vpnaas/test_nsxp_vpnaas.py0000644000175000017500000010653500000000000030607 0ustar00coreycorey00000000000000# Copyright 2019 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from collections import namedtuple import contextlib import mock from oslo_utils import uuidutils from neutron.db import l3_db from neutron.db.models import l3 as l3_models from neutron_lib.api.definitions import external_net as extnet_apidef from neutron_lib import context as n_ctx from neutron_lib.plugins import directory from neutron_vpnaas.db.vpn import vpn_models # noqa from neutron_vpnaas.tests import base from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.services.vpnaas.nsxp import ipsec_driver from vmware_nsx.services.vpnaas.nsxp import ipsec_validator from vmware_nsx.tests.unit.nsx_p import test_plugin _uuid = uuidutils.generate_uuid FAKE_TENANT = _uuid() FAKE_ROUTER_ID = "aaaaaa-bbbbb-ccc" FAKE_ROUTER = {'id': FAKE_ROUTER_ID, 'name': 'fake router', 'project_id': FAKE_TENANT, 'admin_state_up': True, 'status': 'ACTIVE', 'gw_port_id': _uuid(), 'enable_snat': False, l3_db.EXTERNAL_GW_INFO: {'network_id': _uuid()}} FAKE_SUBNET_ID = _uuid() FAKE_SUBNET = {'cidr': '1.1.1.0/24', 'id': FAKE_SUBNET_ID} FAKE_VPNSERVICE_ID = _uuid() FAKE_VPNSERVICE = {'id': FAKE_VPNSERVICE_ID, 'name': 'vpn_service', 'description': 'dummy', 'router': FAKE_ROUTER, 'router_id': FAKE_ROUTER_ID, 'subnet': FAKE_SUBNET, 
'subnet_id': FAKE_SUBNET_ID, 'project_id': FAKE_TENANT, 'external_v4_ip': '1.1.1.1', 'admin_state_up': True} FAKE_IKE_POLICY_ID = _uuid() FAKE_IKE_POLICY = {'id': FAKE_IKE_POLICY_ID, 'name': 'ike_dummy', 'description': 'ike_dummy', 'auth_algorithm': 'sha1', 'encryption_algorithm': 'aes-128', 'phase1_negotiation_mode': 'main', 'lifetime': { 'units': 'seconds', 'value': 3600}, 'ike_version': 'v1', 'pfs': 'group14', 'project_id': FAKE_TENANT} FAKE_IPSEC_POLICY_ID = _uuid() FAKE_IPSEC_POLICY = {'id': FAKE_IPSEC_POLICY_ID, 'name': 'ipsec_dummy', 'description': 'myipsecpolicy1', 'auth_algorithm': 'sha1', 'encryption_algorithm': 'aes-128', 'encapsulation_mode': 'tunnel', 'lifetime': { 'units': 'seconds', 'value': 3600}, 'transform_protocol': 'esp', 'pfs': 'group14', 'project_id': FAKE_TENANT} FAKE_IPSEC_CONNECTION_ID = _uuid() FAKE_IPSEC_CONNECTION = {'vpnservice_id': FAKE_VPNSERVICE_ID, 'ikepolicy_id': FAKE_IKE_POLICY_ID, 'ipsecpolicy_id': FAKE_IPSEC_POLICY_ID, 'name': 'VPN connection', 'description': 'VPN connection', 'id': FAKE_IPSEC_CONNECTION_ID, 'peer_address': '192.168.1.10', 'peer_id': '192.168.1.10', 'peer_cidrs': '192.168.1.0/24', 'mtu': 1500, 'psk': 'abcd', 'initiator': 'bi-directional', 'dpd': { 'action': 'hold', 'interval': 30, 'timeout': 120}, 'admin_state_up': True, 'project_id': FAKE_TENANT} FAKE_NEW_CONNECTION = {'vpnservice_id': FAKE_VPNSERVICE_ID, 'ikepolicy_id': FAKE_IKE_POLICY_ID, 'ipsecpolicy_id': FAKE_IPSEC_POLICY_ID, 'name': 'VPN connection', 'description': 'VPN connection', 'id': FAKE_IPSEC_CONNECTION_ID, 'peer_address': '192.168.1.10', 'peer_id': '192.168.1.10', 'peer_cidrs': '192.168.2.0/24', 'mtu': 1500, 'psk': 'abcd', 'initiator': 'bi-directional', 'dpd': { 'action': 'hold', 'interval': 30, 'timeout': 120}, 'admin_state_up': True, 'project_id': FAKE_TENANT} FAKE_VPNSERVICE_NO_SUB = {'id': FAKE_VPNSERVICE_ID, 'name': 'vpn_service', 'description': 'dummy', 'router': FAKE_ROUTER, 'router_id': FAKE_ROUTER_ID, 'project_id': FAKE_TENANT, 
'external_v4_ip': '1.1.1.1', 'admin_state_up': True} FAKE_ENDPOINTS_CONNECTION = {'vpnservice_id': FAKE_VPNSERVICE_ID, 'ikepolicy_id': FAKE_IKE_POLICY_ID, 'ipsecpolicy_id': FAKE_IPSEC_POLICY_ID, 'name': 'VPN connection', 'description': 'VPN connection', 'id': FAKE_IPSEC_CONNECTION_ID, 'peer_address': '192.168.1.10', 'peer_id': '192.168.1.10', 'peer_ep_group_id': 'cidr_ep', 'local_ep_group_id': 'subnet_ep', 'mtu': 1500, 'psk': 'abcd', 'initiator': 'bi-directional', 'dpd': { 'action': 'hold', 'interval': 30, 'timeout': 120}, 'admin_state_up': True, 'project_id': FAKE_TENANT} class TestDriverValidation(base.BaseTestCase): def setUp(self): super(TestDriverValidation, self).setUp() self.context = n_ctx.Context('some_user', 'some_tenant') self.service_plugin = mock.Mock() driver = mock.Mock() driver.service_plugin = self.service_plugin with mock.patch("neutron_lib.plugins.directory.get_plugin"): self.validator = ipsec_validator.IPsecNsxPValidator(driver) self.validator._l3_plugin = mock.Mock() self.validator._core_plugin = mock.Mock() self.vpn_service = {'router_id': 'dummy_router', 'subnet_id': 'dummy_subnet'} self.peer_address = '10.10.10.10' self.peer_cidr = '10.10.11.0/20' def _test_lifetime_not_in_seconds(self, validation_func): policy_info = {'lifetime': {'units': 'kilobytes', 'value': 1000}} self.assertRaises(nsx_exc.NsxVpnValidationError, validation_func, self.context, policy_info) def test_ike_lifetime_not_in_seconds(self): self._test_lifetime_not_in_seconds( self.validator.validate_ike_policy) def test_ipsec_lifetime_not_in_seconds(self): self._test_lifetime_not_in_seconds( self.validator.validate_ipsec_policy) def _test_lifetime_seconds_values_at_limits(self, validation_func): policy_info = {'lifetime': {'units': 'seconds', 'value': 21600}} validation_func(self.context, policy_info) policy_info = {'lifetime': {'units': 'seconds', 'value': 86400}} validation_func(self.context, policy_info) policy_info = {'lifetime': {'units': 'seconds', 'value': 10}} 
self.assertRaises(nsx_exc.NsxVpnValidationError, validation_func, self.context, policy_info) def test_ike_lifetime_seconds_values_at_limits(self): self._test_lifetime_seconds_values_at_limits( self.validator.validate_ike_policy) def test_ipsec_lifetime_seconds_values_at_limits(self): self._test_lifetime_seconds_values_at_limits( self.validator.validate_ipsec_policy) def _test_auth_algorithm(self, validation_func): auth_algorithm = {'auth_algorithm': 'sha384'} validation_func(self.context, auth_algorithm) auth_algorithm = {'auth_algorithm': 'sha512'} validation_func(self.context, auth_algorithm) auth_algorithm = {'auth_algorithm': 'sha1'} validation_func(self.context, auth_algorithm) auth_algorithm = {'auth_algorithm': 'sha256'} validation_func(self.context, auth_algorithm) def test_ipsec_auth_algorithm(self): self._test_auth_algorithm(self.validator.validate_ipsec_policy) def test_ike_auth_algorithm(self): self._test_auth_algorithm(self.validator.validate_ike_policy) def _test_encryption_algorithm(self, validation_func): auth_algorithm = {'encryption_algorithm': 'aes-192'} self.assertRaises(nsx_exc.NsxVpnValidationError, validation_func, self.context, auth_algorithm) auth_algorithm = {'encryption_algorithm': 'aes-128'} validation_func(self.context, auth_algorithm) auth_algorithm = {'encryption_algorithm': 'aes-256'} validation_func(self.context, auth_algorithm) def test_ipsec_encryption_algorithm(self): self._test_encryption_algorithm(self.validator.validate_ipsec_policy) def test_ike_encryption_algorithm(self): self._test_encryption_algorithm(self.validator.validate_ike_policy) def test_ike_negotiation_mode(self): policy_info = {'phase1-negotiation-mode': 'aggressive'} self.assertRaises(nsx_exc.NsxVpnValidationError, self.validator.validate_ike_policy, self.context, policy_info) policy_info = {'phase1-negotiation-mode': 'main'} self.validator.validate_ike_policy(self.context, policy_info) def _test_pfs(self, validation_func): policy_info = {'pfs': 'group15'} 
self.assertRaises(nsx_exc.NsxVpnValidationError, validation_func, self.context, policy_info) policy_info = {'pfs': 'group14'} validation_func(self.context, policy_info) def test_ipsec_pfs(self): self._test_pfs(self.validator.validate_ipsec_policy) def test_ike_pfs(self): self._test_pfs(self.validator.validate_ike_policy) def test_ipsec_encap_mode(self): policy_info = {'encapsulation_mode': 'transport'} self.assertRaises(nsx_exc.NsxVpnValidationError, self.validator.validate_ipsec_policy, self.context, policy_info) policy_info = {'encapsulation_mode': 'tunnel'} self.validator.validate_ipsec_policy(self.context, policy_info) def test_ipsec_transform_protocol(self): policy_info = {'transform_protocol': 'ah'} self.assertRaises(nsx_exc.NsxVpnValidationError, self.validator.validate_ipsec_policy, self.context, policy_info) policy_info = {'transform_protocol': 'esp'} self.validator.validate_ipsec_policy(self.context, policy_info) def test_vpn_service_validation(self): db_router = l3_models.Router() nsx_router = {'ha_mode': 'ACITVE_ACTIVE'} db_router.enable_snat = False with mock.patch.object(self.validator.nsxpolicy.tier0, 'get', return_value=nsx_router): self.assertRaises(nsx_exc.NsxVpnValidationError, self.validator.validate_vpnservice, self.context, self.vpn_service) nsx_router = {'ha_mode': 'ACTIVE_STANDBY'} db_router.enable_snat = True with mock.patch.object(self.validator.nsxpolicy.tier0, 'get', return_value=nsx_router),\ mock.patch.object(self.validator._core_plugin, '_get_router', return_value=db_router): self.validator.validate_vpnservice(self.context, self.vpn_service) nsx_router = {'ha_mode': 'ACTIVE_STANDBY'} db_router.enable_snat = False with mock.patch.object(self.validator.nsxpolicy.tier0, 'get', return_value=nsx_router),\ mock.patch.object(self.validator._core_plugin, '_get_router', return_value=db_router): self.validator.validate_vpnservice(self.context, self.vpn_service) nsx_router = {'ha_mode': 'ACTIVE_STANDBY'} db_router.enable_snat = False 
vpn_service_no_subnet = {'router_id': 'dummy_router', 'subnet_id': None} with mock.patch.object(self.validator.nsxpolicy.tier0, 'get', return_value=nsx_router),\ mock.patch.object(self.validator._core_plugin, '_get_router', return_value=db_router): self.validator.validate_vpnservice( self.context, vpn_service_no_subnet) def _test_conn_validation(self, conn_params=None, success=True, connections=None, service_subnets=None, router_subnets=None): if connections is None: connections = [] if router_subnets is None: router_subnets = [] def mock_get_routers(context, filters=None, fields=None): return [{'id': 'no-snat', 'external_gateway_info': {'enable_snat': False}}] def mock_get_service(context, service_id): if service_subnets: # option to give the test a different subnet per service subnet_cidr = service_subnets[int(service_id) - 1] else: subnet_cidr = '5.5.5.0/2%s' % service_id return {'id': service_id, 'router_id': service_id, 'subnet_id': 'dummy_subnet', 'external_v4_ip': '1.1.1.%s' % service_id, 'subnet': {'id': 'dummy_subnet', 'cidr': subnet_cidr}} def mock_get_connections(context, filters=None, fields=None): if filters and 'peer_address' in filters: return [conn for conn in connections if conn['peer_address'] == filters['peer_address'][0]] else: return connections with mock.patch.object(self.validator.vpn_plugin, '_get_vpnservice', side_effect=mock_get_service),\ mock.patch.object(self.validator._core_plugin, 'get_routers', side_effect=mock_get_routers),\ mock.patch.object(self.validator._core_plugin, '_find_router_subnets_cidrs', return_value=router_subnets),\ mock.patch.object(self.validator.vpn_plugin, 'get_ipsec_site_connections', side_effect=mock_get_connections): ipsec_sitecon = {'id': '1', 'vpnservice_id': '1', 'mtu': 1500, 'peer_address': self.peer_address, 'peer_cidrs': [self.peer_cidr]} if conn_params: ipsec_sitecon.update(conn_params) if success: self.validator.validate_ipsec_site_connection( self.context, ipsec_sitecon) else: self.assertRaises( 
nsx_exc.NsxVpnValidationError, self.validator.validate_ipsec_site_connection, self.context, ipsec_sitecon) def test_dpd_validation(self): params = {'dpd': {'action': 'hold', 'timeout': 120}} self._test_conn_validation(conn_params=params, success=True) params = {'dpd': {'action': 'clear', 'timeout': 120}} self._test_conn_validation(conn_params=params, success=False) params = {'dpd': {'action': 'hold', 'timeout': 2}} self._test_conn_validation(conn_params=params, success=False) def test_check_unique_addresses(self): # this test runs with non-overlapping local subnets on # different routers subnets = ['5.5.5.0/20', '6.6.6.0/20'] # same service/router gw & peer address - should fail connections = [{'id': '2', 'status': 'ACTIVE', 'vpnservice_id': '1', 'peer_address': self.peer_address, 'peer_cidrs': [self.peer_cidr]}] self._test_conn_validation(success=False, connections=connections, service_subnets=subnets) # different service/router gw - ok connections = [{'id': '2', 'status': 'ACTIVE', 'vpnservice_id': '2', 'peer_address': self.peer_address, 'peer_cidrs': ['6.6.6.6']}] self._test_conn_validation(success=True, connections=connections, service_subnets=subnets) # different peer address - ok connections = [{'id': '2', 'status': 'ACTIVE', 'vpnservice_id': '1', 'peer_address': '7.7.7.1', 'peer_cidrs': ['7.7.7.7']}] self._test_conn_validation(success=True, connections=connections, service_subnets=subnets) # ignoring non-active connections connections = [{'id': '2', 'status': 'ERROR', 'vpnservice_id': '1', 'peer_address': self.peer_address, 'peer_cidrs': [self.peer_cidr]}] self._test_conn_validation(success=True, connections=connections, service_subnets=subnets) def test_overlapping_rules(self): # peer-cidr overlapping with new one, same subnet - should fail connections = [{'id': '2', 'status': 'ACTIVE', 'vpnservice_id': '1', 'peer_address': '9.9.9.9', 'peer_cidrs': ['10.10.11.1/19']}] self._test_conn_validation(success=False, connections=connections) # same peer-cidr, 
overlapping subnets - should fail connections = [{'id': '2', 'status': 'ACTIVE', 'vpnservice_id': '2', 'peer_address': '9.9.9.9', 'peer_cidrs': [self.peer_cidr]}] self._test_conn_validation(success=False, connections=connections) # non overlapping peer-cidr, same subnet - ok connections = [{'id': '2', 'status': 'ACTIVE', 'vpnservice_id': '1', 'peer_address': '7.7.7.1', 'peer_cidrs': ['7.7.7.7']}] self._test_conn_validation(success=True, connections=connections) # ignoring non-active connections connections = [{'id': '2', 'status': 'ERROR', 'vpnservice_id': '1', 'peer_address': '9.9.9.9', 'peer_cidrs': ['10.10.11.1/19']}] self._test_conn_validation(success=True, connections=connections) def test_advertisment(self): # different routers, same subnet - should fail subnets = ['5.5.5.0/20', '5.5.5.0/20'] connections = [{'id': '2', 'status': 'ACTIVE', 'vpnservice_id': '2', 'peer_address': self.peer_address, 'peer_cidrs': ['6.6.6.6']}] self._test_conn_validation(success=False, connections=connections, service_subnets=subnets) # different routers, overlapping subnet - should fail subnets = ['5.5.5.0/20', '5.5.5.0/21'] connections = [{'id': '2', 'status': 'ACTIVE', 'vpnservice_id': '2', 'peer_address': self.peer_address, 'peer_cidrs': ['6.6.6.6']}] self._test_conn_validation(success=False, connections=connections, service_subnets=subnets) # different routers, non overlapping subnet - ok subnets = ['5.5.5.0/20', '50.5.5.0/21'] connections = [{'id': '2', 'status': 'ACTIVE', 'vpnservice_id': '2', 'peer_address': self.peer_address, 'peer_cidrs': ['6.6.6.6']}] self._test_conn_validation(success=True, connections=connections, service_subnets=subnets) # no-snat router with overlapping subnet to the service subnet - fail subnets = ['5.5.5.0/21', '1.1.1.0/20'] connections = [{'id': '2', 'status': 'ACTIVE', 'vpnservice_id': '2', 'peer_address': self.peer_address, 'peer_cidrs': ['6.6.6.6']}] self._test_conn_validation(success=False, connections=connections, router_subnets=subnets) # 
no-snat router with non overlapping subnet to the service subnet - ok service_subnets = ['5.5.5.0/20', '6.6.6.0/20'] router_subnets = ['50.5.5.0/21', '1.1.1.0/20'] connections = [{'id': '2', 'status': 'ACTIVE', 'vpnservice_id': '2', 'peer_address': self.peer_address, 'peer_cidrs': ['6.6.6.6']}] self._test_conn_validation(success=True, connections=connections, service_subnets=service_subnets, router_subnets=router_subnets) class TestVpnaasDriver(test_plugin.NsxPPluginTestCaseMixin): def setUp(self): super(TestVpnaasDriver, self).setUp() self.context = n_ctx.get_admin_context() self.service_plugin = mock.Mock() self.validator = mock.Mock() self.driver = ipsec_driver.NSXpIPsecVpnDriver(self.service_plugin) self.plugin = directory.get_plugin() self.policy_vpn = self.plugin.nsxpolicy.ipsec_vpn self.l3plugin = self.plugin mock.patch.object(self.plugin.nsxpolicy, 'search_by_tags', return_value={'results': []}).start() @contextlib.contextmanager def router(self, name='vpn-test-router', tenant_id=_uuid(), admin_state_up=True, **kwargs): request = {'router': {'tenant_id': tenant_id, 'name': name, 'admin_state_up': admin_state_up}} for arg in kwargs: request['router'][arg] = kwargs[arg] router = self.l3plugin.create_router(self.context, request) yield router def test_create_ipsec_site_connection(self): with mock.patch.object(self.service_plugin, 'get_ikepolicy', return_value=FAKE_IKE_POLICY),\ mock.patch.object(self.service_plugin, 'get_ipsecpolicy', return_value=FAKE_IPSEC_POLICY),\ mock.patch.object(self.service_plugin, '_get_vpnservice', return_value=FAKE_VPNSERVICE),\ mock.patch.object(self.service_plugin, 'get_vpnservices', return_value=[FAKE_VPNSERVICE]),\ mock.patch.object(self.plugin, 'get_router', return_value=FAKE_ROUTER),\ mock.patch.object(self.plugin, 'get_subnet', return_value=FAKE_SUBNET),\ mock.patch("vmware_nsx.db.db.add_nsx_vpn_connection_mapping"),\ mock.patch.object(self.policy_vpn.ike_profile, 'create_or_overwrite') as create_ike,\ 
mock.patch.object(self.policy_vpn.tunnel_profile, 'create_or_overwrite') as create_ipsec,\ mock.patch.object(self.policy_vpn.dpd_profile, 'create_or_overwrite') as create_dpd,\ mock.patch.object(self.policy_vpn.session, 'create_or_overwrite') as create_sesson: self.driver.create_ipsec_site_connection(self.context, FAKE_IPSEC_CONNECTION) create_ike.assert_called_once() create_ipsec.assert_called_once() create_dpd.assert_called_once() create_sesson.assert_called_once() # TODO(asarfaty): make sure router adv also updated def test_update_ipsec_site_connection(self): with mock.patch.object(self.service_plugin, '_get_vpnservice', return_value=FAKE_VPNSERVICE),\ mock.patch.object(self.plugin, 'get_router', return_value=FAKE_ROUTER),\ mock.patch.object(self.plugin, 'update_router_firewall') as update_fw,\ mock.patch.object(self.policy_vpn.session, 'update') as update_sesson,\ mock.patch("vmware_nsx.db.db.get_nsx_vpn_connection_mapping"): self.driver.update_ipsec_site_connection(self.context, FAKE_IPSEC_CONNECTION, FAKE_NEW_CONNECTION) update_sesson.assert_called_once() update_fw.assert_called_once() def test_delete_ipsec_site_connection(self): with mock.patch.object(self.service_plugin, 'get_ikepolicy', return_value=FAKE_IKE_POLICY),\ mock.patch.object(self.service_plugin, 'get_ipsecpolicy', return_value=FAKE_IPSEC_POLICY),\ mock.patch.object(self.service_plugin, '_get_vpnservice', return_value=FAKE_VPNSERVICE),\ mock.patch.object(self.service_plugin, 'get_vpnservices', return_value=[FAKE_VPNSERVICE]),\ mock.patch.object(self.plugin, 'get_router', return_value=FAKE_ROUTER),\ mock.patch.object(self.plugin, 'get_subnet', return_value=FAKE_SUBNET),\ mock.patch("vmware_nsx.db.db.get_nsx_vpn_connection_mapping"),\ mock.patch.object(self.policy_vpn.ike_profile, 'delete') as delete_ike,\ mock.patch.object(self.policy_vpn.tunnel_profile, 'delete') as delete_ipsec,\ mock.patch.object(self.policy_vpn.dpd_profile, 'delete') as delete_dpd,\ mock.patch.object(self.policy_vpn.session, 
'delete') as delete_sesson: self.driver.delete_ipsec_site_connection(self.context, FAKE_IPSEC_CONNECTION) delete_ike.assert_called_once() delete_ipsec.assert_called_once() delete_dpd.assert_called_once() delete_sesson.assert_called_once() # TODO(asarfaty): make sure router adv rules also updated def test_create_vpn_service_legal(self): """Create a legal vpn service""" # create an external network with a subnet, and a router providernet_args = {extnet_apidef.EXTERNAL: True} router_db = namedtuple("Router", FAKE_ROUTER.keys())( *FAKE_ROUTER.values()) tier0_uuid = 'tier-0' with self.network(name='ext-net', providernet_args=providernet_args, arg_list=(extnet_apidef.EXTERNAL, )) as ext_net,\ self.subnet(ext_net, enable_dhcp=False),\ mock.patch.object(self.plugin, '_get_tier0_uuid_by_router', return_value=tier0_uuid),\ self.router(external_gateway_info={'network_id': ext_net['network']['id']}) as router,\ self.subnet(cidr='1.1.0.0/24') as sub: # add an interface to the router self.l3plugin.add_router_interface( self.context, router['id'], {'subnet_id': sub['subnet']['id']}) # create the service dummy_port = {'id': 'dummy_port', 'fixed_ips': [{'ip_address': '1.1.1.1'}]} tier0_rtr = {'ha_mode': 'ACTIVE_STANDBY'} with mock.patch.object(self.service_plugin, '_get_vpnservice', return_value=FAKE_VPNSERVICE),\ mock.patch.object(self.policy_vpn.service, 'create_or_overwrite') as create_service,\ mock.patch.object(self.l3plugin, '_get_router', return_value=router_db),\ mock.patch.object(self.plugin, 'get_router', return_value=FAKE_ROUTER),\ mock.patch.object(self.plugin, 'get_ports', return_value=[dummy_port]),\ mock.patch.object(self.plugin, 'delete_port') as delete_port,\ mock.patch.object(self.plugin, 'service_router_has_services', return_value=True),\ mock.patch.object(self.plugin.nsxpolicy.tier0, 'get', return_value=tier0_rtr): self.driver.create_vpnservice(self.context, FAKE_VPNSERVICE) create_service.assert_called_once() # Delete the service nsx_services = 
[{'logical_router_id': tier0_uuid, 'id': 'xxx'}] with mock.patch.object( self.policy_vpn.service, 'list', return_value={'results': nsx_services}),\ mock.patch.object(self.service_plugin, 'get_vpnservices', return_value=[]),\ mock.patch.object(self.policy_vpn.service, 'delete') as delete_service: self.driver.delete_vpnservice( self.context, FAKE_VPNSERVICE) delete_service.assert_called_once() delete_port.assert_called_once() def test_create_another_vpn_service(self): # make sure another backend service is not created providernet_args = {extnet_apidef.EXTERNAL: True} router_db = namedtuple("Router", FAKE_ROUTER.keys())( *FAKE_ROUTER.values()) tier0_rtr_id = _uuid() with self.network(name='ext-net', providernet_args=providernet_args, arg_list=(extnet_apidef.EXTERNAL, )) as ext_net,\ self.subnet(ext_net, enable_dhcp=False),\ mock.patch.object(self.plugin, '_get_tier0_uuid_by_router', return_value=tier0_rtr_id),\ self.router(external_gateway_info={'network_id': ext_net['network']['id']}) as router,\ self.subnet(cidr='1.1.0.0/24') as sub: # add an interface to the router self.l3plugin.add_router_interface( self.context, router['id'], {'subnet_id': sub['subnet']['id']}) # create the service dummy_port = {'id': 'dummy_port', 'fixed_ips': [{'ip_address': '1.1.1.1'}]} tier0_rtr = {'id': tier0_rtr_id, 'ha_mode': 'ACTIVE_STANDBY'} nsx_srv = {'logical_router_id': tier0_rtr_id, 'id': _uuid(), 'enabled': True} with mock.patch.object(self.service_plugin, '_get_vpnservice', return_value=FAKE_VPNSERVICE),\ mock.patch.object(self.policy_vpn.service, 'create_or_overwrite') as create_service,\ mock.patch.object( self.policy_vpn.service, 'list', return_value={'results': [nsx_srv]}),\ mock.patch.object(self.l3plugin, '_get_router', return_value=router_db),\ mock.patch.object(self.plugin, 'get_router', return_value=FAKE_ROUTER),\ mock.patch.object(self.plugin, 'get_ports', return_value=[dummy_port]),\ mock.patch.object(self.plugin, 'delete_port'),\ mock.patch.object(self.plugin, 
'service_router_has_services', return_value=True),\ mock.patch.object(self.plugin.nsxpolicy.tier0, 'get', return_value=tier0_rtr): self.driver.create_vpnservice(self.context, FAKE_VPNSERVICE) create_service.assert_called_once() # now delete both nsx_services = [{'logical_router_id': tier0_rtr_id, 'id': 'xxx'}] with mock.patch.object( self.policy_vpn.service, 'list', return_value={'results': nsx_services}),\ mock.patch.object(self.policy_vpn.service, 'delete') as delete_service: self.driver.delete_vpnservice( self.context, FAKE_VPNSERVICE) delete_service.assert_not_called() with mock.patch.object( self.policy_vpn.service, 'list', return_value={'results': nsx_services}),\ mock.patch.object(self.service_plugin, 'get_vpnservices', return_value=[]),\ mock.patch.object(self.policy_vpn.service, 'delete') as delete_service: self.driver.delete_vpnservice( self.context, FAKE_VPNSERVICE) delete_service.assert_called_once() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/vpnaas/test_nsxv3_vpnaas.py0000644000175000017500000010316300000000000030672 0ustar00coreycorey00000000000000# Copyright 2017 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from collections import namedtuple import contextlib import mock from oslo_utils import uuidutils from neutron.db import l3_db from neutron.db.models import l3 as l3_models from neutron_lib.api.definitions import external_net as extnet_apidef from neutron_lib import context as n_ctx from neutron_lib.plugins import directory from neutron_vpnaas.db.vpn import vpn_models # noqa from neutron_vpnaas.tests import base from vmware_nsx.common import exceptions as nsx_exc from vmware_nsx.services.vpnaas.nsxv3 import ipsec_driver from vmware_nsx.services.vpnaas.nsxv3 import ipsec_validator from vmware_nsx.tests.unit.nsx_v3 import test_plugin _uuid = uuidutils.generate_uuid FAKE_TENANT = _uuid() FAKE_ROUTER_ID = "aaaaaa-bbbbb-ccc" FAKE_ROUTER = {'id': FAKE_ROUTER_ID, 'name': 'fake router', 'tenant_id': FAKE_TENANT, 'admin_state_up': True, 'status': 'ACTIVE', 'gw_port_id': _uuid(), 'enable_snat': False, l3_db.EXTERNAL_GW_INFO: {'network_id': _uuid()}} FAKE_SUBNET_ID = _uuid() FAKE_SUBNET = {'cidr': '1.1.1.0/24', 'id': FAKE_SUBNET_ID} FAKE_VPNSERVICE_ID = _uuid() FAKE_VPNSERVICE = {'id': FAKE_VPNSERVICE_ID, 'name': 'vpn_service', 'description': 'dummy', 'router': FAKE_ROUTER, 'router_id': FAKE_ROUTER_ID, 'subnet': FAKE_SUBNET, 'subnet_id': FAKE_SUBNET_ID, 'tenant_id': FAKE_TENANT, 'admin_state_up': True} FAKE_IKE_POLICY_ID = _uuid() FAKE_IKE_POLICY = {'id': FAKE_IKE_POLICY_ID, 'name': 'ike_dummy', 'description': 'ike_dummy', 'auth_algorithm': 'sha1', 'encryption_algorithm': 'aes-128', 'phase1_negotiation_mode': 'main', 'lifetime': { 'units': 'seconds', 'value': 3600}, 'ike_version': 'v1', 'pfs': 'group14', 'tenant_id': FAKE_TENANT} FAKE_IPSEC_POLICY_ID = _uuid() FAKE_IPSEC_POLICY = {'id': FAKE_IPSEC_POLICY_ID, 'name': 'ipsec_dummy', 'description': 'myipsecpolicy1', 'auth_algorithm': 'sha1', 'encryption_algorithm': 'aes-128', 'encapsulation_mode': 'tunnel', 'lifetime': { 'units': 'seconds', 'value': 3600}, 'transform_protocol': 'esp', 'pfs': 'group14', 'tenant_id': FAKE_TENANT} 
FAKE_IPSEC_CONNECTION_ID = _uuid() FAKE_IPSEC_CONNECTION = {'vpnservice_id': FAKE_VPNSERVICE_ID, 'ikepolicy_id': FAKE_IKE_POLICY_ID, 'ipsecpolicy_id': FAKE_IPSEC_POLICY_ID, 'name': 'VPN connection', 'description': 'VPN connection', 'id': FAKE_IPSEC_CONNECTION_ID, 'peer_address': '192.168.1.10', 'peer_id': '192.168.1.10', 'peer_cidrs': '192.168.1.0/24', 'mtu': 1500, 'psk': 'abcd', 'initiator': 'bi-directional', 'dpd': { 'action': 'hold', 'interval': 30, 'timeout': 120}, 'admin_state_up': True, 'tenant_id': FAKE_TENANT} FAKE_NEW_CONNECTION = {'vpnservice_id': FAKE_VPNSERVICE_ID, 'ikepolicy_id': FAKE_IKE_POLICY_ID, 'ipsecpolicy_id': FAKE_IPSEC_POLICY_ID, 'name': 'VPN connection', 'description': 'VPN connection', 'id': FAKE_IPSEC_CONNECTION_ID, 'peer_address': '192.168.1.10', 'peer_id': '192.168.1.10', 'peer_cidrs': '192.168.2.0/24', 'mtu': 1500, 'psk': 'abcd', 'initiator': 'bi-directional', 'dpd': { 'action': 'hold', 'interval': 30, 'timeout': 120}, 'admin_state_up': True, 'tenant_id': FAKE_TENANT} class TestDriverValidation(base.BaseTestCase): def setUp(self): super(TestDriverValidation, self).setUp() self.context = n_ctx.Context('some_user', 'some_tenant') self.service_plugin = mock.Mock() driver = mock.Mock() driver.service_plugin = self.service_plugin with mock.patch("neutron_lib.plugins.directory.get_plugin"): self.validator = ipsec_validator.IPsecV3Validator(driver) self.validator._l3_plugin = mock.Mock() self.validator._core_plugin = mock.Mock() self.vpn_service = {'router_id': 'dummy_router', 'subnet_id': 'dummy_subnet'} self.peer_address = '10.10.10.10' self.peer_cidr = '10.10.11.0/20' def _test_lifetime_not_in_seconds(self, validation_func): policy_info = {'lifetime': {'units': 'kilobytes', 'value': 1000}} self.assertRaises(nsx_exc.NsxVpnValidationError, validation_func, self.context, policy_info) def test_ike_lifetime_not_in_seconds(self): self._test_lifetime_not_in_seconds( self.validator.validate_ike_policy) def test_ipsec_lifetime_not_in_seconds(self): 
self._test_lifetime_not_in_seconds( self.validator.validate_ipsec_policy) def _test_lifetime_seconds_values_at_limits(self, validation_func): policy_info = {'lifetime': {'units': 'seconds', 'value': 21600}} validation_func(self.context, policy_info) policy_info = {'lifetime': {'units': 'seconds', 'value': 86400}} validation_func(self.context, policy_info) policy_info = {'lifetime': {'units': 'seconds', 'value': 10}} self.assertRaises(nsx_exc.NsxVpnValidationError, validation_func, self.context, policy_info) def test_ike_lifetime_seconds_values_at_limits(self): self._test_lifetime_seconds_values_at_limits( self.validator.validate_ike_policy) def test_ipsec_lifetime_seconds_values_at_limits(self): self._test_lifetime_seconds_values_at_limits( self.validator.validate_ipsec_policy) def _test_auth_algorithm(self, validation_func): auth_algorithm = {'auth_algorithm': 'sha384'} self.assertRaises(nsx_exc.NsxVpnValidationError, validation_func, self.context, auth_algorithm) auth_algorithm = {'auth_algorithm': 'sha512'} self.assertRaises(nsx_exc.NsxVpnValidationError, validation_func, self.context, auth_algorithm) auth_algorithm = {'auth_algorithm': 'sha1'} validation_func(self.context, auth_algorithm) auth_algorithm = {'auth_algorithm': 'sha256'} validation_func(self.context, auth_algorithm) def test_ipsec_auth_algorithm(self): self._test_auth_algorithm(self.validator.validate_ipsec_policy) def test_ike_auth_algorithm(self): self._test_auth_algorithm(self.validator.validate_ike_policy) def _test_encryption_algorithm(self, validation_func): auth_algorithm = {'encryption_algorithm': 'aes-192'} self.assertRaises(nsx_exc.NsxVpnValidationError, validation_func, self.context, auth_algorithm) auth_algorithm = {'encryption_algorithm': 'aes-128'} validation_func(self.context, auth_algorithm) auth_algorithm = {'encryption_algorithm': 'aes-256'} validation_func(self.context, auth_algorithm) def test_ipsec_encryption_algorithm(self): 
self._test_encryption_algorithm(self.validator.validate_ipsec_policy) def test_ike_encryption_algorithm(self): self._test_encryption_algorithm(self.validator.validate_ike_policy) def test_ike_negotiation_mode(self): policy_info = {'phase1-negotiation-mode': 'aggressive'} self.assertRaises(nsx_exc.NsxVpnValidationError, self.validator.validate_ike_policy, self.context, policy_info) policy_info = {'phase1-negotiation-mode': 'main'} self.validator.validate_ike_policy(self.context, policy_info) def _test_pfs(self, validation_func): policy_info = {'pfs': 'group15'} self.assertRaises(nsx_exc.NsxVpnValidationError, validation_func, self.context, policy_info) policy_info = {'pfs': 'group14'} validation_func(self.context, policy_info) def test_ipsec_pfs(self): self._test_pfs(self.validator.validate_ipsec_policy) def test_ike_pfs(self): self._test_pfs(self.validator.validate_ike_policy) def test_ipsec_encap_mode(self): policy_info = {'encapsulation_mode': 'transport'} self.assertRaises(nsx_exc.NsxVpnValidationError, self.validator.validate_ipsec_policy, self.context, policy_info) policy_info = {'encapsulation_mode': 'tunnel'} self.validator.validate_ipsec_policy(self.context, policy_info) def test_ipsec_transform_protocol(self): policy_info = {'transform_protocol': 'ah'} self.assertRaises(nsx_exc.NsxVpnValidationError, self.validator.validate_ipsec_policy, self.context, policy_info) policy_info = {'transform_protocol': 'esp'} self.validator.validate_ipsec_policy(self.context, policy_info) def test_vpn_service_validation_router(self): db_router = l3_models.Router() nsx_router = {'high_availability_mode': 'ACITVE_ACTIVE'} db_router.enable_snat = False with mock.patch.object(self.validator.nsxlib.logical_router, 'get', return_value=nsx_router): self.assertRaises(nsx_exc.NsxVpnValidationError, self.validator.validate_vpnservice, self.context, self.vpn_service) nsx_router = {'high_availability_mode': 'ACTIVE_STANDBY'} db_router.enable_snat = True with 
mock.patch.object(self.validator.nsxlib.logical_router, 'get', return_value=nsx_router),\ mock.patch.object(self.validator._core_plugin, '_get_router', return_value=db_router): self.assertRaises(nsx_exc.NsxVpnValidationError, self.validator.validate_vpnservice, self.context, self.vpn_service) nsx_router = {'high_availability_mode': 'ACTIVE_STANDBY'} db_router.enable_snat = False with mock.patch.object(self.validator.nsxlib.logical_router, 'get', return_value=nsx_router),\ mock.patch.object(self.validator._core_plugin, '_get_router', return_value=db_router): self.validator.validate_vpnservice(self.context, self.vpn_service) def _test_conn_validation(self, conn_params=None, success=True, connections=None, service_subnets=None, router_subnets=None): if connections is None: connections = [] if router_subnets is None: router_subnets = [] def mock_get_routers(context, filters=None, fields=None): return [{'id': 'no-snat', 'external_gateway_info': {'enable_snat': False}}] def mock_get_service(context, service_id): if service_subnets: # option to give the test a different subnet per service subnet_cidr = service_subnets[int(service_id) - 1] else: subnet_cidr = '5.5.5.0/2%s' % service_id return {'id': service_id, 'router_id': service_id, 'subnet_id': 'dummy_subnet', 'external_v4_ip': '1.1.1.%s' % service_id, 'subnet': {'id': 'dummy_subnet', 'cidr': subnet_cidr}} def mock_get_connections(context, filters=None, fields=None): if filters and 'peer_address' in filters: return [conn for conn in connections if conn['peer_address'] == filters['peer_address'][0]] else: return connections with mock.patch.object( self.validator.vpn_plugin, '_get_vpnservice', side_effect=mock_get_service), mock.patch.object( self.validator._core_plugin, 'get_routers', side_effect=mock_get_routers), mock.patch.object( self.validator._core_plugin, '_find_router_subnets_cidrs', return_value=router_subnets), mock.patch.object( self.validator.vpn_plugin, 'get_ipsec_site_connections', 
side_effect=mock_get_connections): ipsec_sitecon = {'id': '1', 'vpnservice_id': '1', 'mtu': 1500, 'peer_address': self.peer_address, 'peer_cidrs': [self.peer_cidr]} if conn_params: ipsec_sitecon.update(conn_params) if success: self.validator.validate_ipsec_site_connection( self.context, ipsec_sitecon) else: self.assertRaises( nsx_exc.NsxVpnValidationError, self.validator.validate_ipsec_site_connection, self.context, ipsec_sitecon) def test_dpd_validation(self): params = {'dpd': {'action': 'hold', 'timeout': 120}} self._test_conn_validation(conn_params=params, success=True) params = {'dpd': {'action': 'clear', 'timeout': 120}} self._test_conn_validation(conn_params=params, success=False) params = {'dpd': {'action': 'hold', 'timeout': 2}} self._test_conn_validation(conn_params=params, success=False) def test_check_unique_addresses(self): # this test runs with non-overlapping local subnets on # different routers subnets = ['5.5.5.0/20', '6.6.6.0/20'] # same service/router gw & peer address - should fail connections = [{'id': '2', 'status': 'ACTIVE', 'vpnservice_id': '1', 'peer_address': self.peer_address, 'peer_cidrs': [self.peer_cidr]}] self._test_conn_validation(success=False, connections=connections, service_subnets=subnets) # different service/router gw - ok connections = [{'id': '2', 'status': 'ACTIVE', 'vpnservice_id': '2', 'peer_address': self.peer_address, 'peer_cidrs': ['6.6.6.6']}] self._test_conn_validation(success=True, connections=connections, service_subnets=subnets) # different peer address - ok connections = [{'id': '2', 'status': 'ACTIVE', 'vpnservice_id': '1', 'peer_address': '7.7.7.1', 'peer_cidrs': ['7.7.7.7']}] self._test_conn_validation(success=True, connections=connections, service_subnets=subnets) # ignoring non-active connections connections = [{'id': '2', 'status': 'ERROR', 'vpnservice_id': '1', 'peer_address': self.peer_address, 'peer_cidrs': [self.peer_cidr]}] self._test_conn_validation(success=True, connections=connections, 
service_subnets=subnets) def test_overlapping_rules(self): # peer-cidr overlapping with new one, same subnet - should fail connections = [{'id': '2', 'status': 'ACTIVE', 'vpnservice_id': '1', 'peer_address': '9.9.9.9', 'peer_cidrs': ['10.10.11.1/19']}] self._test_conn_validation(success=False, connections=connections) # same peer-cidr, overlapping subnets - should fail connections = [{'id': '2', 'status': 'ACTIVE', 'vpnservice_id': '2', 'peer_address': '9.9.9.9', 'peer_cidrs': [self.peer_cidr]}] self._test_conn_validation(success=False, connections=connections) # non overlapping peer-cidr, same subnet - ok connections = [{'id': '2', 'status': 'ACTIVE', 'vpnservice_id': '1', 'peer_address': '7.7.7.1', 'peer_cidrs': ['7.7.7.7']}] self._test_conn_validation(success=True, connections=connections) # ignoring non-active connections connections = [{'id': '2', 'status': 'ERROR', 'vpnservice_id': '1', 'peer_address': '9.9.9.9', 'peer_cidrs': ['10.10.11.1/19']}] self._test_conn_validation(success=True, connections=connections) def test_advertisment(self): # different routers, same subnet - should fail subnets = ['5.5.5.0/20', '5.5.5.0/20'] connections = [{'id': '2', 'status': 'ACTIVE', 'vpnservice_id': '2', 'peer_address': self.peer_address, 'peer_cidrs': ['6.6.6.6']}] self._test_conn_validation(success=False, connections=connections, service_subnets=subnets) # different routers, overlapping subnet - should fail subnets = ['5.5.5.0/20', '5.5.5.0/21'] connections = [{'id': '2', 'status': 'ACTIVE', 'vpnservice_id': '2', 'peer_address': self.peer_address, 'peer_cidrs': ['6.6.6.6']}] self._test_conn_validation(success=False, connections=connections, service_subnets=subnets) # different routers, non overlapping subnet - ok subnets = ['5.5.5.0/20', '50.5.5.0/21'] connections = [{'id': '2', 'status': 'ACTIVE', 'vpnservice_id': '2', 'peer_address': self.peer_address, 'peer_cidrs': ['6.6.6.6']}] self._test_conn_validation(success=True, connections=connections, 
service_subnets=subnets) # no-snat router with overlapping subnet to the service subnet - fail subnets = ['5.5.5.0/21', '1.1.1.0/20'] connections = [{'id': '2', 'status': 'ACTIVE', 'vpnservice_id': '2', 'peer_address': self.peer_address, 'peer_cidrs': ['6.6.6.6']}] self._test_conn_validation(success=False, connections=connections, router_subnets=subnets) # no-snat router with non overlapping subnet to the service subnet - ok service_subnets = ['5.5.5.0/20', '6.6.6.0/20'] router_subnets = ['50.5.5.0/21', '1.1.1.0/20'] connections = [{'id': '2', 'status': 'ACTIVE', 'vpnservice_id': '2', 'peer_address': self.peer_address, 'peer_cidrs': ['6.6.6.6']}] self._test_conn_validation(success=True, connections=connections, service_subnets=service_subnets, router_subnets=router_subnets) class TestVpnaasDriver(test_plugin.NsxV3PluginTestCaseMixin): def setUp(self): super(TestVpnaasDriver, self).setUp() self.context = n_ctx.get_admin_context() self.service_plugin = mock.Mock() self.validator = mock.Mock() self.driver = ipsec_driver.NSXv3IPsecVpnDriver(self.service_plugin) self.plugin = directory.get_plugin() self.nsxlib_vpn = self.plugin.nsxlib.vpn_ipsec self.l3plugin = self.plugin @contextlib.contextmanager def router(self, name='vpn-test-router', tenant_id=_uuid(), admin_state_up=True, **kwargs): request = {'router': {'tenant_id': tenant_id, 'name': name, 'admin_state_up': admin_state_up}} for arg in kwargs: request['router'][arg] = kwargs[arg] router = self.l3plugin.create_router(self.context, request) yield router def test_create_ipsec_site_connection(self): with mock.patch.object(self.service_plugin, 'get_ikepolicy', return_value=FAKE_IKE_POLICY),\ mock.patch.object(self.service_plugin, 'get_ipsecpolicy', return_value=FAKE_IPSEC_POLICY),\ mock.patch.object(self.service_plugin, '_get_vpnservice', return_value=FAKE_VPNSERVICE),\ mock.patch.object(self.service_plugin, 'get_vpnservices', return_value=[FAKE_VPNSERVICE]),\ mock.patch.object(self.plugin, 'get_router', 
return_value=FAKE_ROUTER),\ mock.patch.object(self.plugin, 'get_subnet', return_value=FAKE_SUBNET),\ mock.patch("vmware_nsx.db.db.add_nsx_vpn_connection_mapping"),\ mock.patch.object(self.plugin.nsxlib.logical_router, 'update_advertisement_rules') as update_adv,\ mock.patch.object(self.nsxlib_vpn.ike_profile, 'create') as create_ike,\ mock.patch.object(self.nsxlib_vpn.tunnel_profile, 'create') as create_ipsec,\ mock.patch.object(self.nsxlib_vpn.dpd_profile, 'create') as create_dpd,\ mock.patch.object(self.nsxlib_vpn.session, 'create') as create_sesson: self.driver.create_ipsec_site_connection(self.context, FAKE_IPSEC_CONNECTION) create_ike.assert_called_once() create_ipsec.assert_called_once() create_dpd.assert_called_once() create_sesson.assert_called_once() update_adv.assert_called_once() def test_update_ipsec_site_connection(self): with mock.patch.object(self.service_plugin, '_get_vpnservice', return_value=FAKE_VPNSERVICE),\ mock.patch.object(self.plugin, 'get_router', return_value=FAKE_ROUTER),\ mock.patch.object(self.plugin, 'update_router_firewall') as update_fw,\ mock.patch.object(self.nsxlib_vpn.session, 'update') as update_sesson,\ mock.patch("vmware_nsx.db.db.get_nsx_vpn_connection_mapping"): self.driver.update_ipsec_site_connection(self.context, FAKE_IPSEC_CONNECTION, FAKE_NEW_CONNECTION) update_sesson.assert_called_once() update_fw.assert_called_once() def test_delete_ipsec_site_connection(self): with mock.patch.object(self.service_plugin, 'get_ikepolicy', return_value=FAKE_IKE_POLICY),\ mock.patch.object(self.service_plugin, 'get_ipsecpolicy', return_value=FAKE_IPSEC_POLICY),\ mock.patch.object(self.service_plugin, '_get_vpnservice', return_value=FAKE_VPNSERVICE),\ mock.patch.object(self.service_plugin, 'get_vpnservices', return_value=[FAKE_VPNSERVICE]),\ mock.patch.object(self.plugin, 'get_router', return_value=FAKE_ROUTER),\ mock.patch.object(self.plugin, 'get_subnet', return_value=FAKE_SUBNET),\ mock.patch.object(self.plugin.nsxlib.logical_router, 
'update_advertisement_rules') as update_adv,\ mock.patch("vmware_nsx.db.db.get_nsx_vpn_connection_mapping"),\ mock.patch.object(self.nsxlib_vpn.ike_profile, 'delete') as delete_ike,\ mock.patch.object(self.nsxlib_vpn.tunnel_profile, 'delete') as delete_ipsec,\ mock.patch.object(self.nsxlib_vpn.dpd_profile, 'delete') as delete_dpd,\ mock.patch.object(self.nsxlib_vpn.session, 'delete') as delete_sesson: self.driver.delete_ipsec_site_connection(self.context, FAKE_IPSEC_CONNECTION) delete_ike.assert_called_once() delete_ipsec.assert_called_once() delete_dpd.assert_called_once() delete_sesson.assert_called_once() update_adv.assert_called_once() def test_create_vpn_service_legal(self): """Create a legal vpn service""" # create an external network with a subnet, and a router providernet_args = {extnet_apidef.EXTERNAL: True} router_db = namedtuple("Router", FAKE_ROUTER.keys())( *FAKE_ROUTER.values()) tier0_uuid = 'tier-0' with self.network(name='ext-net', providernet_args=providernet_args, arg_list=(extnet_apidef.EXTERNAL, )) as ext_net,\ self.subnet(ext_net),\ mock.patch.object(self.plugin, '_get_tier0_uuid_by_router', return_value=tier0_uuid),\ self.router(external_gateway_info={'network_id': ext_net['network']['id']}) as router,\ self.subnet(cidr='1.1.0.0/24') as sub: # add an interface to the router self.l3plugin.add_router_interface( self.context, router['id'], {'subnet_id': sub['subnet']['id']}) # create the service dummy_port = {'id': 'dummy_port', 'fixed_ips': [{'ip_address': '1.1.1.1'}]} tier0_rtr = {'high_availability_mode': 'ACTIVE_STANDBY'} with mock.patch.object(self.service_plugin, '_get_vpnservice', return_value=FAKE_VPNSERVICE),\ mock.patch.object(self.nsxlib_vpn.service, 'create') as create_service,\ mock.patch.object(self.l3plugin, '_get_router', return_value=router_db),\ mock.patch.object(self.plugin, 'get_router', return_value=FAKE_ROUTER),\ mock.patch.object(self.plugin, 'get_ports', return_value=[dummy_port]),\ mock.patch.object(self.plugin, 
'delete_port'),\ mock.patch.object(self.plugin.nsxlib.logical_router, 'get', return_value=tier0_rtr): self.driver.create_vpnservice(self.context, FAKE_VPNSERVICE) create_service.assert_called_once() # Delete the service nsx_services = [{'logical_router_id': tier0_uuid, 'id': 'xxx'}] with mock.patch.object( self.nsxlib_vpn.service, 'list', return_value={'results': nsx_services}),\ mock.patch.object(self.service_plugin, 'get_vpnservices', return_value=[]),\ mock.patch.object(self.nsxlib_vpn.service, 'delete') as delete_service: self.driver.delete_vpnservice( self.context, FAKE_VPNSERVICE) delete_service.assert_called_once() def test_create_another_vpn_service(self): # make sure another backend service is not created providernet_args = {extnet_apidef.EXTERNAL: True} router_db = namedtuple("Router", FAKE_ROUTER.keys())( *FAKE_ROUTER.values()) tier0_rtr_id = _uuid() with self.network(name='ext-net', providernet_args=providernet_args, arg_list=(extnet_apidef.EXTERNAL, )) as ext_net,\ self.subnet(ext_net),\ mock.patch.object(self.plugin, '_get_tier0_uuid_by_router', return_value=tier0_rtr_id),\ self.router(external_gateway_info={'network_id': ext_net['network']['id']}) as router,\ self.subnet(cidr='1.1.0.0/24') as sub: # add an interface to the router self.l3plugin.add_router_interface( self.context, router['id'], {'subnet_id': sub['subnet']['id']}) # create the service dummy_port = {'id': 'dummy_port', 'fixed_ips': [{'ip_address': '1.1.1.1'}]} tier0_rtr = {'id': tier0_rtr_id, 'high_availability_mode': 'ACTIVE_STANDBY'} nsx_srv = {'logical_router_id': tier0_rtr_id, 'id': _uuid(), 'enabled': True} with mock.patch.object(self.service_plugin, '_get_vpnservice', return_value=FAKE_VPNSERVICE),\ mock.patch.object(self.nsxlib_vpn.service, 'create') as create_service,\ mock.patch.object( self.nsxlib_vpn.service, 'list', return_value={'results': [nsx_srv]}) as create_service,\ mock.patch.object(self.l3plugin, '_get_router', return_value=router_db),\ mock.patch.object(self.plugin, 
'get_router', return_value=FAKE_ROUTER),\ mock.patch.object(self.plugin, 'get_ports', return_value=[dummy_port]),\ mock.patch.object(self.plugin, 'delete_port'),\ mock.patch.object(self.plugin.nsxlib.logical_router, 'get', return_value=tier0_rtr): self.driver.create_vpnservice(self.context, FAKE_VPNSERVICE) create_service.assert_called_once() # now delete both nsx_services = [{'logical_router_id': tier0_rtr_id, 'id': 'xxx'}] with mock.patch.object( self.nsxlib_vpn.service, 'list', return_value={'results': nsx_services}),\ mock.patch.object( self.service_plugin, 'get_vpnservices', return_value=[{'id': 'dummy', 'router_id': 'dummy'}]),\ mock.patch.object(self.nsxlib_vpn.service, 'delete') as delete_service: self.driver.delete_vpnservice( self.context, FAKE_VPNSERVICE) delete_service.assert_not_called() with mock.patch.object( self.nsxlib_vpn.service, 'list', return_value={'results': nsx_services}),\ mock.patch.object(self.service_plugin, 'get_vpnservices', return_value=[]),\ mock.patch.object(self.service_plugin, 'get_vpnservices', return_value=[]),\ mock.patch.object(self.nsxlib_vpn.service, 'delete') as delete_service: self.driver.delete_vpnservice( self.context, FAKE_VPNSERVICE) delete_service.assert_called_once() pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/services/vpnaas/test_nsxv_vpnaas.py0000644000175000017500000004132600000000000030611 0ustar00coreycorey00000000000000# Copyright 2016 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import mock from neutron_lib.api.definitions import external_net as extnet_apidef from neutron_lib import context from neutron_lib.plugins import directory from neutron_vpnaas.db.vpn import vpn_models # noqa from neutron_vpnaas.extensions import vpnaas from oslo_utils import uuidutils from vmware_nsx.common import exceptions as nsxv_exc from vmware_nsx.plugins.nsx_v.vshield.common import exceptions as vcns_exc from vmware_nsx.services.vpnaas.nsxv import ipsec_driver from vmware_nsx.tests.unit.nsx_v import test_plugin _uuid = uuidutils.generate_uuid DRIVER_PATH = "vmware_nsx.services.vpnaas.nsxv.ipsec_driver.NSXvIPsecVpnDriver" VALI_PATH = "vmware_nsx.services.vpnaas.nsxv.ipsec_validator.IPsecValidator" FAKE_ROUTER_ID = "aaaaaa-bbbbb-ccc" FAKE_VPNSERVICE_ID = _uuid() FAKE_IPSEC_CONNECTION = {"vpnservice_id": FAKE_VPNSERVICE_ID, "id": _uuid()} FAKE_EDGE_ID = _uuid() FAKE_IPSEC_VPN_SITE = {"peerIp": "192.168.1.1"} FAKE_VCNSAPIEXC = {"status": "fail", "head": "fake_head", "response": "error"} FAKE_NEW_CONNECTION = {"peer_cidrs": "192.168.1.0/24"} class TestVpnaasDriver(test_plugin.NsxVPluginV2TestCase): def setUp(self): super(TestVpnaasDriver, self).setUp() self.context = context.get_admin_context() self.service_plugin = mock.Mock() self.validator = mock.Mock() self.driver = ipsec_driver.NSXvIPsecVpnDriver(self.service_plugin) self.plugin = directory.get_plugin() self.l3plugin = self.plugin @contextlib.contextmanager def router(self, name='vpn-test-router', tenant_id=_uuid(), admin_state_up=True, **kwargs): request = {'router': {'tenant_id': tenant_id, 
'name': name, 'admin_state_up': admin_state_up}} for arg in kwargs: request['router'][arg] = kwargs[arg] router = self.l3plugin.create_router(self.context, request) yield router @mock.patch('%s._convert_ipsec_conn' % DRIVER_PATH) @mock.patch('%s._get_router_edge_id' % DRIVER_PATH) @mock.patch('%s._generate_new_sites' % DRIVER_PATH) @mock.patch('%s._update_ipsec_config' % DRIVER_PATH) @mock.patch('%s._update_status' % DRIVER_PATH) @mock.patch('%s._update_firewall_rules' % DRIVER_PATH) def test_create_ipsec_site_connection(self, mock_update_fw, mock_update_status, mock_update_ipsec, mock_gen_new, mock_get_id, mock_conv_ipsec): mock_get_id.return_value = (FAKE_ROUTER_ID, FAKE_EDGE_ID) mock_conv_ipsec.return_value = FAKE_IPSEC_VPN_SITE mock_gen_new.return_value = FAKE_IPSEC_VPN_SITE self.driver.create_ipsec_site_connection(self.context, FAKE_IPSEC_CONNECTION) mock_conv_ipsec.assert_called_with(self.context, FAKE_IPSEC_CONNECTION) mock_get_id.assert_called_with(self.context, FAKE_VPNSERVICE_ID) mock_gen_new.assert_called_with(FAKE_EDGE_ID, FAKE_IPSEC_VPN_SITE) mock_update_ipsec.assert_called_with(FAKE_EDGE_ID, FAKE_IPSEC_VPN_SITE, enabled=True) mock_update_fw.assert_called_with(self.context, FAKE_VPNSERVICE_ID) mock_update_status.assert_called_with( self.context, FAKE_IPSEC_CONNECTION["vpnservice_id"], FAKE_IPSEC_CONNECTION["id"], "ACTIVE") @mock.patch('%s._convert_ipsec_conn' % DRIVER_PATH) @mock.patch('%s._get_router_edge_id' % DRIVER_PATH) @mock.patch('%s._generate_new_sites' % DRIVER_PATH) @mock.patch('%s._update_ipsec_config' % DRIVER_PATH) @mock.patch('%s._update_status' % DRIVER_PATH) def test_create_ipsec_site_connection_fail(self, mock_update_status, mock_update_ipsec, mock_gen_new, mock_get_id, mock_conv_ipsec): mock_get_id.return_value = (FAKE_ROUTER_ID, FAKE_EDGE_ID) mock_conv_ipsec.return_value = FAKE_IPSEC_VPN_SITE mock_gen_new.return_value = FAKE_IPSEC_VPN_SITE mock_update_ipsec.side_effect = ( vcns_exc.VcnsApiException(**FAKE_VCNSAPIEXC)) 
self.assertRaises(nsxv_exc.NsxPluginException, self.driver.create_ipsec_site_connection, self.context, FAKE_IPSEC_CONNECTION) mock_conv_ipsec.assert_called_with(self.context, FAKE_IPSEC_CONNECTION) mock_get_id.assert_called_with(self.context, FAKE_VPNSERVICE_ID) mock_gen_new.assert_called_with(FAKE_EDGE_ID, FAKE_IPSEC_VPN_SITE) mock_update_ipsec.assert_called_with(FAKE_EDGE_ID, FAKE_IPSEC_VPN_SITE, enabled=True) mock_update_status.assert_called_with( self.context, FAKE_IPSEC_CONNECTION["vpnservice_id"], FAKE_IPSEC_CONNECTION["id"], "ERROR") @mock.patch('%s._convert_ipsec_conn' % DRIVER_PATH) @mock.patch('%s._get_router_edge_id' % DRIVER_PATH) @mock.patch('%s._generate_new_sites' % DRIVER_PATH) @mock.patch('%s._update_ipsec_config' % DRIVER_PATH) @mock.patch('%s._update_status' % DRIVER_PATH) @mock.patch('%s._update_firewall_rules' % DRIVER_PATH) def test_update_fw_fail(self, mock_update_fw, mock_update_status, mock_update_ipsec, mock_gen_new, mock_get_id, mock_conv_ipsec): mock_get_id.return_value = (FAKE_ROUTER_ID, FAKE_EDGE_ID) mock_conv_ipsec.return_value = FAKE_IPSEC_VPN_SITE mock_gen_new.return_value = FAKE_IPSEC_VPN_SITE mock_update_fw.side_effect = ( vcns_exc.VcnsApiException(**FAKE_VCNSAPIEXC)) self.assertRaises(nsxv_exc.NsxPluginException, self.driver.create_ipsec_site_connection, self.context, FAKE_IPSEC_CONNECTION) mock_conv_ipsec.assert_called_with(self.context, FAKE_IPSEC_CONNECTION) mock_get_id.assert_called_with(self.context, FAKE_VPNSERVICE_ID) mock_gen_new.assert_called_with(FAKE_EDGE_ID, FAKE_IPSEC_VPN_SITE) mock_update_ipsec.assert_called_with(FAKE_EDGE_ID, FAKE_IPSEC_VPN_SITE, enabled=True) mock_update_fw.assert_called_with(self.context, FAKE_VPNSERVICE_ID) mock_update_status.assert_called_with( self.context, FAKE_IPSEC_CONNECTION["vpnservice_id"], FAKE_IPSEC_CONNECTION["id"], "ERROR") @mock.patch('%s._get_router_edge_id' % DRIVER_PATH) @mock.patch('%s._update_site_dict' % DRIVER_PATH) @mock.patch('%s._update_ipsec_config' % DRIVER_PATH) 
@mock.patch('%s._update_firewall_rules' % DRIVER_PATH) def test_update_ipsec(self, mock_update_fw, mock_update_ipsec, mock_update_sites, mock_get_id): mock_get_id.return_value = (FAKE_ROUTER_ID, FAKE_EDGE_ID) mock_update_sites.return_value = FAKE_IPSEC_VPN_SITE self.driver.update_ipsec_site_connection(self.context, FAKE_IPSEC_CONNECTION, FAKE_NEW_CONNECTION) mock_update_sites.assert_called_with(self.context, FAKE_EDGE_ID, FAKE_IPSEC_CONNECTION, FAKE_NEW_CONNECTION) mock_update_ipsec.assert_called_with(FAKE_EDGE_ID, FAKE_IPSEC_VPN_SITE) mock_update_fw.assert_called_with(self.context, FAKE_VPNSERVICE_ID) @mock.patch('%s._get_router_edge_id' % DRIVER_PATH) @mock.patch('%s._update_site_dict' % DRIVER_PATH) @mock.patch('%s._update_ipsec_config' % DRIVER_PATH) @mock.patch('%s._update_firewall_rules' % DRIVER_PATH) def test_update_ipsec_fail_with_notfound(self, mock_update_fw, mock_update_ipsec, mock_update_sites, mock_get_id): mock_get_id.return_value = (FAKE_ROUTER_ID, FAKE_EDGE_ID) mock_update_sites.return_value = {} self.assertRaises(nsxv_exc.NsxIPsecVpnMappingNotFound, self.driver.update_ipsec_site_connection, self.context, FAKE_IPSEC_CONNECTION, FAKE_NEW_CONNECTION) mock_update_sites.assert_called_with(self.context, FAKE_EDGE_ID, FAKE_IPSEC_CONNECTION, FAKE_NEW_CONNECTION) @mock.patch('%s._get_router_edge_id' % DRIVER_PATH) @mock.patch('%s._update_site_dict' % DRIVER_PATH) @mock.patch('%s._update_ipsec_config' % DRIVER_PATH) @mock.patch('%s._update_firewall_rules' % DRIVER_PATH) def test_update_ipsec_fail_with_fw_fail(self, mock_update_fw, mock_update_ipsec, mock_update_sites, mock_get_id): mock_get_id.return_value = (FAKE_ROUTER_ID, FAKE_EDGE_ID) mock_update_fw.side_effect = ( vcns_exc.VcnsApiException(**FAKE_VCNSAPIEXC)) self.assertRaises(nsxv_exc.NsxPluginException, self.driver.update_ipsec_site_connection, self.context, FAKE_IPSEC_CONNECTION, FAKE_NEW_CONNECTION) mock_update_sites.assert_called_with(self.context, FAKE_EDGE_ID, FAKE_IPSEC_CONNECTION, 
FAKE_NEW_CONNECTION) mock_update_fw.assert_called_with(self.context, FAKE_VPNSERVICE_ID) @mock.patch('%s._get_router_edge_id' % DRIVER_PATH) @mock.patch('%s._update_site_dict' % DRIVER_PATH) @mock.patch('%s._update_ipsec_config' % DRIVER_PATH) @mock.patch('%s._update_status' % DRIVER_PATH) def test_update_ipsec_fail_with_site_fail(self, mock_update_status, mock_update_ipsec, mock_update_sites, mock_get_id): mock_get_id.return_value = (FAKE_ROUTER_ID, FAKE_EDGE_ID) mock_update_sites.return_value = FAKE_IPSEC_VPN_SITE mock_update_ipsec.side_effect = ( vcns_exc.VcnsApiException(**FAKE_VCNSAPIEXC)) self.assertRaises(nsxv_exc.NsxPluginException, self.driver.update_ipsec_site_connection, self.context, FAKE_IPSEC_CONNECTION, FAKE_NEW_CONNECTION) mock_update_sites.assert_called_with(self.context, FAKE_EDGE_ID, FAKE_IPSEC_CONNECTION, FAKE_NEW_CONNECTION) mock_update_ipsec.assert_called_with(FAKE_EDGE_ID, FAKE_IPSEC_VPN_SITE) mock_update_status.assert_called_with( self.context, FAKE_IPSEC_CONNECTION["vpnservice_id"], FAKE_IPSEC_CONNECTION["id"], "ERROR") def test_create_vpn_service_legal(self): """Create a legal vpn service""" # create an external network with a subnet, and an exclusive router providernet_args = {extnet_apidef.EXTERNAL: True} with self.network(name='ext-net', providernet_args=providernet_args, arg_list=(extnet_apidef.EXTERNAL, )) as ext_net,\ self.subnet(ext_net),\ self.router(router_type='exclusive', external_gateway_info={'network_id': ext_net['network']['id']}) as router,\ self.subnet() as sub: # add an interface to the router self.l3plugin.add_router_interface( self.context, router['id'], {'subnet_id': sub['subnet']['id']}) # create the service vpnservice = {'router_id': router['id'], 'id': _uuid(), 'subnet_id': sub['subnet']['id']} with mock.patch.object(self.driver, '_get_gateway_ips', return_value=(None, None)): self.driver.create_vpnservice(self.context, vpnservice) def test_create_vpn_service_on_shared_router(self): """Creating a service with shared 
router is not allowed""" # create an external network with a subnet, and a shared router providernet_args = {extnet_apidef.EXTERNAL: True} with self.network(name='ext-net', providernet_args=providernet_args, arg_list=(extnet_apidef.EXTERNAL, )) as ext_net,\ self.subnet(ext_net),\ self.router(router_type='shared', external_gateway_info={'network_id': ext_net['network']['id']}) as router,\ self.subnet() as sub: # add an interface to the router self.l3plugin.add_router_interface( self.context, router['id'], {'subnet_id': sub['subnet']['id']}) # create the service vpnservice = {'router_id': router['id'], 'id': _uuid(), 'subnet_id': sub['subnet']['id']} self.assertRaises(nsxv_exc.NsxPluginException, self.driver.create_vpnservice, self.context, vpnservice) def test_create_vpn_service_on_router_without_if(self): """Creating a service with unattached subnet is not allowed""" # create an external network with a subnet, and an exclusive router providernet_args = {extnet_apidef.EXTERNAL: True} with self.network(name='ext-net', providernet_args=providernet_args, arg_list=(extnet_apidef.EXTERNAL, )) as ext_net,\ self.subnet(ext_net),\ self.router(router_type='exclusive', external_gateway_info={'network_id': ext_net['network']['id']}) as router,\ self.subnet() as sub: # create the service vpnservice = {'router_id': router['id'], 'id': _uuid(), 'subnet_id': sub['subnet']['id']} self.assertRaises(vpnaas.SubnetIsNotConnectedToRouter, self.driver.create_vpnservice, self.context, vpnservice) def test_create_vpn_service_without_subnet(self): """Creating a service without a subnet is not allowed""" # create an external network with a subnet, and an exclusive router providernet_args = {extnet_apidef.EXTERNAL: True} with self.network(name='ext-net', providernet_args=providernet_args, arg_list=(extnet_apidef.EXTERNAL, )) as ext_net,\ self.subnet(ext_net),\ self.router(router_type='exclusive', external_gateway_info={'network_id': ext_net['network']['id']}) as router,\ self.subnet() as sub: 
# add an interface to the router self.l3plugin.add_router_interface( self.context, router['id'], {'subnet_id': sub['subnet']['id']}) # create the service without the subnet vpnservice = {'router_id': router['id'], 'id': _uuid(), 'subnet_id': None} self.assertRaises(nsxv_exc.NsxPluginException, self.driver.create_vpnservice, self.context, vpnservice) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.2462552 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/shell/0000755000175000017500000000000000000000000022620 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/shell/__init__.py0000644000175000017500000000000000000000000024717 0ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/shell/test_admin_utils.py0000644000175000017500000003460600000000000026552 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc import mock from oslo_config import cfg from oslo_log import _options from oslo_log import log as logging from oslo_utils import uuidutils import six from neutron.common import config as neutron_config from neutron.db import servicetype_db # noqa from neutron import quota from neutron.quota import resource_registry from neutron.tests import base from neutron_lib.callbacks import registry from neutron_lib.plugins import constants from vmware_nsx._i18n import _ from vmware_nsx.common import config # noqa from vmware_nsx.db import nsxv_db from vmware_nsx.dvs import dvs_utils from vmware_nsx.shell.admin.plugins.nsxp.resources import utils as nsxp_utils from vmware_nsx.shell.admin.plugins.nsxv.resources import migration from vmware_nsx.shell.admin.plugins.nsxv.resources import utils as nsxv_utils from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils as nsxv3_utils from vmware_nsx.shell import resources from vmware_nsx.tests import unit as vmware from vmware_nsx.tests.unit.nsx_p import test_plugin as test_p_plugin from vmware_nsx.tests.unit.nsx_v import test_plugin as test_v_plugin from vmware_nsx.tests.unit.nsx_v3 import test_plugin as test_v3_plugin from vmware_nsxlib.v3 import core_resources from vmware_nsxlib.v3 import resources as nsx_v3_resources LOG = logging.getLogger(__name__) NSX_INI_PATH = vmware.get_fake_conf('nsx.ini.test') BASE_CONF_PATH = vmware.get_fake_conf('neutron.conf.test') @six.add_metaclass(abc.ABCMeta) class AbstractTestAdminUtils(base.BaseTestCase): def setUp(self): cfg.CONF.unregister_opts(_options.common_cli_opts) cfg.CONF.register_cli_opts(resources.cli_opts) super(AbstractTestAdminUtils, self).setUp() # remove resource registration conflicts resource_registry.unregister_all_resources() self.edgeapi = nsxv_utils.NeutronDbClient() # Init the neutron config neutron_config.init(args=['--config-file', BASE_CONF_PATH, '--config-file', NSX_INI_PATH]) self._init_mock_plugin() self._init_resource_plugin() 
self.addCleanup(resource_registry.unregister_all_resources) def _init_mock_plugin(self): mock_query = mock.patch( "vmware_nsx.shell.admin.plugins.common.utils.query_yes_no") mock_query.start() def _init_mock_quota(self): make_res = mock.patch.object(quota.QuotaEngine, 'make_reservation') self.mock_quota_make_res = make_res.start() commit_res = mock.patch.object(quota.QuotaEngine, 'commit_reservation') self.mock_quota_commit_res = commit_res.start() @abc.abstractmethod def _get_plugin_name(self): pass def _init_resource_plugin(self): plugin_name = self._get_plugin_name() resources.init_resource_plugin( plugin_name, resources.get_plugin_dir(plugin_name)) def _test_resource(self, res_name, op, **kwargs): errors = self._test_resource_with_errors(res_name, op, **kwargs) if len(errors) > 0: msg = (_("admin util %(res)s/%(op)s failed with message: " "%(err)s") % {'res': res_name, 'op': op, 'err': errors[0]}) self.fail(msg=msg) def _test_resource_with_errors(self, res_name, op, **kwargs): # Must call the internal notify_loop in order to get the errors return registry._get_callback_manager()._notify_loop( res_name, op, 'nsxadmin', **kwargs) def _test_resources(self, res_dict): for res in res_dict.keys(): res_name = res_dict[res].name for op in res_dict[res].supported_ops: self._test_resource(res_name, op) def _test_resources_with_args(self, res_dict, func_args): for res in res_dict.keys(): res_name = res_dict[res].name for op in res_dict[res].supported_ops: args = {'property': func_args} self._test_resource(res_name, op, **args) def _create_router(self): tenant_id = uuidutils.generate_uuid() data = {'router': {'tenant_id': tenant_id}} data['router']['name'] = 'dummy' data['router']['admin_state_up'] = True return self._plugin.create_router(self.edgeapi.context, data) class TestNsxvAdminUtils(AbstractTestAdminUtils, test_v_plugin.NsxVPluginV2TestCase): def _get_plugin_name(self): return 'nsxv' def _init_mock_plugin(self, *mocks): super(TestNsxvAdminUtils, 
self)._init_mock_plugin() # support the dvs manager: mock.patch.object(dvs_utils, 'dvs_create_session').start() # override metadata get-object dummy_lb = { 'enabled': True, 'enableServiceInsertion': True, 'accelerationEnabled': True, 'virtualServer': [], 'applicationProfile': [], 'pool': [], 'applicationRule': [] } mock.patch('vmware_nsx.plugins.nsx_v.vshield.nsxv_edge_cfg_obj.' 'NsxvEdgeCfgObj.get_object', return_value=dummy_lb).start() # Tests shouldn't wait for dummy spawn jobs to finish mock.patch('vmware_nsx.shell.admin.plugins.nsxv.resources.utils.' 'NsxVPluginWrapper.count_spawn_jobs', return_value=0).start() self._plugin = nsxv_utils.NsxVPluginWrapper() def get_plugin_mock(alias=constants.CORE): if alias in (constants.CORE, constants.L3): return self._plugin mock.patch("neutron_lib.plugins.directory.get_plugin", side_effect=get_plugin_mock).start() self._init_mock_quota() # Create a router to make sure we have deployed an edge self.router = self._create_router() self.network = self._create_net() def tearDown(self): if self.router and self.router.get('id'): self._plugin.delete_router( self.edgeapi.context, self.router['id']) if self.network and self.network.get('id'): self._plugin.delete_network( self.edgeapi.context, self.network['id']) super(TestNsxvAdminUtils, self).tearDown() def test_nsxv_resources(self): self._test_resources(resources.nsxv_resources) def _test_edge_nsx_update(self, edge_id, params): args = {'property': ["edge-id=%s" % edge_id]} args['property'].extend(params) self._test_resource('edges', 'nsx-update', **args) def _create_router(self): # Create an exclusive router (with an edge) tenant_id = uuidutils.generate_uuid() data = {'router': {'tenant_id': tenant_id}} data['router']['name'] = 'dummy' data['router']['admin_state_up'] = True data['router']['router_type'] = 'exclusive' return self._plugin.create_router(self.edgeapi.context, data) def _create_net(self): tenant_id = uuidutils.generate_uuid() data = {'network': {'tenant_id': 
tenant_id, 'name': 'dummy', 'admin_state_up': True, 'shared': False}} net = self._plugin.create_network(self.edgeapi.context, data) data = {'subnet': {'tenant_id': tenant_id, 'name': 'dummy', 'admin_state_up': True, 'network_id': net['id'], 'cidr': '1.1.1.0/16', 'enable_dhcp': True, 'ip_version': 4, 'dns_nameservers': None, 'host_routes': None, 'allocation_pools': None}} self._plugin.create_subnet(self.edgeapi.context, data) return net def get_edge_id(self): bindings = nsxv_db.get_nsxv_router_bindings( self.edgeapi.context.session) for binding in bindings: if binding.edge_id: return binding.edge_id # use a dummy edge return "edge-1" def test_edge_nsx_updates(self): """Test eges/nsx-update utility with different inputs.""" edge_id = self.get_edge_id() self._test_edge_nsx_update(edge_id, ["appliances=true"]) self._test_edge_nsx_update(edge_id, ["size=compact"]) self._test_edge_nsx_update(edge_id, ["hostgroup=update"]) self._test_edge_nsx_update(edge_id, ["hostgroup=all"]) self._test_edge_nsx_update(edge_id, ["hostgroup=clean"]) self._test_edge_nsx_update(edge_id, ["highavailability=True"]) self._test_edge_nsx_update(edge_id, ["resource=cpu", "limit=100"]) self._test_edge_nsx_update(edge_id, ["syslog-server=1.1.1.1", "syslog-proto=tcp", "log-level=debug"]) def test_bad_args(self): args = {'property': ["xxx"]} errors = self._test_resource_with_errors( 'networks', 'nsx-update', **args) self.assertEqual(1, len(errors)) def test_resources_with_common_args(self): """Run all nsxv admin utilities with some common arguments Using arguments like edge-id which many apis need This improves the test coverage """ edge_id = self.get_edge_id() args = ["edge-id=%s" % edge_id, "router-id=e5b9b249-0034-4729-8ab6-fe4dacaa3a12", "policy-id=1", "network_id=net-1", "net-id=net-1", "security-group-id=sg-1", "dvs-id=dvs-1", "moref=virtualwire-1", "teamingpolicy=LACP_ACTIVE", "log-allowed-traffic=true" ] self._test_resources_with_args( resources.nsxv_resources, args) def 
test_router_recreate(self): # Testing router-recreate separately because it may change the edge-id edge_id = self.get_edge_id() args = {'property': ["edge-id=%s" % edge_id]} self._test_resource('routers', 'nsx-recreate', **args) def test_migration_validation(self): # check that validation fails args = {'property': ["transit-network=1.1.1.0/24"]} try: migration.validate_config_for_migration( 'nsx-migrate-v2t', 'validate', None, **args) except SystemExit: return else: self.assertTrue(False) class TestNsxv3AdminUtils(AbstractTestAdminUtils, test_v3_plugin.NsxV3PluginTestCaseMixin): def _patch_object(self, *args, **kwargs): patcher = mock.patch.object(*args, **kwargs) patcher.start() self._patchers.append(patcher) def _init_mock_plugin(self): test_v3_plugin._mock_nsx_backend_calls() # mock resources for cls in (nsx_v3_resources.LogicalPort, nsx_v3_resources.LogicalDhcpServer, core_resources.NsxLibLogicalRouter, core_resources.NsxLibSwitchingProfile): self._patch_object(cls, 'list', return_value={'results': []}) self._patch_object(cls, 'get', return_value={'id': uuidutils.generate_uuid()}) self._patch_object(cls, 'update') self._patch_object(core_resources.NsxLibSwitchingProfile, 'find_by_display_name', return_value=[{'id': uuidutils.generate_uuid()}]) super(TestNsxv3AdminUtils, self)._init_mock_plugin() self._plugin = nsxv3_utils.NsxV3PluginWrapper() mock_nm_get_plugin = mock.patch( "neutron_lib.plugins.directory.get_plugin") self.mock_nm_get_plugin = mock_nm_get_plugin.start() self.mock_nm_get_plugin.return_value = self._plugin self._init_mock_quota() def _get_plugin_name(self): return 'nsxv3' def test_nsxv3_resources(self): self._test_resources(resources.nsxv3_resources) def test_resources_with_common_args(self): """Run all nsxv3 admin utilities with some common arguments Using arguments like dhcp_profile_uuid which many apis need This improves the test coverage """ args = ["dhcp_profile_uuid=e5b9b249-0034-4729-8ab6-fe4dacaa3a12", 
"metadata_proxy_uuid=e5b9b249-0034-4729-8ab6-fe4dacaa3a12", "nsx-id=e5b9b249-0034-4729-8ab6-fe4dacaa3a12", "availability-zone=default", "server-ip=1.1.1.1", "log-allowed-traffic=true" ] # Create some neutron objects for the utilities to run on self._create_router() with self._create_l3_ext_network() as network: with self.subnet(network=network) as subnet: with self.port(subnet=subnet): # Run all utilities with backend objects self._test_resources_with_args( resources.nsxv3_resources, args) class TestNsxtvdAdminUtils(AbstractTestAdminUtils): def _get_plugin_name(self): return 'nsxtvd' def test_nsxtv_resources(self): self._test_resources(resources.nsxtvd_resources) class TestNsxpAdminUtils(AbstractTestAdminUtils, test_p_plugin.NsxPPluginTestCaseMixin): def _get_plugin_name(self): return 'nsxp' def _init_mock_plugin(self): self._plugin = nsxp_utils.NsxPolicyPluginWrapper() self._init_mock_quota() def test_nsxp_resources(self): self._test_resources(resources.nsxp_resources) def test_nsxp_resources_with_objects(self): # Create some neutron objects for the utilities to run on self._create_router() with self.network(): # Run all utilities with backend objects self._test_resources(resources.nsxp_resources) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/tests/unit/test_utils.py0000644000175000017500000000415000000000000024262 0ustar00coreycorey00000000000000# Copyright (c) 2012 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg def override_nsx_ini_test(): cfg.CONF.set_override("default_tz_uuid", "fake_tz_uuid") cfg.CONF.set_override("nsx_controllers", ["fake1", "fake_2"]) cfg.CONF.set_override("nsx_user", "foo") cfg.CONF.set_override("nsx_password", "bar") cfg.CONF.set_override("default_l3_gw_service_uuid", "whatever") cfg.CONF.set_override("default_l2_gw_service_uuid", "whatever") cfg.CONF.set_override("manager_uri", "https://fake_manager", group="nsxv") cfg.CONF.set_override("user", "fake_user", group="nsxv") cfg.CONF.set_override("password", "fake_password", group="nsxv") cfg.CONF.set_override("vdn_scope_id", "fake_vdn_scope_id", group="nsxv") cfg.CONF.set_override("dvs_id", "fake_dvs_id", group="nsxv") cfg.CONF.set_override("cluster_moid", "fake_cluster_moid", group="nsxv") cfg.CONF.set_override("external_network", "fake_net", group="nsxv") def override_nsx_ini_full_test(): cfg.CONF.set_override("default_tz_uuid", "fake_tz_uuid") cfg.CONF.set_override("nsx_controllers", ["fake1", "fake_2"]) cfg.CONF.set_override("nsx_user", "foo") cfg.CONF.set_override("nsx_password", "bar") cfg.CONF.set_override("default_l3_gw_service_uuid", "whatever") cfg.CONF.set_override("default_l2_gw_service_uuid", "whatever") cfg.CONF.set_override("nsx_default_interface_name", "whatever") cfg.CONF.set_override("http_timeout", 13) cfg.CONF.set_override("redirects", 12) cfg.CONF.set_override("retries", "11") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542525.0 vmware-nsx-15.0.1.dev143/vmware_nsx/version.py0000644000175000017500000000125100000000000021426 0ustar00coreycorey00000000000000# Copyright 2015 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pbr.version version_info = pbr.version.VersionInfo('vmware-nsx') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1586542531.1822534 vmware-nsx-15.0.1.dev143/vmware_nsx.egg-info/0000755000175000017500000000000000000000000021062 5ustar00coreycorey00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542531.0 vmware-nsx-15.0.1.dev143/vmware_nsx.egg-info/PKG-INFO0000644000175000017500000000326700000000000022167 0ustar00coreycorey00000000000000Metadata-Version: 1.2 Name: vmware-nsx Version: 15.0.1.dev143 Summary: VMware NSX library for OpenStack projects Home-page: https://launchpad.net/vmware-nsx Author: OpenStack Author-email: openstack-discuss@lists.openstack.org License: UNKNOWN Description: =================== VMware-NSX package =================== You have come across the VMware-NSX family of Neutron plugins External Resources: ------------------- The homepage for the VMware-NSX project is on Launchpad_. .. _Launchpad: https://launchpad.net/vmware-nsx Use this site for asking for help, and filing bugs. Code is available both git.openstack.org_ and github_. .. _git.openstack.org: https://git.openstack.org/cgit/openstack/vmware-nsx/tree/ .. _github: https://github.com/openstack/vmware-nsx For help on usage and hacking of VMware-NSX, please send a message to the openstack-discuss_ mailing list. .. 
_openstack-discuss: mailto:openstack-discuss@lists.openstack.org For information on how to contribute to VMware-NSX, please see the contents of the CONTRIBUTING.rst file. Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Requires-Python: >=3.6 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542531.0 vmware-nsx-15.0.1.dev143/vmware_nsx.egg-info/SOURCES.txt0000644000175000017500000010024000000000000022743 0ustar00coreycorey00000000000000.coveragerc .mailmap .pylintrc .stestr.conf .testr.conf .zuul.yaml AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE MANIFEST.in README.rst TESTING.rst babel.cfg bindep.txt lower-constraints.txt requirements.txt run_tests.sh setup.cfg setup.py test-requirements.txt tox.ini api-ref/rest.md devstack/README.rst devstack/localrc_nsx_p devstack/localrc_nsx_v3 devstack/override-defaults devstack/plugin.sh devstack/settings devstack/lib/nsx_common devstack/lib/nsx_v3_p_common devstack/lib/vmware_dvs devstack/lib/vmware_nsx devstack/lib/vmware_nsx_p devstack/lib/vmware_nsx_tvd devstack/lib/vmware_nsx_v devstack/lib/vmware_nsx_v3 devstack/nsx_p/devstackgaterc devstack/nsx_v/devstackgaterc devstack/nsx_v/tvd_devstackgaterc devstack/nsx_v3/controller_local.conf.sample devstack/nsx_v3/devstackgaterc devstack/nsx_v3/kvm_compute_local.conf.sample devstack/tools/nsxp_cleanup.py devstack/tools/nsxv3_cleanup.py devstack/tools/nsxv_cleanup.py devstack/tools/nsxv_edge_resources.py devstack/tools/nsxv_fw_autodraft_setting.py doc/requirements.txt doc/source/admin_util.rst doc/source/conf.py doc/source/contributing.rst 
doc/source/devstack.rst doc/source/housekeeper.rst doc/source/index.rst doc/source/installation.rst doc/source/readme.rst doc/source/usage.rst etc/README.txt etc/oslo-config-generator/nsx.ini etc/oslo-policy-generator/policy.conf etc/policy.d/flow-classifier.json etc/policy.d/routers.json releasenotes/notes/.placeholder releasenotes/notes/bind-floating-ips-per-az-142f0de7ebfae1c8.yaml releasenotes/notes/block-all-no-security-groups-47af550349dbc85a.yaml releasenotes/notes/dns-search-domain-configuration-a134af0ef028282c.yaml releasenotes/notes/dvs_dns_integration-831224f15acbc728.yaml releasenotes/notes/ens_support-49dbc626ba1b16be.yaml releasenotes/notes/fwaas_v2-9445ea0aaea91c60.yaml releasenotes/notes/nsx-dns-integration-extension-8260456051d61743.yaml releasenotes/notes/nsx-extension-drivers-b1aedabe5296d4d0.yaml releasenotes/notes/nsxv-availability-zones-85db159a647762b3.yaml releasenotes/notes/nsxv-bgp-support-44f857d382943e08.yaml releasenotes/notes/nsxv-edge-random-placement-9534371967edec8f.yaml releasenotes/notes/nsxv-exclusive-dhcp-7e5cde1cd88f8c5b.yaml releasenotes/notes/nsxv-fwaas-driver-4c457dee3fc3bae2.yaml releasenotes/notes/nsxv-fwaas-v2-driver-0b78d5e2c4034b21.yaml releasenotes/notes/nsxv-ipam-support-6eb1ac4e0e025ddd.yaml releasenotes/notes/nsxv-lbaas-l7-704f748300d1a399.yaml releasenotes/notes/nsxv-policy-3f552191f94873cd.yaml releasenotes/notes/nsxv-router-flavors-8e4cea7f6e12d44d.yaml releasenotes/notes/nsxv-service-insertion-32ab34a0e0f6ab4f.yaml releasenotes/notes/nsxv-subnets-dhcp-mtu-c7028748b516422e.yaml releasenotes/notes/nsxv-vlan-selection-ec73aac44b3648a1.yaml releasenotes/notes/nsxv3-add-trunk-driver-925ad1205972cbdf.yaml releasenotes/notes/nsxv3-availability-zones-8decf892df62.yaml releasenotes/notes/nsxv3-default-tier0-router-2983c6de10dd465a.yaml releasenotes/notes/nsxv3-dhcp-relay-32cf1ae281e1.yaml releasenotes/notes/nsxv3-init-from-tags-bcd4f3245a78e9a6.yaml releasenotes/notes/nsxv3-ipam-support-137174152c65459d.yaml 
releasenotes/notes/nsxv3-lbaasv2-driver-57f37d6614eb1510.yaml releasenotes/notes/nsxv3-lbaasv2-error-no-member-635ffc6308289aca.yaml releasenotes/notes/nsxv3-multi-managers-b645c4202a8476e9.yaml releasenotes/notes/nsxv3-native-dhcp-config-2b6bdd372a2d643f.yaml releasenotes/notes/nsxv3-native-dhcp-metadata-27af1de98302162f.yaml releasenotes/notes/nsxv3-switching-profiles-250aa43f5070dc37.yaml releasenotes/notes/nsxv3-taas-driver-1a316cf3915fcb3d.yaml releasenotes/notes/nsxv3-trnasparent-vlan-fe06e1d3aa2fbcd9.yaml releasenotes/notes/nsxv3-update-provider-types-aa1c20e988878ffe.yaml releasenotes/notes/nsxv3-vlan-selection-30c3d1dc1abe41d1.yaml releasenotes/notes/nsxv3-vpnaas-0b02762ff4b83904.yaml releasenotes/notes/octavia-support-2fa83d464dbc4e52.yaml releasenotes/notes/provider-security-group-2cfc1231dcaf21ac.yaml releasenotes/notes/qos-support-d52b5e3abfc6c8d4.yaml releasenotes/notes/rename_uuid_config_params-b36c379f64838334.yaml releasenotes/notes/rename_uuid_to_name-e64699df75176d4d.yaml releasenotes/notes/universal-switch-41487c280ad3c8ad.yaml releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/liberty.rst releasenotes/source/newton.rst releasenotes/source/ocata.rst releasenotes/source/pike.rst releasenotes/source/queens.rst releasenotes/source/unreleased.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder rhosp13/instructions.rst rhosp13/nsx-openstack-puppet.tar.gz rhosp13/nsx-openstack-tripleo-heat-templates.tar.gz tools/__init__.py tools/clean.sh tools/coding-checks.sh tools/generate_config_file_samples.sh tools/install_venv.py tools/install_venv_common.py tools/migrate_v_conf.sh tools/misc-sanity-checks.sh tools/test-setup.sh tools/with_venv.sh vmware_nsx/__init__.py vmware_nsx/_i18n.py vmware_nsx/nsx_cluster.py vmware_nsx/opts.py vmware_nsx/plugin.py vmware_nsx/version.py vmware_nsx.egg-info/PKG-INFO vmware_nsx.egg-info/SOURCES.txt vmware_nsx.egg-info/dependency_links.txt 
vmware_nsx.egg-info/entry_points.txt vmware_nsx.egg-info/not-zip-safe vmware_nsx.egg-info/pbr.json vmware_nsx.egg-info/requires.txt vmware_nsx.egg-info/top_level.txt vmware_nsx/api_client/__init__.py vmware_nsx/api_client/base.py vmware_nsx/api_client/client.py vmware_nsx/api_client/eventlet_client.py vmware_nsx/api_client/eventlet_request.py vmware_nsx/api_client/exception.py vmware_nsx/api_client/request.py vmware_nsx/api_client/version.py vmware_nsx/api_replay/__init__.py vmware_nsx/api_replay/cli.py vmware_nsx/api_replay/client.py vmware_nsx/api_replay/utils.py vmware_nsx/common/__init__.py vmware_nsx/common/availability_zones.py vmware_nsx/common/config.py vmware_nsx/common/driver_api.py vmware_nsx/common/exceptions.py vmware_nsx/common/l3_rpc_agent_api.py vmware_nsx/common/locking.py vmware_nsx/common/managers.py vmware_nsx/common/nsx_constants.py vmware_nsx/common/nsx_utils.py vmware_nsx/common/nsxv_constants.py vmware_nsx/common/profile.py vmware_nsx/common/utils.py vmware_nsx/db/__init__.py vmware_nsx/db/db.py vmware_nsx/db/distributedrouter.py vmware_nsx/db/extended_security_group.py vmware_nsx/db/extended_security_group_rule.py vmware_nsx/db/lsn_db.py vmware_nsx/db/maclearning.py vmware_nsx/db/nsx_models.py vmware_nsx/db/nsx_portbindings_db.py vmware_nsx/db/nsxrouter.py vmware_nsx/db/nsxv_db.py vmware_nsx/db/nsxv_models.py vmware_nsx/db/routertype.py vmware_nsx/db/vcns_models.py vmware_nsx/db/vnic_index_db.py vmware_nsx/db/migration/__init__.py vmware_nsx/db/migration/alembic_migrations/__init__.py vmware_nsx/db/migration/alembic_migrations/env.py vmware_nsx/db/migration/alembic_migrations/script.py.mako vmware_nsx/db/migration/alembic_migrations/versions/CONTRACT_HEAD vmware_nsx/db/migration/alembic_migrations/versions/EXPAND_HEAD vmware_nsx/db/migration/alembic_migrations/versions/kilo_release.py vmware_nsx/db/migration/alembic_migrations/versions/liberty/contract/393bf843b96_initial_liberty_no_op_contract_script.py 
vmware_nsx/db/migration/alembic_migrations/versions/liberty/contract/3c88bdea3054_nsxv_vdr_dhcp_binding.py vmware_nsx/db/migration/alembic_migrations/versions/liberty/expand/279b70ac3ae8_nsxv3_add_l2gwconnection_table.py vmware_nsx/db/migration/alembic_migrations/versions/liberty/expand/28430956782d_nsxv3_security_groups.py vmware_nsx/db/migration/alembic_migrations/versions/liberty/expand/53a3254aa95e_initial_liberty_no_op_expand_script.py vmware_nsx/db/migration/alembic_migrations/versions/mitaka/expand/20483029f1ff_update_tz_network_bindings.py vmware_nsx/db/migration/alembic_migrations/versions/mitaka/expand/2af850eb3970_update_nsxv_tz_binding_type.py vmware_nsx/db/migration/alembic_migrations/versions/mitaka/expand/312211a5725f_nsxv_lbv2.py vmware_nsx/db/migration/alembic_migrations/versions/mitaka/expand/4c45bcadccf9_extend_secgroup_rule.py vmware_nsx/db/migration/alembic_migrations/versions/mitaka/expand/69fb78b33d41_nsxv_add_search_domain_to_subnets.py vmware_nsx/db/migration/alembic_migrations/versions/newton/contract/081af0e396d7_nsx_extended_rule_table_rename.py vmware_nsx/db/migration/alembic_migrations/versions/newton/contract/5ed1ffbc0d2a_nsx_security_group_logging.py vmware_nsx/db/migration/alembic_migrations/versions/newton/contract/d49ac91b560e_nsxv_lbaasv2_shared_pools.py vmware_nsx/db/migration/alembic_migrations/versions/newton/contract/dbe29d208ac6_nsxv_add_dhcp_mtu_to_subnets.py vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/1b4eaffe4f31_nsx_provider_security_group.py vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/2c87aedb206f_nsxv_security_group_logging.py vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/3e4dccfe6fb4_nsx_security_group_logging.py vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/5e564e781d77_add_nsx_binding_type.py vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/633514d94b93_add_support_for_taas.py 
vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/6e6da8296c0e_add_nsxv_ipam.py vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/7b5ec3caa9a4_nsxv_fix_az_default.py vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/7e46906f8997_lbaas_foreignkeys.py vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/86a55205337c_nsxv_availability_zone_router_mapping.py vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/967462f585e1_add_dvs_id_to_switch_mappings.py vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/aede17d51d0f_timestamps.py vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/b7f41687cbad_nsxv3_qos_policy_mapping.py vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/c288bb6a7252_nsxv_add_resource_pool_to_router_mapping.py vmware_nsx/db/migration/alembic_migrations/versions/newton/expand/c644ec62c585_nsxv3_add_nsx_dhcp_service_tables.py vmware_nsx/db/migration/alembic_migrations/versions/ocata/contract/14a89ddf96e2_add_az_internal_network.py vmware_nsx/db/migration/alembic_migrations/versions/ocata/contract/5c8f451290b7_nsx_ipam_table_rename.py vmware_nsx/db/migration/alembic_migrations/versions/ocata/expand/01a33f93f5fd_nsxv_lbv2_l7pol.py vmware_nsx/db/migration/alembic_migrations/versions/ocata/expand/dd9fe5a3a526_nsx_add_certificate_table.py vmware_nsx/db/migration/alembic_migrations/versions/ocata/expand/e816d4fe9d4f_nsx_add_policy_security_group.py vmware_nsx/db/migration/alembic_migrations/versions/pike/contract/84ceffa27115_nsxv3_qos_policy_no_foreign_key.py vmware_nsx/db/migration/alembic_migrations/versions/pike/contract/8c0a81a07691_fix_ipam_table.py vmware_nsx/db/migration/alembic_migrations/versions/pike/expand/53eb497903a4_drop_vdr_dhcp_bindings.py vmware_nsx/db/migration/alembic_migrations/versions/pike/expand/7c4704ad37df_nsxv_lbv2_l7pol_fix.py 
vmware_nsx/db/migration/alembic_migrations/versions/pike/expand/8699700cd95c_nsxv_bgp_speaker_mapping.py vmware_nsx/db/migration/alembic_migrations/versions/pike/expand/e4c503f4133f_port_vnic_type_support.py vmware_nsx/db/migration/alembic_migrations/versions/pike/expand/ea7a72ab9643_nsxv3_lbaas_mapping.py vmware_nsx/db/migration/alembic_migrations/versions/queens/contract/717f7f63a219_nsxv3_lbaas_l7policy.py vmware_nsx/db/migration/alembic_migrations/versions/queens/contract/a1be06050b41_update_nsx_binding_types.py vmware_nsx/db/migration/alembic_migrations/versions/queens/expand/0dbeda408e41_nsxv3_vpn_mapping.py vmware_nsx/db/migration/alembic_migrations/versions/queens/expand/9799427fc0e1_nsx_tv_map.py vmware_nsx/db/migration/alembic_migrations/versions/stein/expand/99bfcb6003c6_lbaas_error_no_member.py vmware_nsx/db/migration/alembic_migrations/versions/stein/expand/fc6308289aca_lbaas_no_foreign_key.py vmware_nsx/db/migration/models/__init__.py vmware_nsx/db/migration/models/head.py vmware_nsx/dhcp_meta/__init__.py vmware_nsx/dhcp_meta/combined.py vmware_nsx/dhcp_meta/constants.py vmware_nsx/dhcp_meta/lsnmanager.py vmware_nsx/dhcp_meta/migration.py vmware_nsx/dhcp_meta/modes.py vmware_nsx/dhcp_meta/nsx.py vmware_nsx/dhcp_meta/rpc.py vmware_nsx/dvs/__init__.py vmware_nsx/dvs/dvs.py vmware_nsx/dvs/dvs_utils.py vmware_nsx/extension_drivers/__init__.py vmware_nsx/extension_drivers/dns_integration.py vmware_nsx/extensions/__init__.py vmware_nsx/extensions/advancedserviceproviders.py vmware_nsx/extensions/api_replay.py vmware_nsx/extensions/dhcp_mtu.py vmware_nsx/extensions/dns_search_domain.py vmware_nsx/extensions/edge_service_gateway_bgp_peer.py vmware_nsx/extensions/housekeeper.py vmware_nsx/extensions/lsn.py vmware_nsx/extensions/maclearning.py vmware_nsx/extensions/nsxpolicy.py vmware_nsx/extensions/projectpluginmap.py vmware_nsx/extensions/providersecuritygroup.py vmware_nsx/extensions/routersize.py vmware_nsx/extensions/routertype.py 
vmware_nsx/extensions/secgroup_rule_local_ip_prefix.py vmware_nsx/extensions/securitygrouplogging.py vmware_nsx/extensions/securitygrouppolicy.py vmware_nsx/extensions/vnicindex.py vmware_nsx/nsxlib/__init__.py vmware_nsx/nsxlib/mh/__init__.py vmware_nsx/nsxlib/mh/lsn.py vmware_nsx/nsxlib/mh/switch.py vmware_nsx/osc/__init__.py vmware_nsx/osc/plugin.py vmware_nsx/osc/v2/__init__.py vmware_nsx/osc/v2/port.py vmware_nsx/osc/v2/project_plugin_map.py vmware_nsx/osc/v2/router.py vmware_nsx/osc/v2/security_group.py vmware_nsx/osc/v2/subnet.py vmware_nsx/osc/v2/utils.py vmware_nsx/plugins/__init__.py vmware_nsx/plugins/common/__init__.py vmware_nsx/plugins/common/plugin.py vmware_nsx/plugins/common/housekeeper/__init__.py vmware_nsx/plugins/common/housekeeper/base_job.py vmware_nsx/plugins/common/housekeeper/housekeeper.py vmware_nsx/plugins/common_v3/__init__.py vmware_nsx/plugins/common_v3/availability_zones.py vmware_nsx/plugins/common_v3/plugin.py vmware_nsx/plugins/dvs/__init__.py vmware_nsx/plugins/dvs/dhcp.py vmware_nsx/plugins/dvs/plugin.py vmware_nsx/plugins/nsx/__init__.py vmware_nsx/plugins/nsx/plugin.py vmware_nsx/plugins/nsx/utils.py vmware_nsx/plugins/nsx_p/__init__.py vmware_nsx/plugins/nsx_p/availability_zones.py vmware_nsx/plugins/nsx_p/plugin.py vmware_nsx/plugins/nsx_v/__init__.py vmware_nsx/plugins/nsx_v/availability_zones.py vmware_nsx/plugins/nsx_v/managers.py vmware_nsx/plugins/nsx_v/md_proxy.py vmware_nsx/plugins/nsx_v/plugin.py vmware_nsx/plugins/nsx_v/drivers/__init__.py vmware_nsx/plugins/nsx_v/drivers/abstract_router_driver.py vmware_nsx/plugins/nsx_v/drivers/distributed_router_driver.py vmware_nsx/plugins/nsx_v/drivers/exclusive_router_driver.py vmware_nsx/plugins/nsx_v/drivers/shared_router_driver.py vmware_nsx/plugins/nsx_v/housekeeper/__init__.py vmware_nsx/plugins/nsx_v/housekeeper/error_backup_edge.py vmware_nsx/plugins/nsx_v/housekeeper/error_dhcp_edge.py vmware_nsx/plugins/nsx_v/vshield/__init__.py 
vmware_nsx/plugins/nsx_v/vshield/edge_appliance_driver.py vmware_nsx/plugins/nsx_v/vshield/edge_dynamic_routing_driver.py vmware_nsx/plugins/nsx_v/vshield/edge_firewall_driver.py vmware_nsx/plugins/nsx_v/vshield/edge_ipsecvpn_driver.py vmware_nsx/plugins/nsx_v/vshield/edge_utils.py vmware_nsx/plugins/nsx_v/vshield/nsxv_edge_cfg_obj.py vmware_nsx/plugins/nsx_v/vshield/nsxv_loadbalancer.py vmware_nsx/plugins/nsx_v/vshield/securitygroup_utils.py vmware_nsx/plugins/nsx_v/vshield/vcns.py vmware_nsx/plugins/nsx_v/vshield/vcns_driver.py vmware_nsx/plugins/nsx_v/vshield/common/VcnsApiClient.py vmware_nsx/plugins/nsx_v/vshield/common/__init__.py vmware_nsx/plugins/nsx_v/vshield/common/constants.py vmware_nsx/plugins/nsx_v/vshield/common/exceptions.py vmware_nsx/plugins/nsx_v/vshield/tasks/__init__.py vmware_nsx/plugins/nsx_v/vshield/tasks/constants.py vmware_nsx/plugins/nsx_v/vshield/tasks/tasks.py vmware_nsx/plugins/nsx_v3/__init__.py vmware_nsx/plugins/nsx_v3/availability_zones.py vmware_nsx/plugins/nsx_v3/cert_utils.py vmware_nsx/plugins/nsx_v3/plugin.py vmware_nsx/plugins/nsx_v3/utils.py vmware_nsx/plugins/nsx_v3/api_replay/__init__.py vmware_nsx/plugins/nsx_v3/housekeeper/__init__.py vmware_nsx/plugins/nsx_v3/housekeeper/mismatch_logical_port.py vmware_nsx/plugins/nsx_v3/housekeeper/orphaned_dhcp_server.py vmware_nsx/plugins/nsx_v3/housekeeper/orphaned_firewall_section.py vmware_nsx/plugins/nsx_v3/housekeeper/orphaned_logical_router.py vmware_nsx/plugins/nsx_v3/housekeeper/orphaned_logical_switch.py vmware_nsx/policies/__init__.py vmware_nsx/policies/base.py vmware_nsx/policies/housekeeper.py vmware_nsx/policies/lsn.py vmware_nsx/policies/maclearning.py vmware_nsx/policies/network_gateway.py vmware_nsx/policies/nsxpolicy.py vmware_nsx/policies/providersecuritygroup.py vmware_nsx/policies/security_group.py vmware_nsx/services/__init__.py vmware_nsx/services/dynamic_routing/__init__.py vmware_nsx/services/dynamic_routing/bgp_plugin.py 
vmware_nsx/services/dynamic_routing/nsx_v/__init__.py vmware_nsx/services/dynamic_routing/nsx_v/driver.py vmware_nsx/services/flowclassifier/__init__.py vmware_nsx/services/flowclassifier/nsx_v/__init__.py vmware_nsx/services/flowclassifier/nsx_v/driver.py vmware_nsx/services/flowclassifier/nsx_v/utils.py vmware_nsx/services/fwaas/__init__.py vmware_nsx/services/fwaas/common/__init__.py vmware_nsx/services/fwaas/common/api_replay_driver.py vmware_nsx/services/fwaas/common/fwaas_callbacks_v2.py vmware_nsx/services/fwaas/common/fwaas_driver_base.py vmware_nsx/services/fwaas/common/fwaas_mocks.py vmware_nsx/services/fwaas/common/utils.py vmware_nsx/services/fwaas/common/v3_utils.py vmware_nsx/services/fwaas/nsx_p/__init__.py vmware_nsx/services/fwaas/nsx_p/edge_fwaas_driver_v2.py vmware_nsx/services/fwaas/nsx_p/fwaas_callbacks_v2.py vmware_nsx/services/fwaas/nsx_tv/__init__.py vmware_nsx/services/fwaas/nsx_tv/edge_fwaas_driver_v2.py vmware_nsx/services/fwaas/nsx_tv/plugin_v2.py vmware_nsx/services/fwaas/nsx_v/__init__.py vmware_nsx/services/fwaas/nsx_v/edge_fwaas_driver_v2.py vmware_nsx/services/fwaas/nsx_v/fwaas_callbacks_v2.py vmware_nsx/services/fwaas/nsx_v3/__init__.py vmware_nsx/services/fwaas/nsx_v3/edge_fwaas_driver_base.py vmware_nsx/services/fwaas/nsx_v3/edge_fwaas_driver_v2.py vmware_nsx/services/fwaas/nsx_v3/fwaas_callbacks_v2.py vmware_nsx/services/ipam/__init__.py vmware_nsx/services/ipam/common/__init__.py vmware_nsx/services/ipam/common/driver.py vmware_nsx/services/ipam/nsx_tvd/__init__.py vmware_nsx/services/ipam/nsx_tvd/driver.py vmware_nsx/services/ipam/nsx_v/__init__.py vmware_nsx/services/ipam/nsx_v/driver.py vmware_nsx/services/ipam/nsx_v3/__init__.py vmware_nsx/services/ipam/nsx_v3/driver.py vmware_nsx/services/l2gateway/__init__.py vmware_nsx/services/l2gateway/nsx_tvd/__init__.py vmware_nsx/services/l2gateway/nsx_tvd/driver.py vmware_nsx/services/l2gateway/nsx_tvd/plugin.py vmware_nsx/services/l2gateway/nsx_v/__init__.py 
vmware_nsx/services/l2gateway/nsx_v/driver.py vmware_nsx/services/l2gateway/nsx_v3/__init__.py vmware_nsx/services/l2gateway/nsx_v3/driver.py vmware_nsx/services/lbaas/__init__.py vmware_nsx/services/lbaas/base_mgr.py vmware_nsx/services/lbaas/lb_common.py vmware_nsx/services/lbaas/lb_const.py vmware_nsx/services/lbaas/nsx_p/__init__.py vmware_nsx/services/lbaas/nsx_p/implementation/__init__.py vmware_nsx/services/lbaas/nsx_p/implementation/healthmonitor_mgr.py vmware_nsx/services/lbaas/nsx_p/implementation/l7policy_mgr.py vmware_nsx/services/lbaas/nsx_p/implementation/l7rule_mgr.py vmware_nsx/services/lbaas/nsx_p/implementation/lb_const.py vmware_nsx/services/lbaas/nsx_p/implementation/lb_utils.py vmware_nsx/services/lbaas/nsx_p/implementation/listener_mgr.py vmware_nsx/services/lbaas/nsx_p/implementation/loadbalancer_mgr.py vmware_nsx/services/lbaas/nsx_p/implementation/member_mgr.py vmware_nsx/services/lbaas/nsx_p/implementation/pool_mgr.py vmware_nsx/services/lbaas/nsx_v/__init__.py vmware_nsx/services/lbaas/nsx_v/lbaas_common.py vmware_nsx/services/lbaas/nsx_v/implementation/__init__.py vmware_nsx/services/lbaas/nsx_v/implementation/healthmon_mgr.py vmware_nsx/services/lbaas/nsx_v/implementation/l7policy_mgr.py vmware_nsx/services/lbaas/nsx_v/implementation/l7rule_mgr.py vmware_nsx/services/lbaas/nsx_v/implementation/listener_mgr.py vmware_nsx/services/lbaas/nsx_v/implementation/loadbalancer_mgr.py vmware_nsx/services/lbaas/nsx_v/implementation/member_mgr.py vmware_nsx/services/lbaas/nsx_v/implementation/pool_mgr.py vmware_nsx/services/lbaas/nsx_v3/__init__.py vmware_nsx/services/lbaas/nsx_v3/implementation/__init__.py vmware_nsx/services/lbaas/nsx_v3/implementation/healthmonitor_mgr.py vmware_nsx/services/lbaas/nsx_v3/implementation/l7policy_mgr.py vmware_nsx/services/lbaas/nsx_v3/implementation/l7rule_mgr.py vmware_nsx/services/lbaas/nsx_v3/implementation/lb_utils.py vmware_nsx/services/lbaas/nsx_v3/implementation/listener_mgr.py 
vmware_nsx/services/lbaas/nsx_v3/implementation/loadbalancer_mgr.py vmware_nsx/services/lbaas/nsx_v3/implementation/member_mgr.py vmware_nsx/services/lbaas/nsx_v3/implementation/pool_mgr.py vmware_nsx/services/lbaas/octavia/__init__.py vmware_nsx/services/lbaas/octavia/constants.py vmware_nsx/services/lbaas/octavia/octavia_driver.py vmware_nsx/services/lbaas/octavia/octavia_listener.py vmware_nsx/services/lbaas/octavia/tvd_wrapper.py vmware_nsx/services/qos/__init__.py vmware_nsx/services/qos/common/__init__.py vmware_nsx/services/qos/common/utils.py vmware_nsx/services/qos/nsx_tvd/__init__.py vmware_nsx/services/qos/nsx_tvd/plugin.py vmware_nsx/services/qos/nsx_v/__init__.py vmware_nsx/services/qos/nsx_v/driver.py vmware_nsx/services/qos/nsx_v/plugin.py vmware_nsx/services/qos/nsx_v/utils.py vmware_nsx/services/qos/nsx_v3/__init__.py vmware_nsx/services/qos/nsx_v3/driver.py vmware_nsx/services/qos/nsx_v3/message_queue.py vmware_nsx/services/qos/nsx_v3/pol_utils.py vmware_nsx/services/qos/nsx_v3/utils.py vmware_nsx/services/trunk/__init__.py vmware_nsx/services/trunk/nsx_p/__init__.py vmware_nsx/services/trunk/nsx_p/driver.py vmware_nsx/services/trunk/nsx_v3/__init__.py vmware_nsx/services/trunk/nsx_v3/driver.py vmware_nsx/services/vpnaas/__init__.py vmware_nsx/services/vpnaas/nsx_plugin.py vmware_nsx/services/vpnaas/common_v3/__init__.py vmware_nsx/services/vpnaas/common_v3/ipsec_driver.py vmware_nsx/services/vpnaas/common_v3/ipsec_utils.py vmware_nsx/services/vpnaas/common_v3/ipsec_validator.py vmware_nsx/services/vpnaas/nsx_tvd/__init__.py vmware_nsx/services/vpnaas/nsx_tvd/ipsec_driver.py vmware_nsx/services/vpnaas/nsx_tvd/ipsec_validator.py vmware_nsx/services/vpnaas/nsx_tvd/plugin.py vmware_nsx/services/vpnaas/nsxp/__init__.py vmware_nsx/services/vpnaas/nsxp/ipsec_driver.py vmware_nsx/services/vpnaas/nsxp/ipsec_validator.py vmware_nsx/services/vpnaas/nsxv/__init__.py vmware_nsx/services/vpnaas/nsxv/ipsec_driver.py 
vmware_nsx/services/vpnaas/nsxv/ipsec_validator.py vmware_nsx/services/vpnaas/nsxv3/__init__.py vmware_nsx/services/vpnaas/nsxv3/ipsec_driver.py vmware_nsx/services/vpnaas/nsxv3/ipsec_validator.py vmware_nsx/shell/__init__.py vmware_nsx/shell/commands.py vmware_nsx/shell/hk_trigger.sh vmware_nsx/shell/nsx_instance_if_migrate.py vmware_nsx/shell/nsxadmin.py vmware_nsx/shell/resources.py vmware_nsx/shell/admin/README.rst vmware_nsx/shell/admin/__init__.py vmware_nsx/shell/admin/version.py vmware_nsx/shell/admin/plugins/__init__.py vmware_nsx/shell/admin/plugins/common/__init__.py vmware_nsx/shell/admin/plugins/common/constants.py vmware_nsx/shell/admin/plugins/common/formatters.py vmware_nsx/shell/admin/plugins/common/utils.py vmware_nsx/shell/admin/plugins/common/v3_common_cert.py vmware_nsx/shell/admin/plugins/nsxp/__init__.py vmware_nsx/shell/admin/plugins/nsxp/resources/__init__.py vmware_nsx/shell/admin/plugins/nsxp/resources/certificates.py vmware_nsx/shell/admin/plugins/nsxp/resources/networks.py vmware_nsx/shell/admin/plugins/nsxp/resources/routers.py vmware_nsx/shell/admin/plugins/nsxp/resources/securitygroups.py vmware_nsx/shell/admin/plugins/nsxp/resources/system.py vmware_nsx/shell/admin/plugins/nsxp/resources/utils.py vmware_nsx/shell/admin/plugins/nsxtvd/__init__.py vmware_nsx/shell/admin/plugins/nsxtvd/resources/__init__.py vmware_nsx/shell/admin/plugins/nsxtvd/resources/migrate.py vmware_nsx/shell/admin/plugins/nsxv/__init__.py vmware_nsx/shell/admin/plugins/nsxv/resources/__init__.py vmware_nsx/shell/admin/plugins/nsxv/resources/backup_edges.py vmware_nsx/shell/admin/plugins/nsxv/resources/config.py vmware_nsx/shell/admin/plugins/nsxv/resources/dhcp_binding.py vmware_nsx/shell/admin/plugins/nsxv/resources/edges.py vmware_nsx/shell/admin/plugins/nsxv/resources/gw_edges.py vmware_nsx/shell/admin/plugins/nsxv/resources/metadata.py vmware_nsx/shell/admin/plugins/nsxv/resources/migration.py vmware_nsx/shell/admin/plugins/nsxv/resources/networks.py 
vmware_nsx/shell/admin/plugins/nsxv/resources/routers.py vmware_nsx/shell/admin/plugins/nsxv/resources/securitygroups.py vmware_nsx/shell/admin/plugins/nsxv/resources/spoofguard_policy.py vmware_nsx/shell/admin/plugins/nsxv/resources/utils.py vmware_nsx/shell/admin/plugins/nsxv3/__init__.py vmware_nsx/shell/admin/plugins/nsxv3/resources/__init__.py vmware_nsx/shell/admin/plugins/nsxv3/resources/certificates.py vmware_nsx/shell/admin/plugins/nsxv3/resources/cluster.py vmware_nsx/shell/admin/plugins/nsxv3/resources/config.py vmware_nsx/shell/admin/plugins/nsxv3/resources/dhcp_binding.py vmware_nsx/shell/admin/plugins/nsxv3/resources/dhcp_servers.py vmware_nsx/shell/admin/plugins/nsxv3/resources/http_service.py vmware_nsx/shell/admin/plugins/nsxv3/resources/loadbalancer.py vmware_nsx/shell/admin/plugins/nsxv3/resources/metadata_proxy.py vmware_nsx/shell/admin/plugins/nsxv3/resources/networks.py vmware_nsx/shell/admin/plugins/nsxv3/resources/ports.py vmware_nsx/shell/admin/plugins/nsxv3/resources/routers.py vmware_nsx/shell/admin/plugins/nsxv3/resources/securitygroups.py vmware_nsx/shell/admin/plugins/nsxv3/resources/utils.py vmware_nsx/tests/__init__.py vmware_nsx/tests/functional/__init__.py vmware_nsx/tests/functional/requirements.txt vmware_nsx/tests/unit/__init__.py vmware_nsx/tests/unit/test_utils.py vmware_nsx/tests/unit/common_plugin/__init__.py vmware_nsx/tests/unit/common_plugin/common_v3.py vmware_nsx/tests/unit/common_plugin/test_housekeeper.py vmware_nsx/tests/unit/db/__init__.py vmware_nsx/tests/unit/db/test_migrations.py vmware_nsx/tests/unit/dvs/__init__.py vmware_nsx/tests/unit/dvs/test_plugin.py vmware_nsx/tests/unit/dvs/test_utils.py vmware_nsx/tests/unit/etc/fake_get_gwservice.json vmware_nsx/tests/unit/etc/fake_get_lqueue.json vmware_nsx/tests/unit/etc/fake_get_lrouter.json vmware_nsx/tests/unit/etc/fake_get_lrouter_lport.json vmware_nsx/tests/unit/etc/fake_get_lrouter_lport_att.json vmware_nsx/tests/unit/etc/fake_get_lrouter_nat.json 
vmware_nsx/tests/unit/etc/fake_get_lswitch.json vmware_nsx/tests/unit/etc/fake_get_lswitch_lport.json vmware_nsx/tests/unit/etc/fake_get_lswitch_lport_att.json vmware_nsx/tests/unit/etc/fake_get_lswitch_lport_status.json vmware_nsx/tests/unit/etc/fake_get_security_profile.json vmware_nsx/tests/unit/etc/fake_post_gwservice.json vmware_nsx/tests/unit/etc/fake_post_lqueue.json vmware_nsx/tests/unit/etc/fake_post_lrouter.json vmware_nsx/tests/unit/etc/fake_post_lrouter_lport.json vmware_nsx/tests/unit/etc/fake_post_lrouter_nat.json vmware_nsx/tests/unit/etc/fake_post_lswitch.json vmware_nsx/tests/unit/etc/fake_post_lswitch_lport.json vmware_nsx/tests/unit/etc/fake_post_security_profile.json vmware_nsx/tests/unit/etc/fake_put_lrouter_lport_att.json vmware_nsx/tests/unit/etc/fake_put_lswitch_lport_att.json vmware_nsx/tests/unit/etc/neutron.conf.test vmware_nsx/tests/unit/etc/nsx.ini.agentless.test vmware_nsx/tests/unit/etc/nsx.ini.basic.test vmware_nsx/tests/unit/etc/nsx.ini.combined.test vmware_nsx/tests/unit/etc/nsx.ini.full.test vmware_nsx/tests/unit/etc/nsx.ini.test vmware_nsx/tests/unit/etc/nvp.ini.full.test vmware_nsx/tests/unit/etc/vcns.ini.test vmware_nsx/tests/unit/extension_drivers/__init__.py vmware_nsx/tests/unit/extension_drivers/test_dns_integration.py vmware_nsx/tests/unit/extensions/__init__.py vmware_nsx/tests/unit/extensions/test_addresspairs.py vmware_nsx/tests/unit/extensions/test_dhcp_mtu.py vmware_nsx/tests/unit/extensions/test_dns_search_domain.py vmware_nsx/tests/unit/extensions/test_maclearning.py vmware_nsx/tests/unit/extensions/test_metadata.py vmware_nsx/tests/unit/extensions/test_portsecurity.py vmware_nsx/tests/unit/extensions/test_provider_security_groups.py vmware_nsx/tests/unit/extensions/test_providernet.py vmware_nsx/tests/unit/extensions/test_secgroup_rule_local_ip_prefix.py vmware_nsx/tests/unit/extensions/test_security_group_policy.py vmware_nsx/tests/unit/extensions/test_securitygroup.py 
vmware_nsx/tests/unit/extensions/test_vnic_index.py vmware_nsx/tests/unit/nsx_p/__init__.py vmware_nsx/tests/unit/nsx_p/test_api_replay.py vmware_nsx/tests/unit/nsx_p/test_availability_zones.py vmware_nsx/tests/unit/nsx_p/test_dhcp_metadata.py vmware_nsx/tests/unit/nsx_p/test_fwaas_v2_driver.py vmware_nsx/tests/unit/nsx_p/test_plugin.py vmware_nsx/tests/unit/nsx_p/test_policy_dhcp_metadata.py vmware_nsx/tests/unit/nsx_tvd/__init__.py vmware_nsx/tests/unit/nsx_tvd/test_plugin.py vmware_nsx/tests/unit/nsx_v/__init__.py vmware_nsx/tests/unit/nsx_v/test_availability_zones.py vmware_nsx/tests/unit/nsx_v/test_edge_loadbalancer_driver_v2.py vmware_nsx/tests/unit/nsx_v/test_fwaas_v2_driver.py vmware_nsx/tests/unit/nsx_v/test_lbaas_common.py vmware_nsx/tests/unit/nsx_v/test_misc.py vmware_nsx/tests/unit/nsx_v/test_nsxv_loadbalancer.py vmware_nsx/tests/unit/nsx_v/test_plugin.py vmware_nsx/tests/unit/nsx_v/housekeeper/__init__.py vmware_nsx/tests/unit/nsx_v/housekeeper/test_error_backup_edge.py vmware_nsx/tests/unit/nsx_v/housekeeper/test_error_dhcp_edge.py vmware_nsx/tests/unit/nsx_v/vshield/__init__.py vmware_nsx/tests/unit/nsx_v/vshield/fake_vcns.py vmware_nsx/tests/unit/nsx_v/vshield/test_edge_utils.py vmware_nsx/tests/unit/nsx_v/vshield/test_vcns_driver.py vmware_nsx/tests/unit/nsx_v3/__init__.py vmware_nsx/tests/unit/nsx_v3/test_api_replay.py vmware_nsx/tests/unit/nsx_v3/test_availability_zones.py vmware_nsx/tests/unit/nsx_v3/test_client_cert.py vmware_nsx/tests/unit/nsx_v3/test_constants.py vmware_nsx/tests/unit/nsx_v3/test_dhcp_metadata.py vmware_nsx/tests/unit/nsx_v3/test_fwaas_v2_driver.py vmware_nsx/tests/unit/nsx_v3/test_plugin.py vmware_nsx/tests/unit/nsx_v3/housekeeper/__init__.py vmware_nsx/tests/unit/nsx_v3/housekeeper/test_mismatch_logical_port.py vmware_nsx/tests/unit/nsx_v3/housekeeper/test_orphaned_dhcp_server.py vmware_nsx/tests/unit/nsx_v3/housekeeper/test_orphaned_logical_router.py vmware_nsx/tests/unit/nsx_v3/housekeeper/test_orphaned_logical_switch.py 
vmware_nsx/tests/unit/nsxlib/mh/__init__.py vmware_nsx/tests/unit/nsxlib/mh/base.py vmware_nsx/tests/unit/nsxlib/mh/test_lsn.py vmware_nsx/tests/unit/nsxlib/mh/test_switch.py vmware_nsx/tests/unit/osc/__init__.py vmware_nsx/tests/unit/osc/v2/__init__.py vmware_nsx/tests/unit/osc/v2/test_port.py vmware_nsx/tests/unit/osc/v2/test_router.py vmware_nsx/tests/unit/osc/v2/test_security_group.py vmware_nsx/tests/unit/osc/v2/test_subnet.py vmware_nsx/tests/unit/services/__init__.py vmware_nsx/tests/unit/services/dynamic_routing/__init__.py vmware_nsx/tests/unit/services/dynamic_routing/test_nsxv_bgp_driver.py vmware_nsx/tests/unit/services/flowclassifier/__init__.py vmware_nsx/tests/unit/services/flowclassifier/test_nsxv_driver.py vmware_nsx/tests/unit/services/ipam/__init__.py vmware_nsx/tests/unit/services/ipam/test_nsxv3_driver.py vmware_nsx/tests/unit/services/ipam/test_nsxv_driver.py vmware_nsx/tests/unit/services/l2gateway/__init__.py vmware_nsx/tests/unit/services/l2gateway/test_nsxv3_driver.py vmware_nsx/tests/unit/services/l2gateway/test_nsxv_driver.py vmware_nsx/tests/unit/services/lbaas/__init__.py vmware_nsx/tests/unit/services/lbaas/lb_constants.py vmware_nsx/tests/unit/services/lbaas/lb_data_models.py vmware_nsx/tests/unit/services/lbaas/lb_db_models.py vmware_nsx/tests/unit/services/lbaas/lb_translators.py vmware_nsx/tests/unit/services/lbaas/test_nsxp_driver.py vmware_nsx/tests/unit/services/lbaas/test_nsxv3_driver.py vmware_nsx/tests/unit/services/lbaas/test_octavia_driver.py vmware_nsx/tests/unit/services/lbaas/test_octavia_listener.py vmware_nsx/tests/unit/services/qos/__init__.py vmware_nsx/tests/unit/services/qos/test_nsxp_notification.py vmware_nsx/tests/unit/services/qos/test_nsxv3_notification.py vmware_nsx/tests/unit/services/qos/test_nsxv_notification.py vmware_nsx/tests/unit/services/trunk/__init__.py vmware_nsx/tests/unit/services/trunk/test_nsxp_driver.py vmware_nsx/tests/unit/services/trunk/test_nsxv3_driver.py 
vmware_nsx/tests/unit/services/vpnaas/__init__.py vmware_nsx/tests/unit/services/vpnaas/test_nsxp_vpnaas.py vmware_nsx/tests/unit/services/vpnaas/test_nsxv3_vpnaas.py vmware_nsx/tests/unit/services/vpnaas/test_nsxv_vpnaas.py vmware_nsx/tests/unit/shell/__init__.py vmware_nsx/tests/unit/shell/test_admin_utils.py././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542531.0 vmware-nsx-15.0.1.dev143/vmware_nsx.egg-info/dependency_links.txt0000644000175000017500000000000100000000000025130 0ustar00coreycorey00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542531.0 vmware-nsx-15.0.1.dev143/vmware_nsx.egg-info/entry_points.txt0000644000175000017500000001076300000000000024367 0ustar00coreycorey00000000000000[console_scripts] neutron-check-nsx-config = vmware_nsx.check_nsx_config:main nsx-migration = vmware_nsx.api_replay.cli:main nsxadmin = vmware_nsx.shell.nsxadmin:main [firewall_drivers] vmware_nsxp_edge_v2 = vmware_nsx.services.fwaas.nsx_p.edge_fwaas_driver_v2:EdgeFwaasPDriverV2 vmware_nsxtvd_edge_v2 = vmware_nsx.services.fwaas.nsx_tv.edge_fwaas_driver_v2:EdgeFwaasTVDriverV2 vmware_nsxv3_edge_v2 = vmware_nsx.services.fwaas.nsx_v3.edge_fwaas_driver_v2:EdgeFwaasV3DriverV2 vmware_nsxv_edge_v2 = vmware_nsx.services.fwaas.nsx_v.edge_fwaas_driver_v2:EdgeFwaasVDriverV2 [networking_sfc.flowclassifier.drivers] vmware-nsxv-sfc = vmware_nsx.services.flowclassifier.nsx_v.driver:NsxvFlowClassifierDriver [neutron.core_plugins] vmware_dvs = vmware_nsx.plugin:NsxDvsPlugin vmware_nsx = vmware_nsx.plugin:NsxPlugin vmware_nsxp = vmware_nsx.plugin:NsxPolicyPlugin vmware_nsxtvd = vmware_nsx.plugin:NsxTVDPlugin vmware_nsxv = vmware_nsx.plugin:NsxVPlugin vmware_nsxv3 = vmware_nsx.plugin:NsxV3Plugin [neutron.db.alembic_migrations] vmware-nsx = vmware_nsx.db.migration:alembic_migrations [neutron.ipam_drivers] vmware_nsxtvd_ipam = vmware_nsx.services.ipam.nsx_tvd.driver:NsxTvdIpamDriver 
vmware_nsxv3_ipam = vmware_nsx.services.ipam.nsx_v3.driver:Nsxv3IpamDriver vmware_nsxv_ipam = vmware_nsx.services.ipam.nsx_v.driver:NsxvIpamDriver [neutron.policies] vmware-nsx = vmware_nsx.policies:list_rules [neutron.qos.notification_drivers] vmware_nsxv3_message_queue = vmware_nsx.services.qos.nsx_v3.message_queue:NsxV3QosNotificationDriver [neutron.service_plugins] vmware_nsx_vpnaas = vmware_nsx.services.vpnaas.nsx_plugin:NsxVPNPlugin vmware_nsxtvd_l2gw = vmware_nsx.services.l2gateway.nsx_tvd.plugin:L2GatewayPlugin vmware_nsxtvd_qos = vmware_nsx.services.qos.nsx_tvd.plugin:QoSPlugin vmware_nsxtvd_vpnaas = vmware_nsx.services.vpnaas.nsx_tvd.plugin:VPNPlugin vmware_nsxv_qos = vmware_nsx.services.qos.nsx_v.plugin:NsxVQosPlugin [octavia.api.drivers] vmwareedge = vmware_nsx.services.lbaas.octavia.octavia_driver:NSXOctaviaDriver [octavia.driver_agent.provider_agents] vmwareagent = vmware_nsx.services.lbaas.octavia.octavia_driver:vmware_nsx_provider_agent [openstack.cli.extension] nsxclient = vmware_nsx.osc.plugin [openstack.nsxclient.v2] port_create = vmware_nsx.osc.v2.port:NsxCreatePort port_set = vmware_nsx.osc.v2.port:NsxSetPort project_plugin_create = vmware_nsx.osc.v2.project_plugin_map:CreateProjectPluginMap project_plugin_list = vmware_nsx.osc.v2.project_plugin_map:ListProjectPluginMap project_plugin_show = vmware_nsx.osc.v2.project_plugin_map:ShowProjectPluginMap router_create = vmware_nsx.osc.v2.router:NsxCreateRouter router_set = vmware_nsx.osc.v2.router:NsxSetRouter security_group_create = vmware_nsx.osc.v2.security_group:NsxCreateSecurityGroup security_group_set = vmware_nsx.osc.v2.security_group:NsxSetSecurityGroup subnet_create = vmware_nsx.osc.v2.subnet:NsxCreateSubnet subnet_set = vmware_nsx.osc.v2.subnet:NsxSetSubnet [oslo.config.opts] nsx = vmware_nsx.opts:list_opts [oslo.policy.policies] vmware-nsx = vmware_nsx.policies:list_rules [vmware_nsx.extension_drivers] vmware_dvs_dns = vmware_nsx.extension_drivers.dns_integration:DNSExtensionDriverDVS 
vmware_nsxp_dns = vmware_nsx.extension_drivers.dns_integration:DNSExtensionDriverNSXp vmware_nsxv3_dns = vmware_nsx.extension_drivers.dns_integration:DNSExtensionDriverNSXv3 vmware_nsxv_dns = vmware_nsx.extension_drivers.dns_integration:DNSExtensionDriverNSXv [vmware_nsx.neutron.nsxv.housekeeper.jobs] error_backup_edge = vmware_nsx.plugins.nsx_v.housekeeper.error_backup_edge:ErrorBackupEdgeJob error_dhcp_edge = vmware_nsx.plugins.nsx_v.housekeeper.error_dhcp_edge:ErrorDhcpEdgeJob [vmware_nsx.neutron.nsxv.router_type_drivers] distributed = vmware_nsx.plugins.nsx_v.drivers.distributed_router_driver:RouterDistributedDriver exclusive = vmware_nsx.plugins.nsx_v.drivers.exclusive_router_driver:RouterExclusiveDriver shared = vmware_nsx.plugins.nsx_v.drivers.shared_router_driver:RouterSharedDriver [vmware_nsx.neutron.nsxv3.housekeeper.jobs] mismatch_logical_port = vmware_nsx.plugins.nsx_v3.housekeeper.mismatch_logical_port:MismatchLogicalportJob orphaned_dhcp_server = vmware_nsx.plugins.nsx_v3.housekeeper.orphaned_dhcp_server:OrphanedDhcpServerJob orphaned_firewall_section = vmware_nsx.plugins.nsx_v3.housekeeper.orphaned_firewall_section:OrphanedFirewallSectionJob orphaned_logical_router = vmware_nsx.plugins.nsx_v3.housekeeper.orphaned_logical_router:OrphanedLogicalRouterJob orphaned_logical_switch = vmware_nsx.plugins.nsx_v3.housekeeper.orphaned_logical_switch:OrphanedLogicalSwitchJob ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542531.0 vmware-nsx-15.0.1.dev143/vmware_nsx.egg-info/not-zip-safe0000644000175000017500000000000100000000000023310 0ustar00coreycorey00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542531.0 vmware-nsx-15.0.1.dev143/vmware_nsx.egg-info/pbr.json0000644000175000017500000000006100000000000022535 0ustar00coreycorey00000000000000{"git_version": "75cfea261", "is_release": 
false}././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542531.0 vmware-nsx-15.0.1.dev143/vmware_nsx.egg-info/requires.txt0000644000175000017500000000131500000000000023462 0ustar00coreycorey00000000000000PrettyTable<0.8,>=0.7.2 SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.2.0 decorator>=4.4.1 eventlet>=0.24.1 httplib2>=0.9.1 mock>=2.0.0 netaddr>=0.7.18 networking-l2gw>=15.0.0 networking-sfc>=9.0.0.0 neutron-dynamic-routing>=15.0.0.0 neutron-fwaas>=15.0.0.0 neutron-lib>=2.0.0 neutron-vpnaas>=15.0.0.0 neutron>=15.0.0.0 octavia-lib>=1.3.1 osc-lib>=1.14.0 oslo.concurrency>=3.26.0 oslo.config>=5.2.0 oslo.context>=2.19.2 oslo.db>=4.37.0 oslo.i18n>=3.15.3 oslo.log>=3.36.0 oslo.policy>=1.30.0 oslo.serialization>=2.28.1 oslo.service>=1.31.0 oslo.utils>=3.33.0 oslo.vmware>=2.17.0 pbr>=4.0.0 python-openstackclient>=4.0.0 requests>=2.14.2 six>=1.11.0 stevedore>=1.20.0 tenacity>=5.0.2 tooz>=1.58.0 vmware-nsxlib>=15.0.1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1586542531.0 vmware-nsx-15.0.1.dev143/vmware_nsx.egg-info/top_level.txt0000644000175000017500000000001300000000000023606 0ustar00coreycorey00000000000000vmware_nsx